Diffstat (limited to 'contrib/ffmpeg/doc')
-rw-r--r--  contrib/ffmpeg/doc/Makefile                                         |   20
-rw-r--r--  contrib/ffmpeg/doc/TODO                                             |   82
-rw-r--r--  contrib/ffmpeg/doc/faq.texi                                         |  312
-rw-r--r--  contrib/ffmpeg/doc/ffmpeg-doc.texi                                  | 1607
-rw-r--r--  contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt  |  172
-rw-r--r--  contrib/ffmpeg/doc/ffplay-doc.texi                                  |  104
-rw-r--r--  contrib/ffmpeg/doc/ffserver-doc.texi                                |  224
-rw-r--r--  contrib/ffmpeg/doc/ffserver.conf                                    |  349
-rw-r--r--  contrib/ffmpeg/doc/hooks.texi                                       |  113
-rw-r--r--  contrib/ffmpeg/doc/optimization.txt                                 |  158
-rw-r--r--  contrib/ffmpeg/doc/soc.txt                                          |   24
-rwxr-xr-x  contrib/ffmpeg/doc/texi2pod.pl                                      |  427
12 files changed, 3592 insertions, 0 deletions
diff --git a/contrib/ffmpeg/doc/Makefile b/contrib/ffmpeg/doc/Makefile
new file mode 100644
index 000000000..4fc9dfb8f
--- /dev/null
+++ b/contrib/ffmpeg/doc/Makefile
@@ -0,0 +1,20 @@
+-include ../config.mak
+
+VPATH=$(SRC_PATH_BARE)/doc
+
+all: ffmpeg-doc.html faq.html ffserver-doc.html ffplay-doc.html hooks.html \
+ ffmpeg.1 ffserver.1 ffplay.1
+
+%.html: %.texi Makefile
+ texi2html -monolithic -number $<
+
+%.pod: %-doc.texi
+ ./texi2pod.pl $< $@
+
+%.1: %.pod
+ pod2man --section=1 --center=" " --release=" " $< > $@
+
+clean:
+ rm -f *.html *.pod *.1
+
+.PHONY: all clean
diff --git a/contrib/ffmpeg/doc/TODO b/contrib/ffmpeg/doc/TODO
new file mode 100644
index 000000000..8271659d2
--- /dev/null
+++ b/contrib/ffmpeg/doc/TODO
@@ -0,0 +1,82 @@
+ffmpeg TODO list:
+----------------
+
+Fabrice's TODO list: (unordered)
+-------------------
+Short term:
+
+- seeking API and example in ffplay
+- use AVFMTCTX_DISCARD_PKT in ffplay so that DV has a chance to work
+- add RTSP regression test (both client and server)
+- make ffserver allocate AVFormatContext
+- clean up (incompatible change, for 0.5.0):
+ * AVStream -> AVComponent
+ * AVFormatContext -> AVInputStream/AVOutputStream
+ * suppress rate_emu from AVCodecContext
+- add new float/integer audio filtering and conversion: suppress
+  CODEC_ID_PCM_xxx and use CODEC_ID_RAWAUDIO.
+- fix telecine and frame rate conversion
+
+Long term (ask me if you want to help):
+
+- commit new imgconvert API and new PIX_FMT_xxx alpha formats
+- commit new LGPL'ed float and integer-only AC3 decoder
+- add WMA integer-only decoder
+- add new MPEG4-AAC audio decoder (both integer-only and float version)
+
+Michael's TODO list: (unordered) (if anyone wants to help with something, just ask)
+-------------------
+- optimize H264 CABAC
+- more optimizations
+- simpler rate control
+
+Francois' TODO list: (unordered, without any timeframe)
+-------------------
+- test MACE decoder against the openquicktime one as suggested by A'rpi
+- BeOS audio input grabbing backend
+- BeOS video input grabbing backend
+- have a REAL BeOS errno fix (return MKERROR(EXXX);), not a hack
+- publish my BeOS libposix on BeBits so I can officially support ffserver :)
+- check the whole code for thread-safety (global and init stuff)
+
+Philip's TODO list: (alphabetically ordered) (please help)
+------------------
+- Add a multi-ffm filetype so that feeds can be recorded into multiple files rather
+ than one big file.
+- Authenticated users support -- where the authentication is in the URL
+- Change ASF files so that the embedded timestamp in the frames is right rather
+ than being an offset from the start of the stream
+- Make ffm files more resilient to changes in the codec structures so that you
+ can play old ffm files.
+
+unassigned TODO: (unordered)
+---------------
+- use AVFrame for audio codecs too
+- rework aviobuf.c buffering strategy and fix url_fskip
+- generate optimal huffman tables for mjpeg encoding
+- fix ffserver regression tests
+- support XviD's motion estimation
+- support x264's motion estimation
+- support x264's rate control
+- SNOW: non translational motion compensation
+- SNOW: more optimal quantization
+- SNOW: 4x4 block support
+- SNOW: 1/8 pel motion compensation support
+- SNOW: iterative motion estimation based on subsampled images
+- FLAC: lossy encoding (viterbi and naive scalar quantization)
+- libavfilter
+- JPEG2000 decoder & encoder
+- MPEG4 GMC encoding support
+- macroblock-based pixel format (better cache locality, somewhat complex, one paper claimed it to be faster for high resolutions)
+- finish NUT implementation
+- seeking regression test
+- regression tests for codecs which don't have an encoder (I+P frame bitstream in svn)
+- add support for using MPlayer's video filters in ffmpeg
+- reverse engineer RV30/RV40
+- finish implementation of WMV2 j-picture
+- H264 encoder
+- per-MB rate control (so VCD and such work better)
+- replace/rewrite libavcodec/fdctref.c
+- write a script which iteratively changes all functions between always_inline and noinline and benchmarks the result to find the best set of inlined functions
+- set up roundup bugtracker somewhere with (newBug, reproduced, analyzed, fixed, worksForMe, duplicate, wontFix, invalid, needMoreInfo, newPatch, ok, applied, rejected, needChanges, newRequest, implemented, wontImplement, invalidReq) states and a checked integer
+- convert all the non SIMD asm into small asm vs. C testcases and submit them to the gcc devels so they can improve gcc
diff --git a/contrib/ffmpeg/doc/faq.texi b/contrib/ffmpeg/doc/faq.texi
new file mode 100644
index 000000000..9f1e8ec2d
--- /dev/null
+++ b/contrib/ffmpeg/doc/faq.texi
@@ -0,0 +1,312 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle FFmpeg FAQ
+@titlepage
+@sp 7
+@center @titlefont{FFmpeg FAQ}
+@sp 3
+@end titlepage
+
+
+@chapter General Problems
+
+@section I cannot read this file although this format seems to be supported by ffmpeg.
+
+Even if ffmpeg can read the file format, it may not support all its
+codecs. Please consult the supported codec list in the ffmpeg
+documentation.
+
+@section How do I encode JPEGs to another format ?
+
+If the JPEGs are named img1.jpg, img2.jpg, img3.jpg,..., use:
+
+@example
+ ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
+@end example
+
+@samp{%d} is replaced by the image number.
+
+@file{img%03d.jpg} generates @file{img001.jpg}, @file{img002.jpg}, etc...
+
+The same system is used for the other image formats.
+
+@section How do I encode a movie to single pictures ?
+
+Use:
+
+@example
+ ffmpeg -i movie.mpg movie%d.jpg
+@end example
+
+The @file{movie.mpg} used as input will be converted to
+@file{movie1.jpg}, @file{movie2.jpg}, etc...
+
+Instead of relying on file format self-recognition, you may also use
+@table @option
+@item -vcodec ppm
+@item -vcodec png
+@item -vcodec mjpeg
+@end table
+to force the encoding.
+
+Applying that to the previous example:
+@example
+ ffmpeg -i movie.mpg -f image2 -vcodec mjpeg menu%d.jpg
+@end example
+
+Beware that there is no "jpeg" codec. Use "mjpeg" instead.
+
+@section FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it ?
+
+No. FFmpeg only supports open source codecs. Windows DLLs are not
+portable, and they are bloated and often slow.
+
+@section I get "Unsupported codec (id=86043) for input stream #0.1". What is the problem ?
+
+This is the QCELP codec. FFmpeg currently has no support for it. Try MEncoder/MPlayer; they might work.
+
+@section Why do I see a slight quality degradation with multithreaded MPEG* encoding ?
+
+For multithreaded MPEG* encoding, the encoded slices must be independent,
+otherwise thread n would practically have to wait for n-1 to finish, so it's
+quite logical that there is a small reduction of quality. This is not a bug.
+
+@section How can I read from the standard input or write to the standard output ?
+
+Use @file{-} as filename.
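+
+For example, to read the input from a pipe (a sketch; file names are
+illustrative):
+
+@example
+ cat input.mpg | ffmpeg -i - output.avi
+@end example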
+
+@section Why does ffmpeg not decode audio in VOB files ?
+
+The audio is AC3 (a.k.a. A/52). AC3 decoding is an optional component in ffmpeg
+as the component that handles AC3 decoding (liba52) is currently released under
+the GPL. If you have liba52 installed on your system, enable AC3 decoding
+with @code{./configure --enable-a52}. Take care: by
+enabling AC3, you automatically change the license of libavcodec from
+LGPL to GPL.
+
+@section Which codecs are supported by Windows ?
+
+Windows does not support standard formats like MPEG very well, unless you
+install some additional codecs.
+
+The following list of video codecs should work on most Windows systems:
+@table @option
+@item msmpeg4v2
+.avi/.asf
+@item msmpeg4
+.asf only
+@item wmv1
+.asf only
+@item wmv2
+.asf only
+@item mpeg4
+only if you have some MPEG-4 codec installed like ffdshow or XviD
+@item mpeg1
+.mpg only
+@end table
+Note, ASF files often have .wmv or .wma extensions in Windows. It should also
+be mentioned that Microsoft claims a patent on the ASF format, and may sue
+or threaten users who create ASF files with non-Microsoft software. It is
+strongly advised to avoid ASF where possible.
+
+The following list of audio codecs should work on most Windows systems
+(an example command follows the list):
+@table @option
+@item adpcm_ima_wav
+@item adpcm_ms
+@item pcm
+@item mp3
+if some MP3 codec like LAME is installed
+@end table
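+
+For example, to produce an AVI file using only codecs from the lists above
+(a sketch; this assumes LAME support was enabled at configure time for the
+MP3 audio):
+
+@example
+ ffmpeg -i input.mpg -vcodec msmpeg4v2 -acodec mp3 -ab 128 output.avi
+@end example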
+
+@section Why does the chrominance data seem to be sampled at a different time from the luminance data on bt8x8 captures on Linux?
+
+This is a well-known bug in the bt8x8 driver. For 2.4.26 there is a patch at
+(@url{http://mplayerhq.hu/~michael/bttv-420-2.4.26.patch}). This may also
+apply cleanly to other 2.4-series kernels.
+
+@section How do I avoid the ugly aliasing artifacts in bt8x8 captures on Linux?
+
+Pass 'combfilter=1 lumafilter=1' to the bttv driver. Note though that 'combfilter=1'
+will cause somewhat too strong filtering. A fix is to apply (@url{http://mplayerhq.hu/~michael/bttv-comb-2.4.26.patch})
+or (@url{http://mplayerhq.hu/~michael/bttv-comb-2.6.6.patch})
+and pass 'combfilter=2'.
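+
+If the bttv driver is loaded as a module, one way to pass these options is via
+@code{modprobe} (a sketch; this assumes you can unload and reload the module):
+
+@example
+ modprobe -r bttv
+ modprobe bttv combfilter=2 lumafilter=1
+@end example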
+
+@section I have a problem with an old version of ffmpeg; where should I report it?
+Nowhere. Upgrade to the latest release or if there is no recent release upgrade
+to Subversion HEAD. You could also try to report it. Maybe you will get lucky and
+become the first person in history to get an answer different from "upgrade
+to Subversion HEAD".
+
+@section -f jpeg doesn't work.
+
+Try '-f image2 test%d.jpg'.
+
+@section Why can I not change the framerate?
+
+Some codecs, like MPEG-1/2, only allow a small number of fixed framerates.
+Choose a different codec with the -vcodec command line option.
+
+@section ffmpeg does not work; what is wrong?
+
+Try a 'make distclean' in the ffmpeg source directory. If this does not help, see
+(@url{http://ffmpeg.org/bugreports.php}).
+
+@section How do I encode XviD or DivX video with ffmpeg?
+
+Both XviD and DivX (version 4+) are implementations of the ISO MPEG-4
+standard (note that there are many other coding formats that use this
+same standard). Thus, use '-vcodec mpeg4' to encode these formats. The
+default fourcc stored in an MPEG-4-coded file will be 'FMP4'. If you want
+a different fourcc, use the '-vtag' option. E.g., '-vtag xvid' will
+force the fourcc 'xvid' to be stored as the video fourcc rather than the
+default.
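+
+For example (a sketch; bitrate and file names are illustrative):
+
+@example
+ ffmpeg -i input.avi -vcodec mpeg4 -vtag xvid -b 1000k output.avi
+@end example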
+
+@section How do I encode videos which play on the iPod?
+
+@table @option
+@item needed stuff
+-acodec aac -vcodec mpeg4 width<=320 height<=240
+@item working stuff
+4mv, title
+@item non-working stuff
+B-frames
+@item example command line
+ffmpeg -i input -acodec aac -ab 128 -vcodec mpeg4 -b 1200kb -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -s 320x180 -title X output.mp4
+@end table
+
+@section How do I encode videos which play on the PSP?
+
+@table @option
+@item needed stuff
+-acodec aac -vcodec mpeg4 width*height<=76800 width%16=0 height%16=0 -ar 24000 -r 30000/1001 or 15000/1001 -f psp
+@item working stuff
+4mv, title
+@item non-working stuff
+B-frames
+@item example command line
+ffmpeg -i input -acodec aac -ab 128 -vcodec mpeg4 -b 1200kb -ar 24000 -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -s 368x192 -r 30000/1001 -title X -f psp output.mp4
+@item needed stuff for H.264
+-acodec aac -vcodec h264 width*height<=76800 width%16=0? height%16=0? -ar 48000 -coder 1 -r 30000/1001 or 15000/1001 -f psp
+@item working stuff for H.264
+title, loop filter
+@item non-working stuff for H.264
+CAVLC
+@item example command line
+ffmpeg -i input -acodec aac -ab 128 -vcodec h264 -b 1200kb -ar 48000 -mbd 2 -coder 1 -cmp 2 -subcmp 2 -s 368x192 -r 30000/1001 -title X -f psp -flags loop -trellis 2 -partitions parti4x4+parti8x8+partp4x4+partp8x8+partb8x8 output.mp4
+@end table
+
+@section How can I read DirectShow files?
+
+If you have built FFmpeg with @code{./configure --enable-avisynth}
+(only possible on MinGW/Cygwin platforms),
+then you may use any file that DirectShow can read as input.
+(Be aware that this feature has been recently added,
+so you will need to help yourself in case of problems.)
+
+Just create an "input.avs" text file with this single line ...
+@example
+ DirectShowSource("C:\path to your file\yourfile.asf")
+@end example
+... and then feed that text file to FFmpeg:
+@example
+ ffmpeg -i input.avs
+@end example
+
+For ANY other help on Avisynth, please visit @url{http://www.avisynth.org/}.
+
+@chapter Development
+
+@section When will the next FFmpeg version be released? / Why are FFmpeg releases so few and far between?
+
+Like most open source projects, FFmpeg suffers from a certain lack of
+manpower. For this reason the developers have to prioritize the work
+they do, and putting out releases is not at the top of the list; fixing
+bugs and reviewing patches takes precedence. Please don't complain or
+request more timely and/or frequent releases unless you are willing to
+help out creating them.
+
+@section Why doesn't FFmpeg support feature [xyz]?
+
+Because no one has taken on that task yet. FFmpeg development is
+driven by the tasks that are important to the individual developers.
+If there is a feature that is important to you, the best way to get
+it implemented is to undertake the task yourself.
+
+
+@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat ?
+
+Yes. Read the Developers Guide of the FFmpeg documentation. Alternatively,
+examine the source code for one of the many open source projects that
+already incorporate ffmpeg at (@url{projects.php}).
+
+@section Can you support my C compiler XXX ?
+
+No. Only GCC is supported. GCC is ported to most systems available and there
+is no need to pollute the source code with @code{#ifdef}s
+related to the compiler.
+
+@section Can I use FFmpeg or libavcodec under Windows ?
+
+Yes, but the MinGW tools @emph{must} be used to compile FFmpeg. You
+can link the resulting DLLs with any other Windows program. Read the
+@emph{Native Windows Compilation} and @emph{Visual C++ compatibility}
+sections in the FFmpeg documentation to find more information.
+
+@section Can you add automake, libtool or autoconf support ?
+
+No. These tools are too bloated and they complicate the build. Moreover,
+since only @samp{gcc} is supported they would add little advantage in
+terms of portability.
+
+@section Why not rewrite ffmpeg in object-oriented C++ ?
+
+ffmpeg is already organized in a highly modular manner and does not need to
+be rewritten in a formal object language. Further, many of the developers
+favor straight C; it works for them. For more arguments on this matter,
+read "Programming Religion" at (@url{http://lkml.org/faq/lkmlfaq-15.html}).
+
+@section Why are the ffmpeg programs devoid of debugging symbols ?
+
+The build process creates ffmpeg_g, ffplay_g, etc. which contain full debug
+information. Those binaries are strip'd to create ffmpeg, ffplay, etc. If
+you need the debug information, use the *_g versions.
+
+@section I do not like the LGPL, can I contribute code under the GPL instead ?
+
+Yes, as long as the code is optional and can easily and cleanly be placed
+under #ifdef CONFIG_GPL without breaking anything. So for example a new codec
+or filter would be OK under GPL while a bugfix to LGPL code would not.
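+
+In other words, GPL-only parts end up wrapped like this (a schematic
+illustration, not a quote from the FFmpeg sources):
+
+@example
+#ifdef CONFIG_GPL
+    /* code that is available under the GPL only goes here */
+#endif
+@end example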
+
+@section I want to compile xyz.c alone but my compiler produced many errors.
+
+Common code is in its own files in libav* and is used by the individual
+codecs. They will not work without the common parts, you have to compile
+the whole libav*. If you wish, disable some parts with configure switches.
+You can also try to hack it and remove more, but if you had problems fixing
+the compilation failure then you are probably not qualified for this.
+
+@section Visual C++ produces many errors.
+
+Visual C++ is not compliant with the C standard and does not support
+the inline assembly used in FFmpeg.
+If you wish - for whatever weird reason - to use Visual C++ for your
+project then you can link the Visual C++ code with libav* as long as
+you compile the latter with a working C compiler. For more information, see
+the @emph{Visual C++ compatibility} section in the FFmpeg documentation.
+
+There have been efforts to make FFmpeg compatible with Visual C++ in the
+past. However, they have all been rejected as too intrusive, especially
+since MinGW does the job perfectly adequately. None of the core developers
+work with Visual C++ and thus this item is low priority. Should you find
+the silver bullet that solves this problem, feel free to shoot it at us.
+
+@section I have a file in memory / an API different from *open/*read/ libc. How do I use it with libavformat ?
+
+You have to implement a URLProtocol, see libavformat/file.c in FFmpeg
+and libmpdemux/demux_lavf.c in MPlayer sources.
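+
+A very rough sketch of such a protocol for an in-memory buffer follows. It is
+only meant to show the general shape: the exact URLProtocol fields, their
+order and the registration call differ between FFmpeg versions, so check
+@file{libavformat/avio.h} in your tree before copying anything from here.
+The @code{lookup_my_buffer()} and @code{copy_from_my_buffer()} helpers are
+hypothetical.
+
+@example
+static int mem_open(URLContext *h, const char *filename, int flags)
+@{
+    h->priv_data = lookup_my_buffer(filename);   /* hypothetical helper */
+    return h->priv_data ? 0 : -1;
+@}
+
+static int mem_read(URLContext *h, unsigned char *buf, int size)
+@{
+    return copy_from_my_buffer(h->priv_data, buf, size);   /* hypothetical */
+@}
+
+static int mem_close(URLContext *h)
+@{
+    h->priv_data = NULL;
+    return 0;
+@}
+
+URLProtocol mem_protocol = @{
+    "mem",       /* protocol name, used in mem: URLs              */
+    mem_open,
+    mem_read,
+    NULL,        /* url_write, not needed for a read-only source  */
+    NULL,        /* url_seek, NULL if the source is not seekable   */
+    mem_close,
+@};
+@end example
+
+The protocol then has to be registered with libavformat (older trees call
+this @code{register_protocol()}); after that, @code{mem:} URLs can be used
+wherever a filename is expected.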
+
+@section I get "No compatible shell script interpreter found." in MSys.
+
+The standard MSys bash (2.04) is broken. You need to install 2.05 or later.
+
+@bye
diff --git a/contrib/ffmpeg/doc/ffmpeg-doc.texi b/contrib/ffmpeg/doc/ffmpeg-doc.texi
new file mode 100644
index 000000000..2d814c0fb
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffmpeg-doc.texi
@@ -0,0 +1,1607 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle FFmpeg Documentation
+@titlepage
+@sp 7
+@center @titlefont{FFmpeg Documentation}
+@sp 3
+@end titlepage
+
+
+@chapter Introduction
+
+FFmpeg is a very fast video and audio converter. It can also grab from
+a live audio/video source.
+
+The command line interface is designed to be intuitive, in the sense
+that FFmpeg tries to figure out all parameters that can possibly be
+derived automatically. You usually only have to specify the target
+bitrate you want.
+
+FFmpeg can also convert from any sample rate to any other, and resize
+video on the fly with a high quality polyphase filter.
+
+@chapter Quick Start
+
+@c man begin EXAMPLES
+@section Video and Audio grabbing
+
+FFmpeg can use a video4linux compatible video source and any Open Sound
+System audio source:
+
+@example
+ffmpeg /tmp/out.mpg
+@end example
+
+Note that before launching FFmpeg you must activate the right video
+source and channel with any TV viewer such as xawtv
+(@url{http://bytesex.org/xawtv/}) by Gerd Knorr. You also
+have to set the audio recording levels correctly with a
+standard mixer.
+
+@section Video and Audio file format conversion
+
+* FFmpeg can use any supported file format and protocol as input:
+
+Examples:
+
+* You can use YUV files as input:
+
+@example
+ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
+@end example
+
+It will use the files:
+@example
+/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
+/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
+@end example
+
+The Y files use twice the resolution of the U and V files. They are
+raw files, without header. They can be generated by all decent video
+decoders. You must specify the size of the image with the @option{-s} option
+if FFmpeg cannot guess it.
+
+* You can input from a raw YUV420P file:
+
+@example
+ffmpeg -i /tmp/test.yuv /tmp/out.avi
+@end example
+
+test.yuv is a file containing raw YUV planar data. Each frame is composed
+of the Y plane followed by the U and V planes at half vertical and
+horizontal resolution.
+
+* You can output to a raw YUV420P file:
+
+@example
+ffmpeg -i mydivx.avi hugefile.yuv
+@end example
+
+* You can set several input files and output files:
+
+@example
+ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
+@end example
+
+Converts the audio file a.wav and the raw YUV video file a.yuv
+to MPEG file a.mpg.
+
+* You can also do audio and video conversions at the same time:
+
+@example
+ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
+@end example
+
+Converts a.wav to MPEG audio at 22050Hz sample rate.
+
+* You can encode to several formats at the same time and define a
+mapping from input stream to output streams:
+
+@example
+ffmpeg -i /tmp/a.wav -ab 64 /tmp/a.mp2 -ab 128 /tmp/b.mp2 -map 0:0 -map 0:0
+@end example
+
+Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. '-map
+file:index' specifies which input stream is used for each output
+stream, in the order of the definition of output streams.
+
+* You can transcode decrypted VOBs
+
+@example
+ffmpeg -i snatch_1.vob -f avi -vcodec mpeg4 -b 800k -g 300 -bf 2 -acodec mp3 -ab 128 snatch.avi
+@end example
+
+This is a typical DVD ripping example; the input is a VOB file, the
+output an AVI file with MPEG-4 video and MP3 audio. Note that in this
+command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
+GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
+input video. Furthermore, the audio stream is MP3-encoded so you need
+to enable LAME support by passing @code{--enable-mp3lame} to configure.
+The mapping is particularly useful for DVD transcoding
+to get the desired audio language.
+
+NOTE: To see the supported input formats, use @code{ffmpeg -formats}.
+@c man end
+
+@chapter Invocation
+
+@section Syntax
+
+The generic syntax is:
+
+@example
+@c man begin SYNOPSIS
+ffmpeg [[infile options][@option{-i} @var{infile}]]... @{[outfile options] @var{outfile}@}...
+@c man end
+@end example
+@c man begin DESCRIPTION
+If no input file is given, audio/video grabbing is done.
+
+As a general rule, options are applied to the next specified
+file. Therefore, order is important, and you can have the same
+option on the command line multiple times. Each occurrence is
+then applied to the next input or output file.
+
+* To set the video bitrate of the output file to 64kbit/s:
+@example
+ffmpeg -i input.avi -b 64k output.avi
+@end example
+
+* To force the frame rate of the input and output file to 24 fps:
+@example
+ffmpeg -r 24 -i input.avi output.avi
+@end example
+
+* To force the frame rate of the output file to 24 fps:
+@example
+ffmpeg -i input.avi -r 24 output.avi
+@end example
+
+* To force the frame rate of input file to 1 fps and the output file to 24 fps:
+@example
+ffmpeg -r 1 -i input.avi -r 24 output.avi
+@end example
+
+The format option may be needed for raw input files.
+
+By default, FFmpeg tries to convert as losslessly as possible: It
+uses the same audio and video parameters for the outputs as the one
+specified for the inputs.
+@c man end
+
+@c man begin OPTIONS
+@section Main options
+
+@table @option
+@item -L
+Show license.
+
+@item -h
+Show help.
+
+@item -version
+Show version.
+
+@item -formats
+Show available formats, codecs, protocols, ...
+
+@item -f fmt
+Force format.
+
+@item -i filename
+input filename
+
+@item -y
+Overwrite output files.
+
+@item -t duration
+Set the recording time in seconds.
+@code{hh:mm:ss[.xxx]} syntax is also supported.
+
+@item -fs limit_size
+Set the file size limit.
+
+@item -ss position
+Seek to given time position in seconds.
+@code{hh:mm:ss[.xxx]} syntax is also supported.
+
+@item -itsoffset offset
+Set the input time offset in seconds.
+@code{[-]hh:mm:ss[.xxx]} syntax is also supported.
+This option affects all the input files that follow it.
+The offset is added to the timestamps of the input files.
+Specifying a positive offset means that the corresponding
+streams are delayed by 'offset' seconds.
+
+@item -title string
+Set the title.
+
+@item -timestamp time
+Set the timestamp.
+
+@item -author string
+Set the author.
+
+@item -copyright string
+Set the copyright.
+
+@item -comment string
+Set the comment.
+
+@item -album string
+Set the album.
+
+@item -track number
+Set the track.
+
+@item -year number
+Set the year.
+
+@item -v verbose
+Control amount of logging.
+
+@item -target type
+Specify target file type ("vcd", "svcd", "dvd", "dv", "dv50", "pal-vcd",
+"ntsc-svcd", ... ). All the format options (bitrate, codecs,
+buffer sizes) are then set automatically. You can just type:
+
+@example
+ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
+@end example
+
+Nevertheless you can specify additional options as long as you know
+they do not conflict with the standard, as in:
+
+@example
+ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
+@end example
+
+@item -dframes number
+Set the number of data frames to record.
+
+@item -scodec codec
+Force subtitle codec ('copy' to copy stream).
+
+@item -newsubtitle
+Add a new subtitle stream to the current output stream.
+
+@item -slang code
+Set the ISO 639 language code (3 letters) of the current subtitle stream.
+
+@end table
+
+@section Video Options
+
+@table @option
+@item -b bitrate
+Set the video bitrate in bit/s (default = 200 kb/s).
+@item -vframes number
+Set the number of video frames to record.
+@item -r fps
+Set frame rate (Hz value, fraction or abbreviation), (default = 25).
+@item -s size
+Set frame size. The format is @samp{wxh} (default = 160x128).
+The following abbreviations are recognized:
+@table @samp
+@item sqcif
+128x96
+@item qcif
+176x144
+@item cif
+352x288
+@item 4cif
+704x576
+@end table
+
+@item -aspect aspect
+Set aspect ratio (4:3, 16:9 or 1.3333, 1.7777).
+@item -croptop size
+Set top crop band size (in pixels).
+@item -cropbottom size
+Set bottom crop band size (in pixels).
+@item -cropleft size
+Set left crop band size (in pixels).
+@item -cropright size
+Set right crop band size (in pixels).
+@item -padtop size
+Set top pad band size (in pixels).
+@item -padbottom size
+Set bottom pad band size (in pixels).
+@item -padleft size
+Set left pad band size (in pixels).
+@item -padright size
+Set right pad band size (in pixels).
+@item -padcolor (hex color)
+Set color of padded bands. The value for padcolor is expressed
+as a six digit hexadecimal number where the first two digits
+represent red, the middle two digits green and last two digits
+blue (default = 000000 (black)).
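+
+For example, to add 16-pixel black bands at the top and bottom of the
+picture (a sketch; sizes and file names are illustrative):
+
+@example
+ffmpeg -i input.avi -padtop 16 -padbottom 16 -padcolor 000000 output.avi
+@end example
+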
+@item -vn
+Disable video recording.
+@item -bt tolerance
+Set video bitrate tolerance (in bit/s).
+@item -maxrate bitrate
+Set max video bitrate tolerance (in bit/s).
+@item -minrate bitrate
+Set min video bitrate tolerance (in bit/s).
+@item -bufsize size
+Set rate control buffer size (in bits).
+@item -vcodec codec
+Force video codec to @var{codec}. Use the @code{copy} special value to
+tell that the raw codec data must be copied as is.
+@item -sameq
+Use same video quality as source (implies VBR).
+
+@item -pass n
+Select the pass number (1 or 2). It is useful for two-pass
+encoding. The statistics of the video are recorded in the first
+pass and the video is generated at the exact requested bitrate
+in the second pass.
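+
+A typical two-pass run therefore looks like this (a sketch; during the
+first pass the output can go to @file{/dev/null}, since only the logged
+statistics are needed):
+
+@example
+ffmpeg -i input.avi -b 1000k -pass 1 -f avi -y /dev/null
+ffmpeg -i input.avi -b 1000k -pass 2 output.avi
+@end example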
+
+@item -passlogfile file
+Set two pass logfile name to @var{file}.
+
+@item -newvideo
+Add a new video stream to the current output stream.
+
+@end table
+
+@section Advanced Video Options
+
+@table @option
+@item -pix_fmt format
+Set pixel format.
+@item -g gop_size
+Set the group of pictures size.
+@item -intra
+Use only intra frames.
+@item -vdt n
+Discard threshold.
+@item -qscale q
+Use fixed video quantizer scale (VBR).
+@item -qmin q
+minimum video quantizer scale (VBR)
+@item -qmax q
+maximum video quantizer scale (VBR)
+@item -qdiff q
+maximum difference between the quantizer scales (VBR)
+@item -qblur blur
+video quantizer scale blur (VBR)
+@item -qcomp compression
+video quantizer scale compression (VBR)
+
+@item -lmin lambda
+minimum video lagrange factor (VBR)
+@item -lmax lambda
+max video lagrange factor (VBR)
+@item -mblmin lambda
+minimum macroblock quantizer scale (VBR)
+@item -mblmax lambda
+maximum macroblock quantizer scale (VBR)
+
+These four options (lmin, lmax, mblmin, mblmax) use 'lambda' units,
+but you may use the QP2LAMBDA constant to easily convert from 'q' units:
+@example
+ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
+@end example
+
+@item -rc_init_cplx complexity
+initial complexity for single pass encoding
+@item -b_qfactor factor
+qp factor between P- and B-frames
+@item -i_qfactor factor
+qp factor between P- and I-frames
+@item -b_qoffset offset
+qp offset between P- and B-frames
+@item -i_qoffset offset
+qp offset between P- and I-frames
+@item -rc_eq equation
+Set rate control equation (@pxref{FFmpeg formula
+evaluator}) (default = @code{tex^qComp}).
+@item -rc_override override
+rate control override for specific intervals
+@item -me method
+Set motion estimation method to @var{method}.
+Available methods are (from lowest to highest quality):
+@table @samp
+@item zero
+Try just the (0, 0) vector.
+@item phods
+@item log
+@item x1
+@item epzs
+(default method)
+@item full
+exhaustive search (slow and marginally better than epzs)
+@end table
+
+@item -dct_algo algo
+Set DCT algorithm to @var{algo}. Available values are:
+@table @samp
+@item 0
+FF_DCT_AUTO (default)
+@item 1
+FF_DCT_FASTINT
+@item 2
+FF_DCT_INT
+@item 3
+FF_DCT_MMX
+@item 4
+FF_DCT_MLIB
+@item 5
+FF_DCT_ALTIVEC
+@end table
+
+@item -idct_algo algo
+Set IDCT algorithm to @var{algo}. Available values are:
+@table @samp
+@item 0
+FF_IDCT_AUTO (default)
+@item 1
+FF_IDCT_INT
+@item 2
+FF_IDCT_SIMPLE
+@item 3
+FF_IDCT_SIMPLEMMX
+@item 4
+FF_IDCT_LIBMPEG2MMX
+@item 5
+FF_IDCT_PS2
+@item 6
+FF_IDCT_MLIB
+@item 7
+FF_IDCT_ARM
+@item 8
+FF_IDCT_ALTIVEC
+@item 9
+FF_IDCT_SH4
+@item 10
+FF_IDCT_SIMPLEARM
+@end table
+
+@item -er n
+Set error resilience to @var{n}.
+@table @samp
+@item 1
+FF_ER_CAREFUL (default)
+@item 2
+FF_ER_COMPLIANT
+@item 3
+FF_ER_AGGRESSIVE
+@item 4
+FF_ER_VERY_AGGRESSIVE
+@end table
+
+@item -ec bit_mask
+Set error concealment to @var{bit_mask}. @var{bit_mask} is a bit mask of
+the following values:
+@table @samp
+@item 1
+FF_EC_GUESS_MVS (default = enabled)
+@item 2
+FF_EC_DEBLOCK (default = enabled)
+@end table
+
+@item -bf frames
+Use 'frames' B-frames (supported for MPEG-1, MPEG-2 and MPEG-4).
+@item -mbd mode
+macroblock decision
+@table @samp
+@item 0
+FF_MB_DECISION_SIMPLE: Use mb_cmp (cannot change it yet in FFmpeg).
+@item 1
+FF_MB_DECISION_BITS: Choose the one which needs the fewest bits.
+@item 2
+FF_MB_DECISION_RD: rate distortion
+@end table
+
+@item -4mv
+Use four motion vectors per macroblock (MPEG-4 only).
+@item -part
+Use data partitioning (MPEG-4 only).
+@item -bug param
+Work around encoder bugs that are not auto-detected.
+@item -strict strictness
+How strictly to follow the standards.
+@item -aic
+Enable advanced intra coding (H.263+).
+@item -umv
+Enable unlimited motion vectors (H.263+).
+
+@item -deinterlace
+Deinterlace pictures.
+@item -ilme
+Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
+Use this option if your input file is interlaced and you want
+to keep the interlaced format for minimum losses.
+The alternative is to deinterlace the input stream with
+@option{-deinterlace}, but deinterlacing introduces losses.
+@item -psnr
+Calculate PSNR of compressed frames.
+@item -vstats
+Dump video coding statistics to @file{vstats_HHMMSS.log}.
+@item -vhook module
+Insert video processing @var{module}. @var{module} contains the module
+name and its parameters separated by spaces.
+@item -top n
+top=1/bottom=0/auto=-1 field first
+@item -dc precision
+Set intra DC precision.
+@item -vtag fourcc/tag
+Force video tag/fourcc.
+@item -qphist
+Show QP histogram.
+@item -vbsf bitstream filter
+Bitstream filters available are "dump_extra", "remove_extra", "noise".
+@end table
+
+@section Audio Options
+
+@table @option
+@item -aframes number
+Set the number of audio frames to record.
+@item -ar freq
+Set the audio sampling frequency (default = 44100 Hz).
+@item -ab bitrate
+Set the audio bitrate in kbit/s (default = 64).
+@item -ac channels
+Set the number of audio channels (default = 1).
+@item -an
+Disable audio recording.
+@item -acodec codec
+Force audio codec to @var{codec}. Use the @code{copy} special value to
+specify that the raw codec data must be copied as is.
+@item -newaudio
+Add a new audio track to the output file. If you want to specify parameters,
+do so before @code{-newaudio} (@code{-acodec}, @code{-ab}, etc..).
+
+Mapping will be done automatically, if the number of output streams is equal to
+the number of input streams, else it will pick the first one that matches. You
+can override the mapping using @code{-map} as usual.
+
+Example:
+@example
+ffmpeg -i file.mpg -vcodec copy -acodec ac3 -ab 384 test.mpg -acodec mp2 -ab 192 -newaudio
+@end example
+@item -alang code
+Set the ISO 639 language code (3 letters) of the current audio stream.
+@end table
+
+@section Advanced Audio options:
+
+@table @option
+@item -atag fourcc/tag
+Force audio tag/fourcc.
+@item -absf bitstream filter
+Bitstream filters available are "dump_extra", "remove_extra", "noise", "mp3comp", "mp3decomp".
+@end table
+
+@section Subtitle options:
+
+@table @option
+@item -scodec codec
+Force subtitle codec ('copy' to copy stream).
+@item -newsubtitle
+Add a new subtitle stream to the current output stream.
+@item -slang code
+Set the ISO 639 language code (3 letters) of the current subtitle stream.
+@end table
+
+@section Audio/Video grab options
+
+@table @option
+@item -vd device
+Set video grab device (e.g. @file{/dev/video0}).
+@item -vc channel
+Set video grab channel (DV1394 only).
+@item -tvstd standard
+Set television standard (NTSC, PAL, SECAM).
+@item -dv1394
+Set DV1394 grab.
+@item -ad device
+Set audio device (e.g. @file{/dev/dsp}).
+@item -grab format
+Request grabbing using the given format.
+@item -gd device
+Set grab device.
+@end table
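+
+For example, to grab from a video4linux device and an OSS audio device using
+the options above (a sketch; device paths, size and standard are illustrative):
+
+@example
+ffmpeg -vd /dev/video0 -ad /dev/dsp -tvstd pal -s 352x288 /tmp/out.mpg
+@end example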
+
+@section Advanced options
+
+@table @option
+@item -map input stream id[:input stream id]
+Set stream mapping from input streams to output streams.
+Just enumerate the input streams in the order you want them in the output.
+[input stream id] sets the (input) stream to sync against.
+@item -map_meta_data outfile:infile
+Set meta data information of outfile from infile.
+@item -debug
+Print specific debug info.
+@item -benchmark
+Add timings for benchmarking.
+@item -dump
+Dump each input packet.
+@item -hex
+When dumping packets, also dump the payload.
+@item -bitexact
+Only use bit exact algorithms (for codec testing).
+@item -ps size
+Set packet size in bits.
+@item -re
+Read input at native frame rate. Mainly used to simulate a grab device.
+@item -loop_input
+Loop over the input stream. Currently it works only for image
+streams. This option is used for automatic FFserver testing.
+@item -loop_output number_of_times
+Repeatedly loop output for formats that support looping such as animated GIF
+(0 will loop the output infinitely).
+@item -threads count
+Thread count.
+@item -vsync parameter
+Video sync method. Video will be stretched/squeezed to match the timestamps;
+this is done by duplicating and dropping frames. With -map you can select from
+which stream the timestamps should be taken. You can leave either video or
+audio unchanged and sync the remaining stream(s) to the unchanged one.
+@item -async samples_per_second
+Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps;
+the parameter is the maximum number of samples per second by which the audio is changed.
+-async 1 is a special case where only the start of the audio stream is corrected
+without any later correction.
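+
+For example, to let FFmpeg correct a small audio offset at the start of a
+capture while copying the video unchanged (a sketch; codec and file names
+are illustrative):
+
+@example
+ffmpeg -i input.avi -async 1 -vcodec copy -acodec mp2 output.avi
+@end example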
+@end table
+
+@node FFmpeg formula evaluator
+@section FFmpeg formula evaluator
+
+When evaluating a rate control string, FFmpeg uses an internal formula
+evaluator; an example appears after the list of constants below.
+
+The following binary operators are available: @code{+}, @code{-},
+@code{*}, @code{/}, @code{^}.
+
+The following unary operators are available: @code{+}, @code{-},
+@code{(...)}.
+
+The following functions are available:
+@table @var
+@item sinh(x)
+@item cosh(x)
+@item tanh(x)
+@item sin(x)
+@item cos(x)
+@item tan(x)
+@item exp(x)
+@item log(x)
+@item squish(x)
+@item gauss(x)
+@item abs(x)
+@item max(x, y)
+@item min(x, y)
+@item gt(x, y)
+@item lt(x, y)
+@item eq(x, y)
+@item bits2qp(bits)
+@item qp2bits(qp)
+@end table
+
+The following constants are available:
+@table @var
+@item PI
+@item E
+@item iTex
+@item pTex
+@item tex
+@item mv
+@item fCode
+@item iCount
+@item mcVar
+@item var
+@item isI
+@item isP
+@item isB
+@item avgQP
+@item qComp
+@item avgIITex
+@item avgPITex
+@item avgPPTex
+@item avgBPTex
+@item avgTex
+@end table
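+
+The evaluator is used, for instance, by the @option{-rc_eq} option; the
+default equation can be given explicitly like this (a sketch; the bitrate
+is illustrative):
+
+@example
+ffmpeg -i input.avi -b 500k -rc_eq 'tex^qComp' output.avi
+@end example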
+
+@c man end
+
+@ignore
+
+@setfilename ffmpeg
+@settitle FFmpeg video converter
+
+@c man begin SEEALSO
+ffserver(1), ffplay(1) and the HTML documentation of @file{ffmpeg}.
+@c man end
+
+@c man begin AUTHOR
+Fabrice Bellard
+@c man end
+
+@end ignore
+
+@section Protocols
+
+The filename can be @file{-} to read from standard input or to write
+to standard output.
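+
+For example, to write the converted output to standard output (a sketch; the
+format typically has to be given explicitly with @option{-f}, since it cannot
+be guessed from the @file{-} name):
+
+@example
+ffmpeg -i input.avi -f mpeg - > output.mpg
+@end example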
+
+FFmpeg also handles many protocols specified with a URL syntax.
+
+Use 'ffmpeg -formats' to see a list of the supported protocols.
+
+The protocol @code{http:} is currently used only to communicate with
+FFserver (see the FFserver documentation). Once FFmpeg becomes a
+video player, it will also be used for streaming :-)
+
+@chapter Tips
+
+@itemize
+@item For streaming at very low bitrates, use a low frame rate
+and a small GOP size. This is especially true for RealVideo where
+the Linux player does not seem to be very fast, so it can miss
+frames. An example is:
+
+@example
+ffmpeg -g 3 -r 3 -t 10 -b 50k -s qcif -f rv10 /tmp/b.rm
+@end example
+
+@item The parameter 'q' which is displayed while encoding is the current
+quantizer. The value 1 indicates that a very good quality could
+be achieved. The value 31 indicates the worst quality. If q=31 appears
+too often, it means that the encoder cannot compress enough to meet
+your bitrate. You must either increase the bitrate, decrease the
+frame rate or decrease the frame size.
+
+@item If your computer is not fast enough, you can speed up the
+compression at the expense of the compression ratio. You can use
+'-me zero' to speed up motion estimation, and '-intra' to disable
+motion estimation completely (you have only I-frames, which means it
+is about as good as JPEG compression).
+
+@item To have very low audio bitrates, reduce the sampling frequency
+(down to 22050 Hz for MPEG audio, 22050 Hz or 11025 Hz for AC3).
+
+@item To have a constant quality (but a variable bitrate), use the option
+'-qscale n' where 'n' is between 1 (excellent quality) and 31 (worst
+quality).
+
+@item When converting video files, you can use the '-sameq' option which
+uses the same quality factor in the encoder as in the decoder.
+It allows almost lossless encoding.
+
+@end itemize
+
+
+@chapter external libraries
+
+FFmpeg can be hooked up with a number of external libraries to add support
+for more formats.
+
+@section AMR
+
+AMR comes in two different flavors, WB and NB. FFmpeg can make use of the
+AMR WB (floating-point mode) and the AMR NB (both floating-point and
+fixed-point mode) reference decoders and encoders.
+
+@itemize
+
+@item For AMR WB floating-point download TS26.204 V5.1.0 from
+@url{http://www.3gpp.org/ftp/Specs/archive/26_series/26.204/26204-510.zip}
+and extract the source to @file{libavcodec/amrwb_float/}.
+
+@item For AMR NB floating-point download TS26.104 REL-5 V5.1.0 from
+@url{http://www.3gpp.org/ftp/Specs/archive/26_series/26.104/26104-510.zip}
+and extract the source to @file{libavcodec/amr_float/}.
+If you try this on Alpha, you may need to change @code{Word32} to
+@code{int} in @file{amr/typedef.h}.
+
+@item For AMR NB fixed-point download TS26.073 REL-5 V5.1.0 from
+@url{http://www.3gpp.org/ftp/Specs/archive/26_series/26.073/26073-510.zip}
+and extract the source to @file{libavcodec/amr}.
+You must also add @code{-DMMS_IO} and remove @code{-pedantic-errors}
+to/from @code{CFLAGS} in @file{libavcodec/amr/makefile}, i.e.
+``@code{CFLAGS = -Wall -I. \$(CFLAGS_\$(MODE)) -D\$(VAD) -DMMS_IO}''.
+
+@end itemize
+
+
+@chapter Supported File Formats and Codecs
+
+You can use the @code{-formats} option to have an exhaustive list.
+
+@section File Formats
+
+FFmpeg supports the following file formats through the @code{libavformat}
+library:
+
+@multitable @columnfractions .4 .1 .1 .4
+@item Supported File Format @tab Encoding @tab Decoding @tab Comments
+@item MPEG audio @tab X @tab X
+@item MPEG-1 systems @tab X @tab X
+@tab muxed audio and video
+@item MPEG-2 PS @tab X @tab X
+@tab also known as @code{VOB} file
+@item MPEG-2 TS @tab @tab X
+@tab also known as DVB Transport Stream
+@item ASF@tab X @tab X
+@item AVI@tab X @tab X
+@item WAV@tab X @tab X
+@item Macromedia Flash@tab X @tab X
+@tab Only embedded audio is decoded.
+@item FLV @tab X @tab X
+@tab Macromedia Flash video files
+@item Real Audio and Video @tab X @tab X
+@item Raw AC3 @tab X @tab X
+@item Raw MJPEG @tab X @tab X
+@item Raw MPEG video @tab X @tab X
+@item Raw PCM8/16 bits, mulaw/Alaw@tab X @tab X
+@item Raw CRI ADX audio @tab X @tab X
+@item Raw Shorten audio @tab @tab X
+@item SUN AU format @tab X @tab X
+@item NUT @tab X @tab X @tab NUT Open Container Format
+@item QuickTime @tab X @tab X
+@item MPEG-4 @tab X @tab X
+@tab MPEG-4 is a variant of QuickTime.
+@item Raw MPEG4 video @tab X @tab X
+@item DV @tab X @tab X
+@item 4xm @tab @tab X
+@tab 4X Technologies format, used in some games.
+@item Playstation STR @tab @tab X
+@item Id RoQ @tab @tab X
+@tab Used in Quake III, Jedi Knight 2, other computer games.
+@item Interplay MVE @tab @tab X
+@tab Format used in various Interplay computer games.
+@item WC3 Movie @tab @tab X
+@tab Multimedia format used in Origin's Wing Commander III computer game.
+@item Sega FILM/CPK @tab @tab X
+@tab Used in many Sega Saturn console games.
+@item Westwood Studios VQA/AUD @tab @tab X
+@tab Multimedia formats used in Westwood Studios games.
+@item Id Cinematic (.cin) @tab @tab X
+@tab Used in Quake II.
+@item FLIC format @tab @tab X
+@tab .fli/.flc files
+@item Sierra VMD @tab @tab X
+@tab Used in Sierra CD-ROM games.
+@item Sierra Online @tab @tab X
+@tab .sol files used in Sierra Online games.
+@item Matroska @tab @tab X
+@item Electronic Arts Multimedia @tab @tab X
+@tab Used in various EA games; files have extensions like WVE and UV2.
+@item Nullsoft Video (NSV) format @tab @tab X
+@item ADTS AAC audio @tab X @tab X
+@item Creative VOC @tab X @tab X @tab Created for the Sound Blaster Pro.
+@item American Laser Games MM @tab @tab X
+@tab Multimedia format used in games like Mad Dog McCree
+@item AVS @tab @tab X
+@tab Multimedia format used by the Creature Shock game.
+@item Smacker @tab @tab X
+@tab Multimedia format used by many games.
+@item GXF @tab X @tab X
+@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley playout servers.
+@item CIN @tab @tab X
+@tab Multimedia format used by Delphine Software games.
+@item MXF @tab @tab X
+@tab Material eXchange Format SMPTE 377M, used by D-Cinema, broadcast industry.
+@item SEQ @tab @tab X
+@tab Tiertex .seq files used in the DOS CDROM version of the game Flashback.
+@end multitable
+
+@code{X} means that encoding (resp. decoding) is supported.
+
+@section Image Formats
+
+FFmpeg can read and write images for each frame of a video sequence. The
+following image formats are supported:
+
+@multitable @columnfractions .4 .1 .1 .4
+@item Supported Image Format @tab Encoding @tab Decoding @tab Comments
+@item PGM, PPM @tab X @tab X
+@item PAM @tab X @tab X @tab PAM is a PNM extension with alpha support.
+@item PGMYUV @tab X @tab X @tab PGM with U and V components in YUV 4:2:0
+@item JPEG @tab X @tab X @tab Progressive JPEG is not supported.
+@item .Y.U.V @tab X @tab X @tab one raw file per component
+@item animated GIF @tab X @tab X @tab Only uncompressed GIFs are generated.
+@item PNG @tab X @tab X @tab 2 bit and 4 bit/pixel not supported yet.
+@item Targa @tab @tab X @tab Targa (.TGA) image format.
+@item TIFF @tab @tab X @tab Only 24 bit/pixel images are supported.
+@item SGI @tab X @tab X @tab SGI RGB image format
+@end multitable
+
+@code{X} means that encoding (resp. decoding) is supported.
+
+@section Video Codecs
+
+@multitable @columnfractions .4 .1 .1 .4
+@item Supported Codec @tab Encoding @tab Decoding @tab Comments
+@item MPEG-1 video @tab X @tab X
+@item MPEG-2 video @tab X @tab X
+@item MPEG-4 @tab X @tab X
+@item MSMPEG4 V1 @tab X @tab X
+@item MSMPEG4 V2 @tab X @tab X
+@item MSMPEG4 V3 @tab X @tab X
+@item WMV7 @tab X @tab X
+@item WMV8 @tab X @tab X @tab not completely working
+@item WMV9 @tab @tab X @tab not completely working
+@item VC1 @tab @tab X
+@item H.261 @tab X @tab X
+@item H.263(+) @tab X @tab X @tab also known as RealVideo 1.0
+@item H.264 @tab @tab X
+@item RealVideo 1.0 @tab X @tab X
+@item RealVideo 2.0 @tab X @tab X
+@item MJPEG @tab X @tab X
+@item lossless MJPEG @tab X @tab X
+@item JPEG-LS @tab X @tab X @tab fourcc: MJLS, lossless and near-lossless is supported
+@item Apple MJPEG-B @tab @tab X
+@item Sunplus MJPEG @tab @tab X @tab fourcc: SP5X
+@item DV @tab X @tab X
+@item HuffYUV @tab X @tab X
+@item FFmpeg Video 1 @tab X @tab X @tab experimental lossless codec (fourcc: FFV1)
+@item FFmpeg Snow @tab X @tab X @tab experimental wavelet codec (fourcc: SNOW)
+@item Asus v1 @tab X @tab X @tab fourcc: ASV1
+@item Asus v2 @tab X @tab X @tab fourcc: ASV2
+@item Creative YUV @tab @tab X @tab fourcc: CYUV
+@item Sorenson Video 1 @tab X @tab X @tab fourcc: SVQ1
+@item Sorenson Video 3 @tab @tab X @tab fourcc: SVQ3
+@item On2 VP3 @tab @tab X @tab still experimental
+@item On2 VP5 @tab @tab X @tab fourcc: VP50
+@item On2 VP6 @tab @tab X @tab fourcc: VP62
+@item Theora @tab @tab X @tab still experimental
+@item Intel Indeo 3 @tab @tab X
+@item FLV @tab X @tab X @tab Sorenson H.263 used in Flash
+@item Flash Screen Video @tab @tab X @tab fourcc: FSV1
+@item ATI VCR1 @tab @tab X @tab fourcc: VCR1
+@item ATI VCR2 @tab @tab X @tab fourcc: VCR2
+@item Cirrus Logic AccuPak @tab @tab X @tab fourcc: CLJR
+@item 4X Video @tab @tab X @tab Used in certain computer games.
+@item Sony Playstation MDEC @tab @tab X
+@item Id RoQ @tab @tab X @tab Used in Quake III, Jedi Knight 2, other computer games.
+@item Xan/WC3 @tab @tab X @tab Used in Wing Commander III .MVE files.
+@item Interplay Video @tab @tab X @tab Used in Interplay .MVE files.
+@item Apple Animation @tab @tab X @tab fourcc: 'rle '
+@item Apple Graphics @tab @tab X @tab fourcc: 'smc '
+@item Apple Video @tab @tab X @tab fourcc: rpza
+@item Apple QuickDraw @tab @tab X @tab fourcc: qdrw
+@item Cinepak @tab @tab X
+@item Microsoft RLE @tab @tab X
+@item Microsoft Video-1 @tab @tab X
+@item Westwood VQA @tab @tab X
+@item Id Cinematic Video @tab @tab X @tab Used in Quake II.
+@item Planar RGB @tab @tab X @tab fourcc: 8BPS
+@item FLIC video @tab @tab X
+@item Duck TrueMotion v1 @tab @tab X @tab fourcc: DUCK
+@item Duck TrueMotion v2 @tab @tab X @tab fourcc: TM20
+@item VMD Video @tab @tab X @tab Used in Sierra VMD files.
+@item MSZH @tab @tab X @tab Part of LCL
+@item ZLIB @tab X @tab X @tab Part of LCL, encoder experimental
+@item TechSmith Camtasia @tab @tab X @tab fourcc: TSCC
+@item IBM Ultimotion @tab @tab X @tab fourcc: ULTI
+@item Miro VideoXL @tab @tab X @tab fourcc: VIXL
+@item QPEG @tab @tab X @tab fourccs: QPEG, Q1.0, Q1.1
+@item LOCO @tab @tab X @tab
+@item Winnov WNV1 @tab @tab X @tab
+@item Autodesk Animator Studio Codec @tab @tab X @tab fourcc: AASC
+@item Fraps FPS1 @tab @tab X @tab
+@item CamStudio @tab @tab X @tab fourcc: CSCD
+@item American Laser Games Video @tab @tab X @tab Used in games like Mad Dog McCree
+@item ZMBV @tab @tab X @tab
+@item AVS Video @tab @tab X @tab Video encoding used by the Creature Shock game.
+@item Smacker Video @tab @tab X @tab Video encoding used in Smacker.
+@item RTjpeg @tab @tab X @tab Video encoding used in NuppelVideo files.
+@item KMVC @tab @tab X @tab Codec used in Worms games.
+@item VMware Video @tab @tab X @tab Codec used in videos captured by VMware.
+@item Cin Video @tab @tab X @tab Codec used in Delphine Software games.
+@item Tiertex Seq Video @tab @tab X @tab Codec used in DOS CDROM FlashBack game.
+@end multitable
+
+@code{X} means that encoding (resp. decoding) is supported.
+
+@section Audio Codecs
+
+@multitable @columnfractions .4 .1 .1 .4
+@item Supported Codec @tab Encoding @tab Decoding @tab Comments
+@item MPEG audio layer 2 @tab IX @tab IX
+@item MPEG audio layer 1/3 @tab IX @tab IX
+@tab MP3 encoding is supported through the external library LAME.
+@item AC3 @tab IX @tab IX
+@tab liba52 is used internally for decoding.
+@item Vorbis @tab X @tab X
+@item WMA V1/V2 @tab @tab X
+@item AAC @tab X @tab X
+@tab Supported through the external library libfaac/libfaad.
+@item Microsoft ADPCM @tab X @tab X
+@item MS IMA ADPCM @tab X @tab X
+@item QT IMA ADPCM @tab @tab X
+@item 4X IMA ADPCM @tab @tab X
+@item G.726 ADPCM @tab X @tab X
+@item Duck DK3 IMA ADPCM @tab @tab X
+@tab Used in some Sega Saturn console games.
+@item Duck DK4 IMA ADPCM @tab @tab X
+@tab Used in some Sega Saturn console games.
+@item Westwood Studios IMA ADPCM @tab @tab X
+@tab Used in Westwood Studios games like Command and Conquer.
+@item SMJPEG IMA ADPCM @tab @tab X
+@tab Used in certain Loki game ports.
+@item CD-ROM XA ADPCM @tab @tab X
+@item CRI ADX ADPCM @tab X @tab X
+@tab Used in Sega Dreamcast games.
+@item Electronic Arts ADPCM @tab @tab X
+@tab Used in various EA titles.
+@item Creative ADPCM @tab @tab X
+@tab 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2
+@item RA144 @tab @tab X
+@tab Real 14400 bit/s codec
+@item RA288 @tab @tab X
+@tab Real 28800 bit/s codec
+@item RADnet @tab X @tab IX
+@tab Real low bitrate AC3 codec, liba52 is used for decoding.
+@item AMR-NB @tab X @tab X
+@tab Supported through an external library.
+@item AMR-WB @tab X @tab X
+@tab Supported through an external library.
+@item DV audio @tab @tab X
+@item Id RoQ DPCM @tab @tab X
+@tab Used in Quake III, Jedi Knight 2, other computer games.
+@item Interplay MVE DPCM @tab @tab X
+@tab Used in various Interplay computer games.
+@item Xan DPCM @tab @tab X
+@tab Used in Origin's Wing Commander IV AVI files.
+@item Sierra Online DPCM @tab @tab X
+@tab Used in Sierra Online game audio files.
+@item Apple MACE 3 @tab @tab X
+@item Apple MACE 6 @tab @tab X
+@item FLAC lossless audio @tab @tab X
+@item Shorten lossless audio @tab @tab X
+@item Apple lossless audio @tab @tab X
+@tab QuickTime fourcc 'alac'
+@item FFmpeg Sonic @tab X @tab X
+@tab experimental lossy/lossless codec
+@item Qdesign QDM2 @tab @tab X
+@tab there are still some distortions
+@item Real COOK @tab @tab X
+@tab All versions except 5.1 are supported
+@item DSP Group TrueSpeech @tab @tab X
+@item True Audio (TTA) @tab @tab X
+@item Smacker Audio @tab @tab X
+@item WavPack Audio @tab @tab X
+@item Cin Audio @tab @tab X
+@tab Codec used in Delphine Software games.
+@item Intel Music Coder @tab @tab X
+@end multitable
+
+@code{X} means that encoding (resp. decoding) is supported.
+
+@code{I} means that an integer-only version is available, too (ensures high
+performance on systems without hardware floating point support).
+
+@chapter Platform Specific information
+
+@section Linux
+
+FFmpeg should be compiled with at least GCC 2.95.3. GCC 3.2 is the
+preferred compiler now for FFmpeg. All future optimizations will depend on
+features only found in GCC 3.2.
+
+@section BSD
+
+BSD make will not build FFmpeg; you need to install and use GNU Make
+(@file{gmake}).
+
+@section Windows
+
+@subsection Native Windows compilation
+
+@itemize
+@item Install the current versions of MSYS and MinGW from
+@url{http://www.mingw.org/}. You can find detailed installation
+instructions in the download section and the FAQ.
+
+@item If you want to test FFplay, also download
+the MinGW development library of SDL 1.2.x
+(@file{SDL-devel-1.2.x-mingw32.tar.gz}) from
+@url{http://www.libsdl.org}. Unpack it in a temporary directory, and
+unpack the archive @file{i386-mingw32msvc.tar.gz} in the MinGW tool
+directory. Edit the @file{sdl-config} script so that it gives the
+correct SDL directory when invoked.
+
+@item Extract the current version of FFmpeg.
+
+@item Start the MSYS shell (file @file{msys.bat}).
+
+@item Change to the FFmpeg directory and follow
+ the instructions on how to compile FFmpeg (file
+@file{INSTALL}). Usually, launching @file{./configure} and @file{make}
+suffices. If you have problems using SDL, verify that
+@file{sdl-config} can be launched from the MSYS command line.
+
+@item You can install FFmpeg in @file{Program Files/FFmpeg} by typing
+@file{make install}. Don't forget to copy @file{SDL.dll} to the place
+you launch @file{ffplay} from.
+
+@end itemize
+
+Notes:
+@itemize
+
+@item The target @file{make wininstaller} can be used to create a
+Nullsoft based Windows installer for FFmpeg and FFplay. @file{SDL.dll}
+must be copied to the FFmpeg directory in order to build the
+installer.
+
+@item By using @code{./configure --enable-shared} when configuring FFmpeg,
+you can build @file{avcodec.dll} and @file{avformat.dll}. With
+@code{make install} you install the FFmpeg DLLs and the associated
+headers in @file{Program Files/FFmpeg}.
+
+@item Visual C++ compatibility: If you used @code{./configure --enable-shared}
+when configuring FFmpeg, FFmpeg tries to use the Microsoft Visual
+C++ @code{lib} tool to build @code{avcodec.lib} and
+@code{avformat.lib}. With these libraries you can link your Visual C++
+code directly with the FFmpeg DLLs (see below).
+
+@end itemize
+
+@subsection Visual C++ compatibility
+
+FFmpeg will not compile under Visual C++ -- and it has too many
+dependencies on the GCC compiler to make a port viable. However,
+if you want to use the FFmpeg libraries in your own applications,
+you can still compile those applications using Visual C++. An
+important restriction to this is that you have to use the
+dynamically linked versions of the FFmpeg libraries (i.e. the
+DLLs), and you have to make sure that Visual-C++-compatible
+import libraries are created during the FFmpeg build process.
+
+This description of how to use the FFmpeg libraries with Visual C++ is
+based on Visual C++ 2005 Express Edition Beta 2. If you have a different
+version, you might have to modify the procedures slightly.
+
+Here are the step-by-step instructions for building the FFmpeg libraries
+so they can be used with Visual C++:
+
+@enumerate
+
+@item Install Visual C++ (if you haven't done so already).
+
+@item Install MinGW and MSYS as described above.
+
+@item Add a call to @file{vcvars32.bat} (which sets up the environment
+variables for the Visual C++ tools) as the first line of
+@file{msys.bat}. The standard location for @file{vcvars32.bat} is
+@file{C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat},
+and the standard location for @file{msys.bat} is
+@file{C:\msys\1.0\msys.bat}. If this corresponds to your setup, add the
+following line as the first line of @file{msys.bat}:
+
+@code{call "C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat"}
+
+@item Start the MSYS shell (file @file{msys.bat}) and type @code{link.exe}.
+If you get a help message with the command line options of @code{link.exe},
+this means your environment variables are set up correctly, the
+Microsoft linker is on the path and will be used by FFmpeg to
+create Visual-C++-compatible import libraries.
+
+@item Extract the current version of FFmpeg and change to the FFmpeg directory.
+
+@item Type the command
+@code{./configure --enable-shared --disable-static --enable-memalign-hack}
+to configure and, if that didn't produce any errors,
+type @code{make} to build FFmpeg.
+
+@item The subdirectories @file{libavformat}, @file{libavcodec}, and
+@file{libavutil} should now contain the files @file{avformat.dll},
+@file{avformat.lib}, @file{avcodec.dll}, @file{avcodec.lib},
+@file{avutil.dll}, and @file{avutil.lib}, respectively. Copy the three
+DLLs to your System32 directory (typically @file{C:\Windows\System32}).
+
+@end enumerate
+
+And here is how to use these libraries with Visual C++:
+
+@enumerate
+
+@item Create a new console application ("File / New / Project") and then
+select "Win32 Console Application". On the appropriate page of the
+Application Wizard, uncheck the "Precompiled headers" option.
+
+@item Write the source code for your application, or, for testing, just
+copy the code from an existing sample application into the source file
+that Visual C++ has already created for you. (Note that your source
+file has to have a @code{.cpp} extension; otherwise, Visual C++ won't
+compile the FFmpeg headers correctly because in C mode, it doesn't
+recognize the @code{inline} keyword.) For example, you can copy
+@file{output_example.c} from the FFmpeg distribution (but you will
+have to make minor modifications so the code will compile under
+C++, see below).
+
+@item Open the "Project / Properties" dialog box. In the "Configuration"
+combo box, select "All Configurations" so that the changes you make will
+affect both debug and release builds. In the tree view on the left hand
+side, select "C/C++ / General", then edit the "Additional Include
+Directories" setting to contain the complete paths to the
+@file{libavformat}, @file{libavcodec}, and @file{libavutil}
+subdirectories of your FFmpeg directory. Note that the directories have
+to be separated using semicolons. Now select "Linker / General" from the
+tree view and edit the "Additional Library Directories" setting to
+contain the same three directories.
+
+@item Still in the "Project / Properties" dialog box, select "Linker / Input"
+from the tree view, then add the files @file{avformat.lib},
+@file{avcodec.lib}, and @file{avutil.lib} to the end of the "Additional
+Dependencies". Note that the names of the libraries have to be separated
+using spaces.
+
+@item Now, select "C/C++ / Code Generation" from the tree view. Select
+"Debug" in the "Configuration" combo box. Make sure that "Runtime
+Library" is set to "Multi-threaded Debug DLL". Then, select "Release" in
+the "Configuration" combo box and make sure that "Runtime Library" is
+set to "Multi-threaded DLL".
+
+@item Click "OK" to close the "Project / Properties" dialog box and build
+the application. Hopefully, it should compile and run cleanly. If you
+used @file{output_example.c} as your sample application, you will get a
+few compiler errors, but they are easy to fix. The first type of error
+occurs because Visual C++ doesn't allow an @code{int} to be converted to
+an @code{enum} without a cast. To solve the problem, insert the required
+casts (this error occurs once for a @code{CodecID} and once for a
+@code{CodecType}). The second type of error occurs because C++ requires
+the return value of @code{malloc} to be cast to the exact type of the
+pointer it is being assigned to. Visual C++ will complain that, for
+example, @code{(void *)} is being assigned to @code{(uint8_t *)} without
+an explicit cast. So insert an explicit cast in these places to silence
+the compiler. The third type of error occurs because the @code{snprintf}
+library function is called @code{_snprintf} under Visual C++. So just
+add an underscore to fix the problem. (A rough sketch of these three
+fixes follows this list.) With these changes, @file{output_example.c}
+should compile under Visual C++, and the resulting executable should
+produce valid video files.
+
+@end enumerate
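+
+Here is that sketch. The variable names are made up purely for
+illustration; the exact lines depend on your FFmpeg version:
+
+@example
+/* 1. explicit casts where an int is assigned to an enum */
+c->codec_id   = (enum CodecID) codec_id;
+c->codec_type = (enum CodecType) codec_type;
+
+/* 2. explicit cast on the result of malloc() */
+picture_buf = (uint8_t *) malloc(size);
+
+/* 3. map snprintf to the Visual C++ name */
+#define snprintf _snprintf
+@end example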
+
+@subsection Cross compilation for Windows with Linux
+
+You must use the MinGW cross compilation tools available at
+@url{http://www.mingw.org/}.
+
+Then configure FFmpeg with the following options:
+@example
+./configure --enable-mingw32 --cross-prefix=i386-mingw32msvc-
+@end example
+(you can change the cross-prefix according to the prefix chosen for the
+MinGW tools).
+
+Then you can easily test FFmpeg with Wine
+(@url{http://www.winehq.com/}).
+
+@subsection Compilation under Cygwin
+
+Cygwin works very much like Unix.
+
+Just install your Cygwin with all the "Base" packages, plus the
+following "Devel" ones:
+@example
+binutils, gcc-core, make, subversion
+@end example
+
+Do not install binutils-20060709-1 (they are buggy on shared builds);
+use binutils-20050610-1 instead.
+
+Then run
+
+@example
+./configure --enable-static --disable-shared
+@end example
+
+to make a static build or
+
+@example
+./configure --enable-shared --disable-static
+@end example
+
+to build shared libraries.
+
+If you want to build FFmpeg with additional libraries, download Cygwin
+"Devel" packages for Ogg and Vorbis from any Cygwin packages repository
+and/or the SDL, xvid, faac, and faad2 packages from Cygwin Ports
+(@url{http://cygwinports.dotsrc.org/}).
+
+@subsection Crosscompilation for Windows under Cygwin
+
+With Cygwin you can create Windows binaries that don't need the cygwin1.dll.
+
+Just install your Cygwin as explained before, plus these additional
+"Devel" packages:
+@example
+gcc-mingw-core, mingw-runtime, mingw-zlib
+@end example
+
+and add some special flags to your configure invocation.
+
+For a static build run
+@example
+./configure --enable-mingw32 --enable-memalign-hack --enable-static --disable-shared --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
+@end example
+
+and for a build with shared libraries
+@example
+./configure --enable-mingw32 --enable-memalign-hack --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
+@end example
+
+@section BeOS
+
+The configure script should guess the configuration itself.
+Networking support is currently not finished.
+The errno issues have been fixed by Andrew Bachmann.
+
+Old stuff:
+
+François Revol - revol at free dot fr - April 2002
+
+The configure script should guess the configuration itself,
+however I still didn't test building on the net_server version of BeOS.
+
+FFserver is broken (needs poll() implementation).
+
+There are still issues with errno codes: they are negative in BeOS, and
+FFmpeg negates them when returning, which turns errors into seemingly
+valid results and then crashes.
+(To be fixed)
+
+@chapter Developers Guide
+
+@section API
+@itemize @bullet
+@item libavcodec is the library containing the codecs (both encoding and
+decoding). Look at @file{libavcodec/apiexample.c} to see how to use it
+(a compressed sketch follows this list).
+
+@item libavformat is the library containing the file format handling (mux and
+demux code for several formats). Look at @file{ffplay.c} to use it in a
+player. See @file{output_example.c} to use it to generate audio or video
+streams.
+
+@end itemize
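+
+Here is that compressed sketch of setting up a decoder with libavcodec.
+Function names follow @file{libavcodec/apiexample.c} of this era, and
+@code{inbuf} / @code{inbuf_size} are assumed to hold compressed data;
+consult that file for the complete, authoritative code:
+
+@example
+AVCodec *codec;
+AVCodecContext *c;
+AVFrame *picture;
+int got_picture;
+
+avcodec_init();
+avcodec_register_all();
+
+codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
+c = avcodec_alloc_context();
+avcodec_open(c, codec);
+picture = avcodec_alloc_frame();
+
+/* feed one chunk of the bitstream, get back a decoded frame */
+avcodec_decode_video(c, picture, &got_picture, inbuf, inbuf_size);
+
+avcodec_close(c);
+@end example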
+
+@section Integrating libavcodec or libavformat in your program
+
+You can integrate all the source code of the libraries to link them
+statically and so avoid any version problem. All you need to do is
+provide a 'config.mak' and a 'config.h' in the parent directory. See
+the defines generated by ./configure to understand what is needed.
+
+You can use libavcodec or libavformat in your commercial program, but
+@emph{any patch you make must be published}. The best way to proceed is
+to send your patches to the FFmpeg mailing list.
+
+@node Coding Rules
+@section Coding Rules
+
+FFmpeg is programmed in the ISO C90 language with a few additional
+features from ISO C99, namely:
+@itemize @bullet
+@item
+the @samp{inline} keyword;
+@item
+@samp{//} comments;
+@item
+designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
+@item
+compound literals (@samp{x = (struct s) @{ 17, 23 @};})
+@end itemize
+
+These features are supported by all compilers we care about, so we won't
+accept patches to remove their use unless they absolutely don't impair
+clarity and performance.
+
+All code must compile with GCC 2.95 and GCC 3.3. Currently, FFmpeg also
+compiles with several other compilers, such as the Compaq ccc compiler
+or Sun Studio 9, and we would like to keep it that way unless it would
+be exceedingly involved. To ensure compatibility, please don't use any
+additional C99 features or GCC extensions. Especially watch out for:
+@itemize @bullet
+@item
+mixing statements and declarations (see the example after this list);
+@item
+@samp{long long} (use @samp{int64_t} instead);
+@item
+@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
+@item
+GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
+@end itemize
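+
+As a small illustration of the first point (@code{foo()} and
+@code{bar()} are just placeholders):
+
+@example
+/* not allowed: C99-style declaration after a statement */
+foo();
+int i = bar();
+
+/* OK in C90: declare first, then execute statements */
+int i;
+foo();
+i = bar();
+@end example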
+
+Indent size is 4.
+The presentation is the one specified by 'indent -i4 -kr -nut'.
+The TAB character is forbidden outside of Makefiles, as is any
+form of trailing whitespace. Commits containing either will be
+rejected by the Subversion repository.
+
+The main priority in FFmpeg is simplicity and small code size (= fewer
+bugs).
+
+Comments: Use the JavaDoc/Doxygen
+format (see examples below) so that code documentation
+can be generated automatically. All nontrivial functions should have a comment
+above them explaining what the function does, even if it's just one sentence.
+All structures and their member variables should be documented, too.
+@example
+/**
+ * @@file mpeg.c
+ * MPEG codec.
+ * @@author ...
+ */
+
+/**
+ * Summary sentence.
+ * more text ...
+ * ...
+ */
+typedef struct Foobar@{
+ int var1; /**< var1 description */
+ int var2; ///< var2 description
+ /** var3 description */
+ int var3;
+@} Foobar;
+
+/**
+ * Summary sentence.
+ * more text ...
+ * ...
+ * @@param my_parameter description of my_parameter
+ * @@return return value description
+ */
+int myfunc(int my_parameter)
+...
+@end example
+
+fprintf and printf are forbidden in libavformat and libavcodec;
+please use av_log() instead.
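+
+For example (the context pointer and the message are of course only
+illustrative):
+
+@example
+av_log(avctx, AV_LOG_ERROR, "unsupported frame size %dx%d\n",
+       width, height);
+@end example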
+
+@section Development Policy
+
+@enumerate
+@item
+ You must not commit code which breaks FFmpeg! (Meaning unfinished but
+ enabled code which breaks compilation or compiles but does not work or
+ breaks the regression tests)
+ You can commit unfinished stuff (for testing etc), but it must be disabled
+ (#ifdef etc) by default so it does not interfere with other developers'
+ work.
+@item
+ You don't have to over-test things. If it works for you, and you think it
+ should work for others, then commit. If your code has problems
+ (portability, triggers compiler bugs, unusual environment etc) they will be
+ reported and eventually fixed.
+@item
+ Do not commit unrelated changes together, split them into self-contained
+ pieces.
+@item
+ Do not change behavior of the program (renaming options etc) without
+ first discussing it on the ffmpeg-devel mailing list. Do not remove
+ functionality from the code. Just improve!
+
+ Note: Redundant code can be removed.
+@item
+ Do not commit changes to the build system (Makefiles, configure script)
+ which change behavior, defaults etc, without asking first. The same
+ applies to compiler warning fixes, trivial looking fixes and to code
+ maintained by other developers. We usually have a reason for doing things
+ the way we do. Send your changes as patches to the ffmpeg-devel mailing
+ list, and if the code maintainers say OK, you may commit. This does not
+ apply to files you wrote and/or maintain.
+@item
+ We refuse source indentation and other cosmetic changes if they are mixed
+ with functional changes; such commits will be rejected and removed. Every
+ developer has his own indentation style; you should not change it. Of course,
+ if you (re)write something, you can use your own style, even though we would
+ prefer it if the indentation throughout FFmpeg were consistent (many projects
+ force a given indentation style - we don't). If you really need to make
+ indentation changes (try to avoid this), separate them strictly from real
+ changes.
+
+ NOTE: If you had to put an if()@{ .. @} around a large (> 5 lines) chunk of
+ code, then either do NOT change the indentation of the inner part (do not
+ move it to the right), or do so in a separate commit.
+@item
+ Always fill out the commit log message. Describe in a few lines what you
+ changed and why. You can refer to mailing list postings if you fix a
+ particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
+@item
+ If you apply a patch by someone else, include the name and email address in
+ the log message. Since the ffmpeg-cvslog mailing list is publicly
+ archived you should add some SPAM protection to the email address. Send an
+ answer to ffmpeg-devel (or wherever you got the patch from) saying that
+ you applied the patch.
+@item
+ Do NOT commit to code actively maintained by others without permission.
+ Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
+ timeframe (12h for build failures and security fixes, 3 days for small
+ changes, 1 week for big patches), then commit your patch if you think it's
+ OK. Also note that the maintainer can simply ask for more time to review!
+@item
+ Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
+ are sent there and reviewed by all the other developers. Bugs and possible
+ improvements or general questions regarding commits are discussed there. We
+ expect you to react if problems with your code are uncovered.
+@item
+ Update the documentation if you change behavior or add features. If you are
+ unsure how best to do this, send a patch to ffmpeg-devel, the documentation
+ maintainer(s) will review and commit your stuff.
+@item
+ Never write to unallocated memory, never write over the end of arrays, and
+ always check values read from an untrusted source before using them as an
+ array index or in other risky ways.
+@item
+ Remember to check if you need to bump versions for the specific libav
+ parts (libavutil, libavcodec, libavformat) you are changing. You need
+ to change the version integer and the version string (see the
+ illustration after this list).
+ Incrementing the first component means no backward compatibility to
+ previous versions (e.g. removal of a function from the public API).
+ Incrementing the second component means backward compatible change
+ (e.g. addition of a function to the public API).
+ Incrementing the third component means a noteworthy binary compatible
+ change (e.g. encoder bug fix that matters for the decoder).
+@item
+ If you add a new codec, remember to update the changelog, add it to
+ the supported codecs table in the documentation and bump the second
+ component of the @file{libavcodec} version number appropriately. If
+ it has a fourcc, add it to @file{libavformat/avienc.c}, even if it
+ is only a decoder.
+@end enumerate
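+
+To illustrate the version bump rule: the version macros live in each
+library's main header and look roughly like this (the numbers shown here
+are purely illustrative; libavformat and libavutil have analogous
+macros):
+
+@example
+#define LIBAVCODEC_VERSION_INT ((51<<16)+(40<<8)+4)
+#define LIBAVCODEC_VERSION     51.40.4
+@end example
+
+Adding a function to the public API would bump the second component
+(51.41.0), while removing one would bump the first (52.0.0).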
+
+We think our rules are not too hard. If you have comments, contact us.
+
+Note, these rules are mostly borrowed from the MPlayer project.
+
+@section Submitting patches
+
+First, read the Coding Rules (@pxref{Coding Rules}) above if you have not
+done so already.
+
+When you submit your patch, try to send a unified diff (diff '-up'
+option). I cannot read other diffs :-)
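+
+For example, against a Subversion checkout (a plain sketch; adjust the
+file names to your change):
+
+@example
+svn diff > my-change.patch
+# or, for a single file outside the repository:
+diff -up file.c.orig file.c > my-change.patch
+@end example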
+
+Also please do not submit patches which contain several unrelated changes.
+Split them into individual self-contained patches; this makes reviewing
+them much easier.
+
+Run the regression tests before submitting a patch so that you can
+verify that there are no big problems.
+
+Patches should be posted as base64 encoded attachments (or any other
+encoding which ensures that the patch won't be trashed during
+transmission) to the ffmpeg-devel mailing list, see
+@url{http://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-devel}
+
+It also helps quite a bit if you tell us what the patch does (for example
+'replaces lrint by lrintf'), and why (for example '*BSD isn't C99 compliant
+and has no lrint()').
+
+We reply to all submitted patches and either apply or reject with some
+explanation why, but sometimes we are quite busy so it can take a week or two.
+
+@section Regression tests
+
+Before submitting a patch (or committing to the repository), you should at least
+test that you did not break anything.
+
+The regression tests build a synthetic video stream and a synthetic
+audio stream. These are then encoded and decoded with all codecs or
+formats. The CRC (or MD5) of each generated file is recorded in a
+result file. A 'diff' is launched to compare the reference results and
+the result file.
+
+The regression tests then go on to test the FFserver code with a
+limited set of streams. It is important that this step runs correctly
+as well.
+
+Run 'make test' to test all the codecs and formats.
+
+Run 'make fulltest' to test all the codecs, formats and FFserver.
+
+[Of course, some patches may change the results of the regression tests. In
+this case, the reference results of the regression tests shall be modified
+accordingly].
+
+@bye
diff --git a/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt b/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt
new file mode 100644
index 000000000..2eb4ee71a
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt
@@ -0,0 +1,172 @@
+FFmpeg & evaluating performance on the PowerPC Architecture HOWTO
+
+(c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
+
+
+
+I - Introduction
+
+The PowerPC architecture and its SIMD extension AltiVec offer some
+interesting tools to evaluate performance and improve the code.
+This document tries to explain how to use those tools with FFmpeg.
+
+The architecture itself offers two ways to evaluate the performance of
+a given piece of code:
+
+1) The Time Base Registers (TBL)
+2) The Performance Monitor Counter Registers (PMC)
+
+The first ones are always available, always active, but they're not very
+accurate: the registers increment by one every four *bus* cycles. On
+my 667 MHz TiBook (ppc7450), this means once every twenty *processor*
+cycles. So we won't use that.
+
+The PMC are much more useful: not only can they report cycle-accurate
+timing, but they can also be used to monitor many other parameters,
+such as the number of AltiVec stalls for every kind of instruction,
+or instruction cache misses. The downside is that not all processors
+support the PMC (all G3, all G4 and the 970 do support them), and
+they're inactive by default - you need to activate them with a
+dedicated tool. Also, the number of available PMC depends on the
+processor: the various 604 have 2, the various 75x (aka G3) have 4,
+and the various 74xx (aka G4) have 6.
+
+*WARNING*: The PowerPC 970 is not very well documented, and its PMC
+registers are 64 bits wide. To properly notify the code, you *must*
+tune for the 970 (using --tune=970), or the code will assume 32 bit
+registers.
+
+
+II - Enabling FFmpeg PowerPC performance support
+
+This needs to be done by hand. First, you need to configure FFmpeg as
+usual, but add the "--powerpc-perf-enable" option. For instance:
+
+#####
+./configure --prefix=/usr/local/ffmpeg-svn --cc=gcc-3.3 --tune=7450 --powerpc-perf-enable
+#####
+
+This will configure FFmpeg to install inside /usr/local/ffmpeg-svn,
+compiling with gcc-3.3 (you should try to use this one or a newer
+gcc), and tuning for the PowerPC 7450 (i.e. the newer G4; as a rule of
+thumb, those at 550 MHz and above). It will also enable the PMC.
+
+You may also edit the file "config.h" to enable the following line:
+
+#####
+// #define ALTIVEC_USE_REFERENCE_C_CODE 1
+#####
+
+If you enable this line, then the code will not make use of AltiVec,
+but will use the reference C code instead. This is useful to compare
+performance between two versions of the code.
+
+Also, the number of enabled PMC is defined in "libavcodec/ppc/dsputil_ppc.h":
+
+#####
+#define POWERPC_NUM_PMC_ENABLED 4
+#####
+
+If you have a G4 CPU, you can enable all 6 PMC. DO NOT enable more
+PMC than available on your CPU!
+
+Then, simply compile FFmpeg as usual (make && make install).
+
+
+
+III - Using FFmpeg PowerPC performance support
+
+This FFmpeg can be used exactly as usual. But before exiting, FFmpeg
+will dump a per-function report that looks like this:
+
+#####
+PowerPC performance report
+ Values are from the PMC registers, and represent whatever the
+ registers are set to record.
+ Function "gmc1_altivec" (pmc1):
+ min: 231
+ max: 1339867
+ avg: 558.25 (255302)
+ Function "gmc1_altivec" (pmc2):
+ min: 93
+ max: 2164
+ avg: 267.31 (255302)
+ Function "gmc1_altivec" (pmc3):
+ min: 72
+ max: 1987
+ avg: 276.20 (255302)
+(...)
+#####
+
+In this example, PMC1 was set to record CPU cycles, PMC2 was set to
+record AltiVec Permute Stall Cycles, and PMC3 was set to record AltiVec
+Issue Stalls.
+
+The function "gmc1_altivec" was monitored 255302 times, and the
+minimum execution time was 231 processor cycles. The max and average
+aren't much use, as it's very likely the OS interrupted execution for
+reasons of its own :-(
+
+With the exact same settings and source file, but using the reference C
+code we get:
+
+#####
+PowerPC performance report
+ Values are from the PMC registers, and represent whatever the
+ registers are set to record.
+ Function "gmc1_altivec" (pmc1):
+ min: 592
+ max: 2532235
+ avg: 962.88 (255302)
+ Function "gmc1_altivec" (pmc2):
+ min: 0
+ max: 33
+ avg: 0.00 (255302)
+ Function "gmc1_altivec" (pmc3):
+ min: 0
+ max: 350
+ avg: 0.03 (255302)
+(...)
+#####
+
+592 cycles, so the fastest AltiVec execution is about 2.5x faster than
+the fastest C execution in this example. It's not perfect but it's not
+bad (well I wrote this function so I can't say otherwise :-).
+
+Once you have that kind of report, you can try to improve things by
+finding what goes wrong and fixing it; in the example above, one
+should try to diminish the number of AltiVec stalls, as this *may*
+improve performance.
+
+
+
+IV - Enabling the PMC in Mac OS X
+
+This is easy. Use "MONster" and "monster". Those tools come from
+Apple's CHUD package and can be found hidden on the developer web
+site & FTP site. "MONster" is the graphical application; use it to
+generate a config file specifying what each register should
+monitor. Then use the command-line application "monster" to use that
+config file, and enjoy the results.
+
+Note that "MONster" can be used for many other things, but as it is
+documented by Apple, it is not my subject here.
+
+If you are using CHUD 4.4.2 or later, you'll notice that MONster is
+no longer available. It has been superseded by Shark, where
+configuration of PMCs is available as a plugin.
+
+
+
+V - Enabling the PMC on Linux
+
+On Linux you may use oprofile from http://oprofile.sf.net. Depending on
+the version and the CPU, you may need to apply a patch[1] to access the
+full set of possible counters from the userspace application. You can
+always define them using the kernel interface /dev/oprofile/*.
+
+[1] http://dev.gentoo.org/~lu_zero/development/oprofile-g4-20060423.patch
+
+--
+Romain Dolbeau <romain@dolbeau.org>
+Luca Barbato <lu_zero@gentoo.org>
diff --git a/contrib/ffmpeg/doc/ffplay-doc.texi b/contrib/ffmpeg/doc/ffplay-doc.texi
new file mode 100644
index 000000000..db08eb38f
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffplay-doc.texi
@@ -0,0 +1,104 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle FFplay Documentation
+@titlepage
+@sp 7
+@center @titlefont{FFplay Documentation}
+@sp 3
+@end titlepage
+
+
+@chapter Introduction
+
+@c man begin DESCRIPTION
+FFplay is a very simple and portable media player using the FFmpeg
+libraries and the SDL library. It is mostly used as a testbed for the
+various FFmpeg APIs.
+@c man end
+
+@chapter Invocation
+
+@section Syntax
+@example
+@c man begin SYNOPSIS
+ffplay [options] @file{input_file}
+@c man end
+@end example
+
+@c man begin OPTIONS
+@section Main options
+
+@table @option
+@item -h
+show help
+@item -x width
+force displayed width
+@item -y height
+force displayed height
+@item -an
+disable audio
+@item -vn
+disable video
+@item -nodisp
+disable graphical display
+@item -f fmt
+force format
+@end table
+
+@section Advanced options
+@table @option
+@item -stats
+Show the stream duration, the codec parameters, the current position in
+the stream and the audio/video synchronisation drift.
+@item -rtp_tcp
+Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
+if you are streaming with the RTSP protocol.
+@item -sync type
+Set the master clock to audio (@code{type=audio}), video
+(@code{type=video}) or external (@code{type=ext}). Default is audio. The
+master clock is used to control audio-video synchronization. Most media
+players use audio as master clock, but in some cases (streaming or high
+quality broadcast) it is necessary to change that. This option is mainly
+used for debugging purposes.
+@end table
+
+@section While playing
+
+@table @key
+@item q, ESC
+quit
+
+@item f
+toggle full screen
+
+@item p, SPC
+pause
+
+@item a
+cycle audio channel
+
+@item v
+cycle video channel
+
+@item w
+show audio waves
+@end table
+
+@c man end
+
+@ignore
+
+@setfilename ffplay
+@settitle FFplay media player
+
+@c man begin SEEALSO
+ffmpeg(1), ffserver(1) and the HTML documentation of @file{ffmpeg}.
+@c man end
+
+@c man begin AUTHOR
+Fabrice Bellard
+@c man end
+
+@end ignore
+
+@bye
diff --git a/contrib/ffmpeg/doc/ffserver-doc.texi b/contrib/ffmpeg/doc/ffserver-doc.texi
new file mode 100644
index 000000000..ed67bb6c0
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffserver-doc.texi
@@ -0,0 +1,224 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle FFserver Documentation
+@titlepage
+@sp 7
+@center @titlefont{FFserver Documentation}
+@sp 3
+@end titlepage
+
+
+@chapter Introduction
+
+@c man begin DESCRIPTION
+FFserver is a streaming server for both audio and video. It supports
+several live feeds, streaming from files and time shifting on live feeds
+(you can seek to positions in the past on each live feed, provided you
+specify a big enough feed storage in ffserver.conf).
+
+This documentation covers only the streaming aspects of ffserver /
+ffmpeg. All questions about parameters for ffmpeg, codec questions,
+etc. are not covered here. Read @file{ffmpeg-doc.html} for more
+information.
+@c man end
+
+@chapter QuickStart
+
+[Contributed by Philip Gladstone, philip-ffserver at gladstonefamily dot net]
+
+@section What can this do?
+
+When properly configured and running, you can capture video and audio in real
+time from a suitable capture card, and stream it out over the Internet to
+either Windows Media Player or RealAudio player (with some restrictions).
+
+It can also stream from files, though that is currently broken. Very often, a
+web server can be used to serve up the files just as well.
+
+It can stream prerecorded video from .ffm files, though it is somewhat tricky
+to make it work correctly.
+
+@section What do I need?
+
+I use Linux on a 900MHz Duron with a cheapo Bt848 based TV capture card. I'm
+using stock Linux 2.4.17 with the stock drivers. [Actually that isn't true,
+I needed some special drivers for my motherboard-based sound card.]
+
+I understand that FreeBSD systems work just fine as well.
+
+@section How do I make it work?
+
+First, build the kit. It *really* helps to have installed LAME first. Then when
+you run the ffserver ./configure, make sure that you have the --enable-mp3lame
+flag turned on.
+
+LAME is important as it allows for streaming audio to Windows Media Player.
+Don't ask why the other audio types do not work.
+
+As a simple test, just run the following two command lines (assuming that you
+have a V4L video capture card):
+
+@example
+./ffserver -f doc/ffserver.conf &
+./ffmpeg http://localhost:8090/feed1.ffm
+@end example
+
+At this point you should be able to go to your Windows machine and fire up
+Windows Media Player (WMP). Go to Open URL and enter
+
+@example
+ http://<linuxbox>:8090/test.asf
+@end example
+
+You should (after a short delay) see video and hear audio.
+
+WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
+transfer the entire file before starting to play.
+The same is true of AVI files.
+
+@section What happens next?
+
+You should edit the ffserver.conf file to suit your needs (in terms of
+frame rates etc). Then install ffserver and ffmpeg, write a script to start
+them up, and off you go.
+
+@section Troubleshooting
+
+@subsection I don't hear any audio, but video is fine.
+
+Maybe you didn't install LAME, or got your ./configure statement wrong. Check
+the ffmpeg output to see if a line referring to MP3 is present. If not, then
+your configuration was incorrect. If it is, then maybe your wiring is not
+set up correctly. Maybe the sound card is not getting data from the right
+input source. Maybe you have a really awful audio interface (like I do)
+that only captures in stereo and also requires that one channel be flipped.
+If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before
+starting ffmpeg.
+
+@subsection The audio and video lose sync after a while.
+
+Yes, they do.
+
+@subsection After a long while, the video update rate goes way down in WMP.
+
+Yes, it does. Who knows why?
+
+@subsection WMP 6.4 behaves differently to WMP 7.
+
+Yes, it does. Any thoughts on this would be gratefully received. These
+differences extend to embedding WMP into a web page. [There are two
+object IDs that you can use: The old one, which does not play well, and
+the new one, which does (both tested on the same system). However,
+I suspect that the new one is not available unless you have installed WMP 7].
+
+@section What else can it do?
+
+You can replay video from .ffm files that was recorded earlier.
+However, there are a number of caveats, including the fact that the
+ffserver parameters must match the original parameters used to record the
+file. If they do not, then ffserver deletes the file before recording into it.
+(Now that I write this, it seems broken).
+
+You can fiddle with many of the codec choices and encoding parameters, and
+there are a bunch more parameters that you cannot control. Post a message
+to the mailing list if there are some 'must have' parameters. Look in
+ffserver.conf for a list of the currently available controls.
+
+It will automatically generate the ASX or RAM files that are often used
+in browsers. These files are actually redirections to the underlying ASF
+or RM file. The reason for this is that the browser often fetches the
+entire file before starting up the external viewer. The redirection files
+are very small and can be transferred quickly. [The stream itself is
+often 'infinite' and thus the browser tries to download it and never
+finishes.]
+
+@section Tips
+
+* When you connect to a live stream, most players (WMP, RA, etc) want to
+buffer a certain number of seconds of material so that they can display the
+signal continuously. However, ffserver (by default) starts sending data
+in realtime. This means that there is a pause of a few seconds while the
+buffering is being done by the player. The good news is that this can be
+cured by adding a '?buffer=5' to the end of the URL. This means that the
+stream should start 5 seconds in the past -- and so the first 5 seconds
+of the stream are sent as fast as the network will allow. It will then
+slow down to real time. This noticeably improves the startup experience.
+
+You can also add a 'Preroll 15' statement into the ffserver.conf that will
+add the 15 second prebuffering on all requests that do not otherwise
+specify a time. In addition, ffserver will skip frames until a key_frame
+is found. This further reduces the startup delay by not transferring data
+that will be discarded.
+
+* You may want to adjust the MaxBandwidth in the ffserver.conf to limit
+the amount of bandwidth consumed by live streams.
+
+@section Why does the ?buffer / Preroll stop working after a time?
+
+It turns out that (on my machine at least) the number of frames successfully
+grabbed is marginally less than the number that ought to be grabbed. This
+means that the timestamp in the encoded data stream gets behind realtime.
+This means that if you say 'Preroll 10', then when the stream gets 10
+or more seconds behind, there is no Preroll left.
+
+Fixing this requires a change in the internals of how timestamps are
+handled.
+
+@section Does the @code{?date=} stuff work?
+
+Yes (subject to the limitation outlined above). Also note that whenever you
+start ffserver, it deletes the ffm file (if any parameters have changed),
+thus wiping out what you had recorded before.
+
+The format of the @code{?date=xxxxxx} is fairly flexible. You should use one
+of the following formats (the 'T' is literal):
+
+@example
+* YYYY-MM-DDTHH:MM:SS (localtime)
+* YYYY-MM-DDTHH:MM:SSZ (UTC)
+@end example
+
+You can omit the YYYY-MM-DD, and then it refers to the current day. However
+note that @samp{?date=16:00:00} refers to 16:00 on the current day -- this
+may be in the future and so is unlikely to be useful.
+
+You use this by adding the ?date= to the end of the URL for the stream.
+For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
+
+@chapter Invocation
+@section Syntax
+@example
+@c man begin SYNOPSIS
+ffserver [options]
+@c man end
+@end example
+
+@section Options
+@c man begin OPTIONS
+@table @option
+@item -L
+Print the license.
+@item -h
+Print the help.
+@item -f configfile
+Use @file{configfile} instead of @file{/etc/ffserver.conf}.
+@end table
+@c man end
+
+@ignore
+
+@setfilename ffserver
+@settitle FFserver video server
+
+@c man begin SEEALSO
+ffmpeg(1), ffplay(1), the @file{ffmpeg/doc/ffserver.conf} example and
+the HTML documentation of @file{ffmpeg}.
+@c man end
+
+@c man begin AUTHOR
+Fabrice Bellard
+@c man end
+
+@end ignore
+
+@bye
diff --git a/contrib/ffmpeg/doc/ffserver.conf b/contrib/ffmpeg/doc/ffserver.conf
new file mode 100644
index 000000000..a3b3ff412
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffserver.conf
@@ -0,0 +1,349 @@
+# Port on which the server is listening. You must select a different
+# port from your standard HTTP web server if it is running on the same
+# computer.
+Port 8090
+
+# Address on which the server is bound. Only useful if you have
+# several network interfaces.
+BindAddress 0.0.0.0
+
+# Number of simultaneous requests that can be handled. Since FFServer
+# is very fast, it is more likely that you will want to leave this high
+# and use MaxBandwidth, below.
+MaxClients 1000
+
+# This is the maximum amount of bandwidth, in kbit/sec, that you are
+# prepared to consume when streaming to clients.
+MaxBandwidth 1000
+
+# Access log file (uses standard Apache log file format)
+# '-' is the standard output.
+CustomLog -
+
+# Remove this line if you want to launch ffserver as a daemon.
+NoDaemon
+
+
+##################################################################
+# Definition of the live feeds. Each live feed contains one video
+# and/or audio sequence coming from an ffmpeg encoder or another
+# ffserver. This sequence may be encoded simultaneously with several
+# codecs at several resolutions.
+
+<Feed feed1.ffm>
+
+# You must use 'ffmpeg' to send a live feed to ffserver. In this
+# example, you can type:
+#
+# ffmpeg http://localhost:8090/feed1.ffm
+
+# ffserver can also do time shifting. It means that it can stream any
+# previously recorded live stream. The request should contain:
+# "http://xxxx?date=[YYYY-MM-DDT][[HH:]MM:]SS[.m...]". You must specify
+# a path where the feed is stored on disk. You also specify the
+# maximum size of the feed, where zero means unlimited. Default:
+# File=/tmp/feed_name.ffm FileMaxSize=5M
+File /tmp/feed1.ffm
+FileMaxSize 200K
+
+# You could specify
+# ReadOnlyFile /saved/specialvideo.ffm
+# This marks the file as readonly and it will not be deleted or updated.
+
+# Specify launch in order to start ffmpeg automatically.
+# First ffmpeg must be defined with an appropriate path if needed,
+# after that options can follow, but avoid adding the http:// field
+#Launch ffmpeg
+
+# Only allow connections from localhost to the feed.
+ACL allow 127.0.0.1
+
+</Feed>
+
+
+##################################################################
+# Now you can define each stream which will be generated from the
+# original audio and video stream. Each format has a filename (here
+# 'test1.mpg'). FFServer will send this stream when answering a
+# request containing this filename.
+
+<Stream test1.mpg>
+
+# coming from live feed 'feed1'
+Feed feed1.ffm
+
+# Format of the stream: you can choose among:
+# mpeg : MPEG-1 multiplexed video and audio
+# mpegvideo : only MPEG-1 video
+# mp2 : MPEG-2 audio (use AudioCodec to select layer 2 and 3 codec)
+# ogg : Ogg format (Vorbis audio codec)
+# rm : RealNetworks-compatible stream. Multiplexed audio and video.
+# ra : RealNetworks-compatible stream. Audio only.
+# mpjpeg : Multipart JPEG (works with Netscape without any plugin)
+# jpeg : Generate a single JPEG image.
+# asf : ASF compatible streaming (Windows Media Player format).
+# swf : Macromedia Flash compatible stream
+# avi : AVI format (MPEG-4 video, MPEG audio sound)
+# master : special ffmpeg stream used to duplicate a server
+Format mpeg
+
+# Bitrate for the audio stream. Codecs usually support only a few
+# different bitrates.
+AudioBitRate 32
+
+# Number of audio channels: 1 = mono, 2 = stereo
+AudioChannels 1
+
+# Sampling frequency for audio. When using low bitrates, you should
+# lower this frequency to 22050 or 11025. The supported frequencies
+# depend on the selected audio codec.
+AudioSampleRate 44100
+
+# Bitrate for the video stream
+VideoBitRate 64
+
+# Ratecontrol buffer size
+VideoBufferSize 40
+
+# Number of frames per second
+VideoFrameRate 3
+
+# Size of the video frame: WxH (default: 160x128)
+# The following abbreviations are defined: sqcif, qcif, cif, 4cif
+VideoSize 160x128
+
+# Transmit only intra frames (useful for low bitrates, but kills frame rate).
+#VideoIntraOnly
+
+# If non-intra only, an intra frame is transmitted every VideoGopSize
+# frames. Video synchronization can only begin at an intra frame.
+VideoGopSize 12
+
+# More MPEG-4 parameters
+# VideoHighQuality
+# Video4MotionVector
+
+# Choose your codecs:
+#AudioCodec mp2
+#VideoCodec mpeg1video
+
+# Suppress audio
+#NoAudio
+
+# Suppress video
+#NoVideo
+
+#VideoQMin 3
+#VideoQMax 31
+
+# Set this to the number of seconds backwards in time to start. Note that
+# most players will buffer 5-10 seconds of video, and also you need to allow
+# for a keyframe to appear in the data stream.
+#Preroll 15
+
+# ACL:
+
+# You can allow ranges of addresses (or single addresses)
+#ACL ALLOW <first address> <last address>
+
+# You can deny ranges of addresses (or single addresses)
+#ACL DENY <first address> <last address>
+
+# You can repeat the ACL allow/deny as often as you like. It is on a per
+# stream basis. The first match defines the action. If there are no matches,
+# then the default is the inverse of the last ACL statement.
+#
+# Thus 'ACL allow localhost' only allows access from localhost.
+# 'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and
+# allow everybody else.
+
+</Stream>
+
+
+##################################################################
+# Example streams
+
+
+# Multipart JPEG
+
+#<Stream test.mjpg>
+#Feed feed1.ffm
+#Format mpjpeg
+#VideoFrameRate 2
+#VideoIntraOnly
+#NoAudio
+#Strict -1
+#</Stream>
+
+
+# Single JPEG
+
+#<Stream test.jpg>
+#Feed feed1.ffm
+#Format jpeg
+#VideoFrameRate 2
+#VideoIntraOnly
+##VideoSize 352x240
+#NoAudio
+#Strict -1
+#</Stream>
+
+
+# Flash
+
+#<Stream test.swf>
+#Feed feed1.ffm
+#Format swf
+#VideoFrameRate 2
+#VideoIntraOnly
+#NoAudio
+#</Stream>
+
+
+# ASF compatible
+
+<Stream test.asf>
+Feed feed1.ffm
+Format asf
+VideoFrameRate 15
+VideoSize 352x240
+VideoBitRate 256
+VideoBufferSize 40
+VideoGopSize 30
+AudioBitRate 64
+StartSendOnKey
+</Stream>
+
+
+# MP3 audio
+
+#<Stream test.mp3>
+#Feed feed1.ffm
+#Format mp2
+#AudioCodec mp3
+#AudioBitRate 64
+#AudioChannels 1
+#AudioSampleRate 44100
+#NoVideo
+#</Stream>
+
+
+# Ogg Vorbis audio
+
+#<Stream test.ogg>
+#Feed feed1.ffm
+#Title "Stream title"
+#AudioBitRate 64
+#AudioChannels 2
+#AudioSampleRate 44100
+#NoVideo
+#</Stream>
+
+
+# Real with audio only at 32 kbits
+
+#<Stream test.ra>
+#Feed feed1.ffm
+#Format rm
+#AudioBitRate 32
+#NoVideo
+#NoAudio
+#</Stream>
+
+
+# Real with audio and video at 64 kbits
+
+#<Stream test.rm>
+#Feed feed1.ffm
+#Format rm
+#AudioBitRate 32
+#VideoBitRate 128
+#VideoFrameRate 25
+#VideoGopSize 25
+#NoAudio
+#</Stream>
+
+
+##################################################################
+# A stream coming from a file: you only need to set the input
+# filename and optionally a new format. Supported conversions:
+# AVI -> ASF
+
+#<Stream file.rm>
+#File "/usr/local/httpd/htdocs/tlive.rm"
+#NoAudio
+#</Stream>
+
+#<Stream file.asf>
+#File "/usr/local/httpd/htdocs/test.asf"
+#NoAudio
+#Author "Me"
+#Copyright "Super MegaCorp"
+#Title "Test stream from disk"
+#Comment "Test comment"
+#</Stream>
+
+
+##################################################################
+# RTSP examples
+#
+# You can access this stream with the RTSP URL:
+# rtsp://localhost:5454/test1-rtsp.mpg
+#
+# A non-standard RTSP redirector is also created. Its URL is:
+# http://localhost:8090/test1-rtsp.rtsp
+
+#<Stream test1-rtsp.mpg>
+#Format rtp
+#File "/usr/local/httpd/htdocs/test1.mpg"
+#</Stream>
+
+
+##################################################################
+# SDP/multicast examples
+#
+# If you want to send your stream in multicast, you must set the
+# multicast address with MulticastAddress. The port and the TTL can
+# also be set.
+#
+# An SDP file is automatically generated by ffserver by adding the
+# 'sdp' extension to the stream name (here
+# http://localhost:8090/test1-sdp.sdp). You should usually give this
+# file to your player to play the stream.
+#
+# The 'NoLoop' option can be used to avoid looping when the stream is
+# terminated.
+
+#<Stream test1-sdp.mpg>
+#Format rtp
+#File "/usr/local/httpd/htdocs/test1.mpg"
+#MulticastAddress 224.124.0.1
+#MulticastPort 5000
+#MulticastTTL 16
+#NoLoop
+#</Stream>
+
+
+##################################################################
+# Special streams
+
+# Server status
+
+<Stream stat.html>
+Format status
+
+# Only allow local people to get the status
+ACL allow localhost
+ACL allow 192.168.0.0 192.168.255.255
+
+#FaviconURL http://pond1.gladstonefamily.net:8080/favicon.ico
+</Stream>
+
+
+# Redirect index.html to the appropriate site
+
+<Redirect index.html>
+URL http://www.ffmpeg.org/
+</Redirect>
+
+
diff --git a/contrib/ffmpeg/doc/hooks.texi b/contrib/ffmpeg/doc/hooks.texi
new file mode 100644
index 000000000..15013547c
--- /dev/null
+++ b/contrib/ffmpeg/doc/hooks.texi
@@ -0,0 +1,113 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle Video Hook Documentation
+@titlepage
+@sp 7
+@center @titlefont{Video Hook Documentation}
+@sp 3
+@end titlepage
+
+
+@chapter Introduction
+
+
+The video hook functionality is designed (mostly) for live video. It allows
+the video to be modified or examined between the decoder and the encoder.
+
+Any number of hook modules can be placed inline, and they are run in the
+order that they were specified on the ffmpeg command line.
+
+Three modules are provided and are described below. They are all intended to
+be used as a base for your own modules.
+
+Modules are loaded using the -vhook option to ffmpeg. The value of this parameter
+is a space separated list of arguments. The first is the module name, and the rest
+are passed as arguments to the Configure function of the module.
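+
+For example, to run a single hook module with no extra arguments (here
+the null module described below; the path and file extension depend on
+your build):
+
+@example
+ffmpeg -i input.avi -vhook 'vhook/null.so' output.avi
+@end example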
+
+@section null.c
+
+This does nothing. Actually it converts the input image to RGB24 and then converts
+it back again. This is meant as a sample that you can use to test your setup.
+
+@section fish.c
+
+This implements a 'fish detector'. Essentially it converts the image into HSV
+space and tests whether more than a certain percentage of the pixels fall into
+a specific HSV cuboid. If so, then the image is saved into a file for processing
+by other bits of code.
+
+Why use HSV? It turns out that HSV cuboids represent a more compact range of
+colors than would an RGB cuboid.
+
+@section imlib2.c
+
+This module implements a text overlay for a video image. Currently it
+supports a fixed overlay or reading the text from a file. The string
+is passed through strftime so that it is easy to imprint the date and
+time onto the image.
+
+You may also overlay an image (even semi-transparent) like TV stations do.
+You may move either the text or the image around your video to create
+scrolling credits, for example.
+
+Fonts are looked for in the FONTPATH environment variable.
+
+Options:
+@multitable @columnfractions .2 .8
+@item @option{-c <color>} @tab The color of the text
+@item @option{-F <fontname>} @tab The font face and size
+@item @option{-t <text>} @tab The text
+@item @option{-f <filename>} @tab The filename to read text from
+@item @option{-x <expression>} @tab X coordinate of text or image
+@item @option{-y <expression>} @tab Y coordinate of text or image
+@item @option{-i <filename>} @tab The filename to read an image from
+@end multitable
+
+Expressions are functions of these variables:
+@multitable @columnfractions .2 .8
+@item @var{N} @tab frame number (starting at zero)
+@item @var{H} @tab frame height
+@item @var{W} @tab frame width
+@item @var{h} @tab image height
+@item @var{w} @tab image width
+@item @var{X} @tab previous x coordinate of text or image
+@item @var{Y} @tab previous y coordinate of text or image
+@end multitable
+
+You may also use the constants @var{PI}, @var{E}, and the math functions
+available in the FFmpeg formula evaluator (@url{ffmpeg-doc.html#SEC13}),
+except @var{bits2qp(bits)} and @var{qp2bits(qp)}.
+
+Usage examples:
+
+@example
+ # Remember to set the path to your fonts
+ FONTPATH="/cygdrive/c/WINDOWS/Fonts/"
+ FONTPATH="$FONTPATH:/usr/share/imlib2/data/fonts/"
+ FONTPATH="$FONTPATH:/usr/X11R6/lib/X11/fonts/TTF/"
+ export FONTPATH
+
+ # Bulb dancing in a Lissajous pattern
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -x W*(0.5+0.25*sin(N/47*PI))-w/2 -y H*(0.5+0.50*cos(N/97*PI))-h/2 -i /usr/share/imlib2/data/images/bulb.png' \
+ -acodec copy -sameq output.avi
+
+ # Text scrolling
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -c red -F Vera.ttf/20 -x 150+0.5*N -y 70+0.25*N -t Hello' \
+ -acodec copy -sameq output.avi
+@end example
+
+@section ppm.c
+
+It's basically a launch point for a PPM pipe, so you can use any
+executable (or script) which consumes a PPM on stdin and produces a PPM
+on stdout (and flushes each frame).
+
+Usage example:
+
+@example
+ffmpeg -i input -vhook "/path/to/ppm.so some-ppm-filter args" output
+@end example
+
+@bye
diff --git a/contrib/ffmpeg/doc/optimization.txt b/contrib/ffmpeg/doc/optimization.txt
new file mode 100644
index 000000000..26c5ae64c
--- /dev/null
+++ b/contrib/ffmpeg/doc/optimization.txt
@@ -0,0 +1,158 @@
+Optimization Tips (for libavcodec):
+
+What to optimize:
+If you plan to do non-x86 architecture specific optimizations (SIMD normally),
+then take a look in the i386/ directory, as most important functions are
+already optimized for MMX.
+
+If you want to do x86 optimizations then you can either try to finetune the
+stuff in the i386 directory or find some other functions in the C source to
+optimize, but there aren't many left.
+
+Understanding these overoptimized functions:
+As many functions tend to be a bit difficult to understand because
+of optimizations, it can be hard to optimize them further, or write
+architecture-specific versions. It is recommended to look at older
+revisions of the interesting files (for a web frontend try ViewVC at
+http://svn.mplayerhq.hu/ffmpeg/trunk/).
+Alternatively, look into the other architecture-specific versions in
+the i386/, ppc/, alpha/ subdirectories. Even if you don't exactly
+comprehend the instructions, it could help understanding the functions
+and how they can be optimized.
+
+NOTE: If you still don't understand some function, ask at our mailing list!!!
+(http://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-devel)
+
+
+
+WTF is that function good for ....:
+The primary purpose of this list is to avoid wasting time optimizing
+functions which are rarely used.
+
+put(_no_rnd)_pixels{,_x2,_y2,_xy2}
+ Used in motion compensation (en/decoding).
+
+avg_pixels{,_x2,_y2,_xy2}
+ Used in motion compensation of B-frames.
+ These are less important than the put*pixels functions.
+
+avg_no_rnd_pixels*
+ unused
+
+pix_abs16x16{,_x2,_y2,_xy2}
+ Used in motion estimation (encoding) with SAD.
+
+pix_abs8x8{,_x2,_y2,_xy2}
+ Used in motion estimation (encoding) with SAD of MPEG-4 4MV only.
+ These are less important than the pix_abs16x16* functions.
+
+put_mspel8_mc* / wmv2_mspel8*
+ Used only in WMV2.
+ it is not recommended that you waste your time with these, as WMV2
+ is an ugly and relatively useless codec.
+
+mpeg4_qpel* / *qpel_mc*
+ Used in MPEG-4 qpel motion compensation (encoding & decoding).
+ The qpel8 functions are used only for 4mv,
+ the avg_* functions are used only for B-frames.
+ Optimizing them should have a significant impact on qpel
+ encoding & decoding.
+
+qpel{8,16}_mc??_old_c / *pixels{8,16}_l4
+ Just used to work around a bug in an old libavcodec encoder version.
+ Don't optimize them.
+
+tpel_mc_func {put,avg}_tpel_pixels_tab
+ Used only for SVQ3, so only optimize them if you need fast SVQ3 decoding.
+
+add_bytes/diff_bytes
+ For huffyuv only, optimize if you want a faster ffhuffyuv codec.
+
+get_pixels / diff_pixels
+ Used for encoding, easy.
+
+clear_blocks
+ easiest to optimize
+
+gmc
+ Used for MPEG-4 gmc.
+ Optimizing this should have a significant effect on the gmc decoding
+ speed but it's very likely impossible to write in SIMD.
+
+gmc1
+ Used for chroma blocks in MPEG-4 gmc with 1 warp point
+ (there are 4 luma & 2 chroma blocks per macroblock, so
+ only 1/3 of the gmc blocks use this, the other 2/3
+ use the normal put_pixel* code, but only if there is
+ just 1 warp point).
+ Note: DivX5 gmc always uses just 1 warp point.
+
+pix_sum
+ Used for encoding.
+
+hadamard8_diff / sse / sad == pix_norm1 / dct_sad / quant_psnr / rd / bit
+ Specific compare functions used in encoding, it depends upon the
+ command line switches which of these are used.
+ Don't waste your time with dct_sad & quant_psnr, they aren't
+ really useful.
+
+put_pixels_clamped / add_pixels_clamped
+ Used for en/decoding in the IDCT, easy.
+ Note, some optimized IDCTs have the add/put clamped code included and
+ then put_pixels_clamped / add_pixels_clamped will be unused.
+
+idct/fdct
+ idct (encoding & decoding)
+ fdct (encoding)
+ difficult to optimize
+
+dct_quantize_trellis
+ Used for encoding with trellis quantization.
+ difficult to optimize
+
+dct_quantize
+ Used for encoding.
+
+dct_unquantize_mpeg1
+ Used in MPEG-1 en/decoding.
+
+dct_unquantize_mpeg2
+ Used in MPEG-2 en/decoding.
+
+dct_unquantize_h263
+ Used in MPEG-4/H.263 en/decoding.
+
+FIXME remaining functions?
+BTW, most of these functions are in dsputil.c/.h, some are in mpegvideo.c/.h.
+
+
+
+Alignment:
+Some instructions on some architectures have strict alignment restrictions,
+for example most SSE/SSE2 instructions on x86.
+The minimum guaranteed alignment is written in the .h files, for example:
+ void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, UINT8 *pixels/*align 8*/, int line_size);
+
+
+
+Links:
+http://www.aggregate.org/MAGIC/
+
+x86-specific:
+http://developer.intel.com/design/pentium4/manuals/248966.htm
+
+The IA-32 Intel Architecture Software Developer's Manual, Volume 2:
+Instruction Set Reference
+http://developer.intel.com/design/pentium4/manuals/245471.htm
+
+http://www.agner.org/assem/
+
+AMD Athlon Processor x86 Code Optimization Guide:
+http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/22007.pdf
+
+GCC asm links:
+official doc but quite ugly
+http://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html
+
+a bit old (note "+" is valid for input-output, even though the next document disagrees)
+http://www.cs.virginia.edu/~clc5q/gcc-inline-asm.pdf
diff --git a/contrib/ffmpeg/doc/soc.txt b/contrib/ffmpeg/doc/soc.txt
new file mode 100644
index 000000000..8b4a86db8
--- /dev/null
+++ b/contrib/ffmpeg/doc/soc.txt
@@ -0,0 +1,24 @@
+Google Summer of Code and similar project guidelines
+
+Summer of Code is a project by Google in which students are paid to implement
+some nice new features for various participating open source projects ...
+
+This text is a collection of things to take care of for the next SoC, as
+it's a little late for this year's SoC (2006).
+
+The Goal:
+Our goal with respect to SoC is, and must be, exactly one thing: to
+improve FFmpeg. To reach this goal, code must
+* conform to the svn policy and patch submission guidelines
+* must improve FFmpeg somehow (faster, smaller, "better",
+ more codecs supported, fewer bugs, cleaner, ...)
+
+For mentors and other developers to help students reach that goal, it is
+essential that changes to their codebase are publicly visible, clean and
+easily reviewable. That again leads us to:
+* use of a revision control system like svn
+* separation of cosmetic from non-cosmetic changes (this was almost entirely
+ ignored by mentors and students in SoC 2006, which might lead to a surprise
+ when the code is reviewed at the end before a possible inclusion in
+ FFmpeg; individual changes were generally not reviewable due to cosmetics).
+* frequent commits, so that comments can be provided early
diff --git a/contrib/ffmpeg/doc/texi2pod.pl b/contrib/ffmpeg/doc/texi2pod.pl
new file mode 100755
index 000000000..c414ffcc6
--- /dev/null
+++ b/contrib/ffmpeg/doc/texi2pod.pl
@@ -0,0 +1,427 @@
+#! /usr/bin/perl -w
+
+# Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+
+# This file is part of GNU CC.
+
+# GNU CC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# GNU CC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GNU CC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301 USA
+
+# This does trivial (and I mean _trivial_) conversion of Texinfo
+# markup to Perl POD format. It's intended to be used to extract
+# something suitable for a manpage from a Texinfo document.
+
+$output = 0;
+$skipping = 0;
+%sects = ();
+$section = "";
+@icstack = ();
+@endwstack = ();
+@skstack = ();
+@instack = ();
+$shift = "";
+%defs = ();
+$fnno = 1;
+$inf = "";
+$ibase = "";
+
+while ($_ = shift) {
+ if (/^-D(.*)$/) {
+ if ($1 ne "") {
+ $flag = $1;
+ } else {
+ $flag = shift;
+ }
+ $value = "";
+ ($flag, $value) = ($flag =~ /^([^=]+)(?:=(.+))?/);
+ die "no flag specified for -D\n"
+ unless $flag ne "";
+ die "flags may only contain letters, digits, hyphens, dashes and underscores\n"
+ unless $flag =~ /^[a-zA-Z0-9_-]+$/;
+ $defs{$flag} = $value;
+ } elsif (/^-/) {
+ usage();
+ } else {
+ $in = $_, next unless defined $in;
+ $out = $_, next unless defined $out;
+ usage();
+ }
+}
+
+if (defined $in) {
+ $inf = gensym();
+ open($inf, "<$in") or die "opening \"$in\": $!\n";
+ $ibase = $1 if $in =~ m|^(.+)/[^/]+$|;
+} else {
+ $inf = \*STDIN;
+}
+
+if (defined $out) {
+ open(STDOUT, ">$out") or die "opening \"$out\": $!\n";
+}
+
+while(defined $inf) {
+while(<$inf>) {
+ # Certain commands are discarded without further processing.
+ /^\@(?:
+ [a-z]+index # @*index: useful only in complete manual
+ |need # @need: useful only in printed manual
+ |(?:end\s+)?group # @group .. @end group: ditto
+ |page # @page: ditto
+ |node # @node: useful only in .info file
+ |(?:end\s+)?ifnottex # @ifnottex .. @end ifnottex: use contents
+ )\b/x and next;
+
+ chomp;
+
+ # Look for filename and title markers.
+ /^\@setfilename\s+([^.]+)/ and $fn = $1, next;
+ /^\@settitle\s+([^.]+)/ and $tl = postprocess($1), next;
+
+ # Identify a man title but keep only the one we are interested in.
+ /^\@c\s+man\s+title\s+([A-Za-z0-9-]+)\s+(.+)/ and do {
+ if (exists $defs{$1}) {
+ $fn = $1;
+ $tl = postprocess($2);
+ }
+ next;
+ };
+
+ # Look for blocks surrounded by @c man begin SECTION ... @c man end.
+ # This really oughta be @ifman ... @end ifman and the like, but such
+ # would require rev'ing all other Texinfo translators.
+ /^\@c\s+man\s+begin\s+([A-Z]+)\s+([A-Za-z0-9-]+)/ and do {
+ $output = 1 if exists $defs{$2};
+ $sect = $1;
+ next;
+ };
+ /^\@c\s+man\s+begin\s+([A-Z]+)/ and $sect = $1, $output = 1, next;
+ /^\@c\s+man\s+end/ and do {
+ $sects{$sect} = "" unless exists $sects{$sect};
+ $sects{$sect} .= postprocess($section);
+ $section = "";
+ $output = 0;
+ next;
+ };
+
+ # handle variables
+ /^\@set\s+([a-zA-Z0-9_-]+)\s*(.*)$/ and do {
+ $defs{$1} = $2;
+ next;
+ };
+ /^\@clear\s+([a-zA-Z0-9_-]+)/ and do {
+ delete $defs{$1};
+ next;
+ };
+
+ next unless $output;
+
+ # Discard comments. (Can't do it above, because then we'd never see
+ # @c man lines.)
+ /^\@c\b/ and next;
+
+ # End-block handler goes up here because it needs to operate even
+ # if we are skipping.
+ /^\@end\s+([a-z]+)/ and do {
+ # Ignore @end foo, where foo is not an operation which may
+ # cause us to skip, if we are presently skipping.
+ my $ended = $1;
+ next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex)$/;
+
+ die "\@end $ended without \@$ended at line $.\n" unless defined $endw;
+ die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw;
+
+ $endw = pop @endwstack;
+
+ if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) {
+ $skipping = pop @skstack;
+ next;
+ } elsif ($ended =~ /^(?:example|smallexample|display)$/) {
+ $shift = "";
+ $_ = ""; # need a paragraph break
+ } elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) {
+ $_ = "\n=back\n";
+ $ic = pop @icstack;
+ } else {
+ die "unknown command \@end $ended at line $.\n";
+ }
+ };
+
+ # We must handle commands which can cause skipping even while we
+ # are skipping, otherwise we will not process nested conditionals
+ # correctly.
+ /^\@ifset\s+([a-zA-Z0-9_-]+)/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = "ifset";
+ $skipping = 1 unless exists $defs{$1};
+ next;
+ };
+
+ /^\@ifclear\s+([a-zA-Z0-9_-]+)/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = "ifclear";
+ $skipping = 1 if exists $defs{$1};
+ next;
+ };
+
+ /^\@(ignore|menu|iftex)\b/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = $1;
+ $skipping = 1;
+ next;
+ };
+
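+ # Everything below this point produces output, so it can be skipped
+ # entirely while inside an @ignore/@ifset/@ifclear/@menu/@iftex block.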
+ next if $skipping;
+
+ # Character entities. First the ones that can be replaced by raw text
+ # or discarded outright:
+ s/\@copyright\{\}/(c)/g;
+ s/\@dots\{\}/.../g;
+ s/\@enddots\{\}/..../g;
+ s/\@([.!? ])/$1/g;
+ s/\@[:-]//g;
+ s/\@bullet(?:\{\})?/*/g;
+ s/\@TeX\{\}/TeX/g;
+ s/\@pounds\{\}/\#/g;
+ s/\@minus(?:\{\})?/-/g;
+ s/\\,/,/g;
+
+ # Now the ones that have to be replaced by special escapes
+ # (which will be turned back into text by unmunge())
+ s/&/&amp;/g;
+ s/\@\{/&lbrace;/g;
+ s/\@\}/&rbrace;/g;
+ s/\@\@/&at;/g;
+
+ # Inside a verbatim block, handle @var specially.
+ if ($shift ne "") {
+ s/\@var\{([^\}]*)\}/<$1>/g;
+ }
+
+ # POD doesn't interpret E<> inside a verbatim block.
+ if ($shift eq "") {
+ s/</&lt;/g;
+ s/>/&gt;/g;
+ } else {
+ s/</&LT;/g;
+ s/>/&GT;/g;
+ }
+
+ # Single line command handlers.
+
+ /^\@include\s+(.+)$/ and do {
+ push @instack, $inf;
+ $inf = gensym();
+
+ # Try cwd and $ibase.
+ open($inf, "<" . $1)
+ or open($inf, "<" . $ibase . "/" . $1)
+ or die "cannot open $1 or $ibase/$1: $!\n";
+ next;
+ };
+
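+ # Sectioning commands map directly onto POD headings.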
+ /^\@(?:section|unnumbered|unnumberedsec|center)\s+(.+)$/
+ and $_ = "\n=head2 $1\n";
+ /^\@subsection\s+(.+)$/
+ and $_ = "\n=head3 $1\n";
+
+ # Block command handlers:
+ /^\@itemize\s+(\@[a-z]+|\*|-)/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ $ic = $1;
+ $_ = "\n=over 4\n";
+ $endw = "itemize";
+ };
+
+ /^\@enumerate(?:\s+([a-zA-Z0-9]+))?/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ if (defined $1) {
+ $ic = $1 . ".";
+ } else {
+ $ic = "1.";
+ }
+ $_ = "\n=over 4\n";
+ $endw = "enumerate";
+ };
+
+ /^\@([fv]?table)\s+(\@[a-z]+)/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ $endw = $1;
+ $ic = $2;
+ $ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env)/B/;
+ $ic =~ s/\@(?:code|kbd)/C/;
+ $ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
+ $ic =~ s/\@(?:file)/F/;
+ $_ = "\n=over 4\n";
+ };
+
+ /^\@((?:small)?example|display)/ and do {
+ push @endwstack, $endw;
+ $endw = $1;
+ $shift = "\t";
+ $_ = ""; # need a paragraph break
+ };
+
+ /^\@itemx?\s*(.+)?$/ and do {
+ if (defined $1) {
+ # Entity escapes prevent munging by the <> processing below.
+ $_ = "\n=item $ic\&LT;$1\&GT;\n";
+ } else {
+ $_ = "\n=item $ic\n";
+ $ic =~ y/A-Ya-y/B-Zb-z/;
+ $ic =~ s/(\d+)/$1 + 1/eg;
+ }
+ };
+
+ $section .= $shift.$_."\n";
+}
+# End of current file.
+close($inf);
+$inf = pop @instack;
+}
+
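+# All input has been read; assemble the NAME section and close the
+# footnote list if one was started.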
+die "No filename or title\n" unless defined $fn && defined $tl;
+
+$sects{NAME} = "$fn \- $tl\n";
+$sects{FOOTNOTES} .= "=back\n" if exists $sects{FOOTNOTES};
+
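+# Emit the collected sections in conventional man-page order.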
+for $sect (qw(NAME SYNOPSIS DESCRIPTION OPTIONS EXAMPLES ENVIRONMENT FILES
+ BUGS NOTES FOOTNOTES SEEALSO AUTHOR COPYRIGHT)) {
+ if(exists $sects{$sect}) {
+ $head = $sect;
+ $head =~ s/SEEALSO/SEE ALSO/;
+ print "=head1 $head\n\n";
+ print scalar unmunge ($sects{$sect});
+ print "\n";
+ }
+}
+
+sub usage
+{
+ die "usage: $0 [-D toggle...] [infile [outfile]]\n";
+}
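+
+# Example invocation (illustrative file names):
+#   texi2pod.pl -Dmyprog mydoc.texi mydoc.pod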
+
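+# postprocess: convert inline Texinfo markup in a section body to POD.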
+sub postprocess
+{
+ local $_ = $_[0];
+
+ # @value{foo} is replaced by whatever 'foo' is defined as.
+ while (m/(\@value\{([a-zA-Z0-9_-]+)\})/g) {
+ if (! exists $defs{$2}) {
+ print STDERR "Option $2 not defined\n";
+ s/\Q$1\E//;
+ } else {
+ $value = $defs{$2};
+ s/\Q$1\E/$value/;
+ }
+ }
+
+ # Formatting commands.
+ # Temporary escape for @r.
+ s/\@r\{([^\}]*)\}/R<$1>/g;
+ s/\@(?:dfn|var|emph|cite|i)\{([^\}]*)\}/I<$1>/g;
+ s/\@(?:code|kbd)\{([^\}]*)\}/C<$1>/g;
+ s/\@(?:gccoptlist|samp|strong|key|option|env|command|b)\{([^\}]*)\}/B<$1>/g;
+ s/\@sc\{([^\}]*)\}/\U$1/g;
+ s/\@file\{([^\}]*)\}/F<$1>/g;
+ s/\@w\{([^\}]*)\}/S<$1>/g;
+ s/\@(?:dmn|math)\{([^\}]*)\}/$1/g;
+
+ # Cross references are thrown away, as are @noindent and @refill.
+ # (@noindent is impossible in .pod, and @refill is unnecessary.)
+ # @* is also impossible in .pod; we discard it and any newline that
+ # follows it. Similarly, our macro @gol must be discarded.
+
+ s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
+ s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
+ s/;\s+\@pxref\{(?:[^\}]*)\}//g;
+ s/\@noindent\s*//g;
+ s/\@refill//g;
+ s/\@gol//g;
+ s/\@\*\s*\n?//g;
+
+ # @uref can take one, two, or three arguments, with different
+ # semantics each time. @url and @email are just like @uref with
+ # one argument, for our purposes.
+ s/\@(?:uref|url|email)\{([^\},]*)\}/&lt;B<$1>&gt;/g;
+ s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g;
+ s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g;
+
+ # Turn B<blah I<blah> blah> into B<blah> I<blah> B<blah> to
+ # match Texinfo semantics of @emph inside @samp. Also handle @r
+ # inside bold.
+ s/&LT;/</g;
+ s/&GT;/>/g;
+ 1 while s/B<((?:[^<>]|I<[^<>]*>)*)R<([^>]*)>/B<$1>${2}B</g;
+ 1 while (s/B<([^<>]*)I<([^>]+)>/B<$1>I<$2>B</g);
+ 1 while (s/I<([^<>]*)B<([^>]+)>/I<$1>B<$2>I</g);
+ s/[BI]<>//g;
+ s/([BI])<(\s+)([^>]+)>/$2$1<$3>/g;
+ s/([BI])<([^>]+?)(\s+)>/$1<$2>$3/g;
+
+ # Extract footnotes. This has to be done after all other
+ # processing because otherwise the regexp will choke on formatting
+ # inside @footnote.
+ while (/\@footnote/g) {
+ s/\@footnote\{([^\}]+)\}/[$fnno]/;
+ add_footnote($1, $fnno);
+ $fnno++;
+ }
+
+ return $_;
+}
+
+sub unmunge
+{
+ # Replace escaped symbols with their equivalents.
+ local $_ = $_[0];
+
+ s/&lt;/E<lt>/g;
+ s/&gt;/E<gt>/g;
+ s/&lbrace;/\{/g;
+ s/&rbrace;/\}/g;
+ s/&at;/\@/g;
+ s/&amp;/&/g;
+ return $_;
+}
+
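+# add_footnote: append a footnote body to the FOOTNOTES section as a
+# numbered =item.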
+sub add_footnote
+{
+ unless (exists $sects{FOOTNOTES}) {
+ $sects{FOOTNOTES} = "\n=over 4\n\n";
+ }
+
+ $sects{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
+ $sects{FOOTNOTES} .= $_[0];
+ $sects{FOOTNOTES} .= "\n\n";
+}
+
+# stolen from Symbol.pm
+{
+ my $genseq = 0;
+ sub gensym
+ {
+ my $name = "GEN" . $genseq++;
+ my $ref = \*{$name};
+ delete $::{$name};
+ return $ref;
+ }
+}