Diffstat (limited to 'contrib/ffmpeg/libavformat')
-rw-r--r--  contrib/ffmpeg/libavformat/4xm.c | 331
-rw-r--r--  contrib/ffmpeg/libavformat/Makefile | 197
-rw-r--r--  contrib/ffmpeg/libavformat/adtsenc.c | 123
-rw-r--r--  contrib/ffmpeg/libavformat/aiff.c | 436
-rw-r--r--  contrib/ffmpeg/libavformat/allformats.c | 182
-rw-r--r--  contrib/ffmpeg/libavformat/allformats.h | 176
-rw-r--r--  contrib/ffmpeg/libavformat/amr.c | 201
-rw-r--r--  contrib/ffmpeg/libavformat/asf-enc.c | 866
-rw-r--r--  contrib/ffmpeg/libavformat/asf.c | 951
-rw-r--r--  contrib/ffmpeg/libavformat/asf.h | 285
-rw-r--r--  contrib/ffmpeg/libavformat/au.c | 209
-rw-r--r--  contrib/ffmpeg/libavformat/audio.c | 352
-rw-r--r--  contrib/ffmpeg/libavformat/avformat.h | 539
-rw-r--r--  contrib/ffmpeg/libavformat/avi.h | 39
-rw-r--r--  contrib/ffmpeg/libavformat/avidec.c | 989
-rw-r--r--  contrib/ffmpeg/libavformat/avienc.c | 580
-rw-r--r--  contrib/ffmpeg/libavformat/avio.c | 192
-rw-r--r--  contrib/ffmpeg/libavformat/avio.h | 201
-rw-r--r--  contrib/ffmpeg/libavformat/aviobuf.c | 790
-rw-r--r--  contrib/ffmpeg/libavformat/avisynth.c | 222
-rw-r--r--  contrib/ffmpeg/libavformat/avs.c | 227
-rw-r--r--  contrib/ffmpeg/libavformat/barpainet.h | 45
-rw-r--r--  contrib/ffmpeg/libavformat/base64.c | 231
-rw-r--r--  contrib/ffmpeg/libavformat/base64.h | 24
-rw-r--r--  contrib/ffmpeg/libavformat/beosaudio.cpp | 465
-rw-r--r--  contrib/ffmpeg/libavformat/crc.c | 98
-rw-r--r--  contrib/ffmpeg/libavformat/cutils.c | 275
-rw-r--r--  contrib/ffmpeg/libavformat/daud.c | 58
-rw-r--r--  contrib/ffmpeg/libavformat/dc1394.c | 193
-rw-r--r--  contrib/ffmpeg/libavformat/dsicin.c | 224
-rw-r--r--  contrib/ffmpeg/libavformat/dv.c | 451
-rw-r--r--  contrib/ffmpeg/libavformat/dv.h | 37
-rw-r--r--  contrib/ffmpeg/libavformat/dv1394.c | 240
-rw-r--r--  contrib/ffmpeg/libavformat/dv1394.h | 357
-rw-r--r--  contrib/ffmpeg/libavformat/dvenc.c | 407
-rw-r--r--  contrib/ffmpeg/libavformat/electronicarts.c | 291
-rw-r--r--  contrib/ffmpeg/libavformat/ffm.c | 792
-rw-r--r--  contrib/ffmpeg/libavformat/file.c | 140
-rw-r--r--  contrib/ffmpeg/libavformat/flic.c | 221
-rw-r--r--  contrib/ffmpeg/libavformat/flvdec.c | 259
-rw-r--r--  contrib/ffmpeg/libavformat/flvenc.c | 284
-rw-r--r--  contrib/ffmpeg/libavformat/framehook.c | 121
-rw-r--r--  contrib/ffmpeg/libavformat/framehook.h | 50
-rw-r--r--  contrib/ffmpeg/libavformat/gif.c | 419
-rw-r--r--  contrib/ffmpeg/libavformat/gifdec.c | 593
-rw-r--r--  contrib/ffmpeg/libavformat/grab.c | 860
-rw-r--r--  contrib/ffmpeg/libavformat/grab_bktr.c | 330
-rw-r--r--  contrib/ffmpeg/libavformat/gxf.c | 525
-rw-r--r--  contrib/ffmpeg/libavformat/gxf.h | 34
-rw-r--r--  contrib/ffmpeg/libavformat/gxfenc.c | 829
-rw-r--r--  contrib/ffmpeg/libavformat/http.c | 289
-rw-r--r--  contrib/ffmpeg/libavformat/idcin.c | 301
-rw-r--r--  contrib/ffmpeg/libavformat/idroq.c | 291
-rw-r--r--  contrib/ffmpeg/libavformat/img.c | 400
-rw-r--r--  contrib/ffmpeg/libavformat/img2.c | 425
-rw-r--r--  contrib/ffmpeg/libavformat/ipmovie.c | 625
-rw-r--r--  contrib/ffmpeg/libavformat/isom.c | 131
-rw-r--r--  contrib/ffmpeg/libavformat/isom.h | 38
-rw-r--r--  contrib/ffmpeg/libavformat/jpeg.c | 240
-rw-r--r--  contrib/ffmpeg/libavformat/libnut.c | 283
-rw-r--r--  contrib/ffmpeg/libavformat/matroska.c | 2767
-rw-r--r--  contrib/ffmpeg/libavformat/mm.c | 212
-rw-r--r--  contrib/ffmpeg/libavformat/mmf.c | 331
-rw-r--r--  contrib/ffmpeg/libavformat/mov.c | 1798
-rw-r--r--  contrib/ffmpeg/libavformat/movenc.c | 1724
-rw-r--r--  contrib/ffmpeg/libavformat/mp3.c | 430
-rw-r--r--  contrib/ffmpeg/libavformat/mpeg.c | 1824
-rw-r--r--  contrib/ffmpeg/libavformat/mpegts.c | 1527
-rw-r--r--  contrib/ffmpeg/libavformat/mpegts.h | 63
-rw-r--r--  contrib/ffmpeg/libavformat/mpegtsenc.c | 676
-rw-r--r--  contrib/ffmpeg/libavformat/mpjpeg.c | 67
-rw-r--r--  contrib/ffmpeg/libavformat/mtv.c | 187
-rw-r--r--  contrib/ffmpeg/libavformat/mxf.c | 1082
-rw-r--r--  contrib/ffmpeg/libavformat/nsvdec.c | 763
-rw-r--r--  contrib/ffmpeg/libavformat/nut.c | 1457
-rw-r--r--  contrib/ffmpeg/libavformat/nut.h | 97
-rw-r--r--  contrib/ffmpeg/libavformat/nutdec.c | 889
-rw-r--r--  contrib/ffmpeg/libavformat/nuv.c | 241
-rw-r--r--  contrib/ffmpeg/libavformat/ogg.c | 283
-rw-r--r--  contrib/ffmpeg/libavformat/ogg2.c | 697
-rw-r--r--  contrib/ffmpeg/libavformat/ogg2.h | 85
-rw-r--r--  contrib/ffmpeg/libavformat/oggparseflac.c | 82
-rw-r--r--  contrib/ffmpeg/libavformat/oggparseogm.c | 166
-rw-r--r--  contrib/ffmpeg/libavformat/oggparsetheora.c | 129
-rw-r--r--  contrib/ffmpeg/libavformat/oggparsevorbis.c | 205
-rw-r--r--  contrib/ffmpeg/libavformat/os_support.c | 96
-rw-r--r--  contrib/ffmpeg/libavformat/os_support.h | 53
-rw-r--r--  contrib/ffmpeg/libavformat/png.c | 889
-rw-r--r--  contrib/ffmpeg/libavformat/pnm.c | 478
-rw-r--r--  contrib/ffmpeg/libavformat/psxstr.c | 364
-rw-r--r--  contrib/ffmpeg/libavformat/qtpalette.h | 295
-rw-r--r--  contrib/ffmpeg/libavformat/raw.c | 843
-rw-r--r--  contrib/ffmpeg/libavformat/riff.c | 468
-rw-r--r--  contrib/ffmpeg/libavformat/riff.h | 51
-rw-r--r--  contrib/ffmpeg/libavformat/rm.c | 1146
-rw-r--r--  contrib/ffmpeg/libavformat/rtp.c | 1099
-rw-r--r--  contrib/ffmpeg/libavformat/rtp.h | 118
-rw-r--r--  contrib/ffmpeg/libavformat/rtp_h264.c | 419
-rw-r--r--  contrib/ffmpeg/libavformat/rtp_h264.h | 26
-rw-r--r--  contrib/ffmpeg/libavformat/rtp_internal.h | 110
-rw-r--r--  contrib/ffmpeg/libavformat/rtpproto.c | 303
-rw-r--r--  contrib/ffmpeg/libavformat/rtsp.c | 1493
-rw-r--r--  contrib/ffmpeg/libavformat/rtsp.h | 98
-rw-r--r--  contrib/ffmpeg/libavformat/rtspcodes.h | 31
-rw-r--r--  contrib/ffmpeg/libavformat/segafilm.c | 310
-rw-r--r--  contrib/ffmpeg/libavformat/sgi.c | 460
-rw-r--r--  contrib/ffmpeg/libavformat/sierravmd.c | 302
-rw-r--r--  contrib/ffmpeg/libavformat/smacker.c | 345
-rw-r--r--  contrib/ffmpeg/libavformat/sol.c | 160
-rw-r--r--  contrib/ffmpeg/libavformat/swf.c | 944
-rw-r--r--  contrib/ffmpeg/libavformat/tcp.c | 232
-rw-r--r--  contrib/ffmpeg/libavformat/tiertexseq.c | 310
-rw-r--r--  contrib/ffmpeg/libavformat/tta.c | 152
-rw-r--r--  contrib/ffmpeg/libavformat/udp.c | 512
-rw-r--r--  contrib/ffmpeg/libavformat/utils.c | 3108
-rw-r--r--  contrib/ffmpeg/libavformat/v4l2.c | 541
-rw-r--r--  contrib/ffmpeg/libavformat/voc.c | 36
-rw-r--r--  contrib/ffmpeg/libavformat/voc.h | 51
-rw-r--r--  contrib/ffmpeg/libavformat/vocdec.c | 155
-rw-r--r--  contrib/ffmpeg/libavformat/vocenc.c | 104
-rw-r--r--  contrib/ffmpeg/libavformat/wav.c | 253
-rw-r--r--  contrib/ffmpeg/libavformat/wc3movie.c | 394
-rw-r--r--  contrib/ffmpeg/libavformat/westwood.c | 414
-rw-r--r--  contrib/ffmpeg/libavformat/wv.c | 202
-rw-r--r--  contrib/ffmpeg/libavformat/yuv.c | 161
-rw-r--r--  contrib/ffmpeg/libavformat/yuv4mpeg.c | 408
126 files changed, 55680 insertions, 0 deletions
diff --git a/contrib/ffmpeg/libavformat/4xm.c b/contrib/ffmpeg/libavformat/4xm.c
new file mode 100644
index 000000000..12e7d9ee4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/4xm.c
@@ -0,0 +1,331 @@
+/*
+ * 4X Technologies .4xm File Demuxer (no muxer)
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file 4xm.c
+ * 4X Technologies file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the .4xm file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#include "avformat.h"
+
+#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
+#define _4XMV_TAG MKTAG('4', 'X', 'M', 'V')
+#define LIST_TAG MKTAG('L', 'I', 'S', 'T')
+#define HEAD_TAG MKTAG('H', 'E', 'A', 'D')
+#define TRK__TAG MKTAG('T', 'R', 'K', '_')
+#define MOVI_TAG MKTAG('M', 'O', 'V', 'I')
+#define VTRK_TAG MKTAG('V', 'T', 'R', 'K')
+#define STRK_TAG MKTAG('S', 'T', 'R', 'K')
+#define std__TAG MKTAG('s', 't', 'd', '_')
+#define name_TAG MKTAG('n', 'a', 'm', 'e')
+#define vtrk_TAG MKTAG('v', 't', 'r', 'k')
+#define strk_TAG MKTAG('s', 't', 'r', 'k')
+#define ifrm_TAG MKTAG('i', 'f', 'r', 'm')
+#define pfrm_TAG MKTAG('p', 'f', 'r', 'm')
+#define cfrm_TAG MKTAG('c', 'f', 'r', 'm')
+#define snd__TAG MKTAG('s', 'n', 'd', '_')
+
+#define vtrk_SIZE 0x44
+#define strk_SIZE 0x28
+
+#define GET_LIST_HEADER() \
+ fourcc_tag = get_le32(pb); \
+ size = get_le32(pb); \
+ if (fourcc_tag != LIST_TAG) \
+ return AVERROR_INVALIDDATA; \
+ fourcc_tag = get_le32(pb);
+
+typedef struct AudioTrack {
+ int sample_rate;
+ int bits;
+ int channels;
+ int stream_index;
+ int adpcm;
+} AudioTrack;
+
+typedef struct FourxmDemuxContext {
+ int width;
+ int height;
+ int video_stream_index;
+ int track_count;
+ AudioTrack *tracks;
+ int selected_track;
+
+ int64_t audio_pts;
+ int64_t video_pts;
+ float fps;
+} FourxmDemuxContext;
+
+static int fourxm_probe(AVProbeData *p)
+{
+ if (p->buf_size < 12)
+ return 0;
+
+ if ((LE_32(&p->buf[0]) != RIFF_TAG) ||
+ (LE_32(&p->buf[8]) != _4XMV_TAG))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int fourxm_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ unsigned int fourcc_tag;
+ unsigned int size;
+ int header_size;
+ FourxmDemuxContext *fourxm = (FourxmDemuxContext *)s->priv_data;
+ unsigned char *header;
+ int i;
+ int current_track = -1;
+ AVStream *st;
+
+ fourxm->track_count = 0;
+ fourxm->tracks = NULL;
+ fourxm->selected_track = 0;
+ fourxm->fps = 1.0;
+
+ /* skip the first 3 32-bit numbers */
+ url_fseek(pb, 12, SEEK_CUR);
+
+ /* check for LIST-HEAD */
+ GET_LIST_HEADER();
+ header_size = size - 4;
+ if (fourcc_tag != HEAD_TAG)
+ return AVERROR_INVALIDDATA;
+
+ /* allocate space for the header and load the whole thing */
+ header = av_malloc(header_size);
+ if (!header)
+ return AVERROR_NOMEM;
+ if (get_buffer(pb, header, header_size) != header_size)
+ return AVERROR_IO;
+
+ /* take the lazy approach and search for any and all vtrk and strk chunks */
+ for (i = 0; i < header_size - 8; i++) {
+ fourcc_tag = LE_32(&header[i]);
+ size = LE_32(&header[i + 4]);
+
+ if (fourcc_tag == std__TAG) {
+ fourxm->fps = av_int2flt(LE_32(&header[i + 12]));
+ } else if (fourcc_tag == vtrk_TAG) {
+ /* check that there is enough data */
+ if (size != vtrk_SIZE) {
+ av_free(header);
+ return AVERROR_INVALIDDATA;
+ }
+ fourxm->width = LE_32(&header[i + 36]);
+ fourxm->height = LE_32(&header[i + 40]);
+ i += 8 + size;
+
+ /* allocate a new AVStream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 60, 1, fourxm->fps);
+
+ fourxm->video_stream_index = st->index;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_4XM;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = fourxm->width;
+ st->codec->height = fourxm->height;
+
+ } else if (fourcc_tag == strk_TAG) {
+ /* check that there is enough data */
+ if (size != strk_SIZE) {
+ av_free(header);
+ return AVERROR_INVALIDDATA;
+ }
+ current_track = LE_32(&header[i + 8]);
+ if (current_track + 1 > fourxm->track_count) {
+ fourxm->track_count = current_track + 1;
+ if((unsigned)fourxm->track_count >= UINT_MAX / sizeof(AudioTrack))
+ return -1;
+ fourxm->tracks = av_realloc(fourxm->tracks,
+ fourxm->track_count * sizeof(AudioTrack));
+ if (!fourxm->tracks) {
+ av_free(header);
+ return AVERROR_NOMEM;
+ }
+ }
+ fourxm->tracks[current_track].adpcm = LE_32(&header[i + 12]);
+ fourxm->tracks[current_track].channels = LE_32(&header[i + 36]);
+ fourxm->tracks[current_track].sample_rate = LE_32(&header[i + 40]);
+ fourxm->tracks[current_track].bits = LE_32(&header[i + 44]);
+ i += 8 + size;
+
+ /* allocate a new AVStream */
+ st = av_new_stream(s, current_track);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);
+
+ fourxm->tracks[current_track].stream_index = st->index;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = 0;
+ st->codec->channels = fourxm->tracks[current_track].channels;
+ st->codec->sample_rate = fourxm->tracks[current_track].sample_rate;
+ st->codec->bits_per_sample = fourxm->tracks[current_track].bits;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+ if (fourxm->tracks[current_track].adpcm)
+ st->codec->codec_id = CODEC_ID_ADPCM_4XM;
+ else if (st->codec->bits_per_sample == 8)
+ st->codec->codec_id = CODEC_ID_PCM_U8;
+ else
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ }
+ }
+
+ av_free(header);
+
+ /* skip over the LIST-MOVI chunk (which is where the stream should be) */
+ GET_LIST_HEADER();
+ if (fourcc_tag != MOVI_TAG)
+ return AVERROR_INVALIDDATA;
+
+ /* initialize context members */
+ fourxm->video_pts = -1; /* first frame will push to 0 */
+ fourxm->audio_pts = 0;
+
+ return 0;
+}
+
+static int fourxm_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ FourxmDemuxContext *fourxm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned int fourcc_tag;
+ unsigned int size, out_size;
+ int ret = 0;
+ int track_number;
+ int packet_read = 0;
+ unsigned char header[8];
+ int audio_frame_count;
+
+ while (!packet_read) {
+
+ if ((ret = get_buffer(&s->pb, header, 8)) < 0)
+ return ret;
+ fourcc_tag = LE_32(&header[0]);
+ size = LE_32(&header[4]);
+ if (url_feof(pb))
+ return AVERROR_IO;
+ switch (fourcc_tag) {
+
+ case LIST_TAG:
+ /* this is a good time to bump the video pts */
+ fourxm->video_pts ++;
+
+ /* skip the LIST-* tag and move on to the next fourcc */
+ get_le32(pb);
+ break;
+
+ case ifrm_TAG:
+ case pfrm_TAG:
+ case cfrm_TAG:{
+
+ /* allocate 8 more bytes than 'size' to account for fourcc
+ * and size */
+ if (size + 8 < size || av_new_packet(pkt, size + 8))
+ return AVERROR_IO;
+ pkt->stream_index = fourxm->video_stream_index;
+ pkt->pts = fourxm->video_pts;
+ pkt->pos = url_ftell(&s->pb);
+ memcpy(pkt->data, header, 8);
+ ret = get_buffer(&s->pb, &pkt->data[8], size);
+
+ if (ret < 0)
+ av_free_packet(pkt);
+ else
+ packet_read = 1;
+ break;
+ }
+
+ case snd__TAG:
+ track_number = get_le32(pb);
+ out_size= get_le32(pb);
+ size-=8;
+
+ if (track_number == fourxm->selected_track) {
+ ret= av_get_packet(&s->pb, pkt, size);
+ if(ret<0)
+ return AVERROR_IO;
+ pkt->stream_index =
+ fourxm->tracks[fourxm->selected_track].stream_index;
+ pkt->pts = fourxm->audio_pts;
+ packet_read = 1;
+
+ /* pts accounting */
+ audio_frame_count = size;
+ if (fourxm->tracks[fourxm->selected_track].adpcm)
+ audio_frame_count -=
+ 2 * (fourxm->tracks[fourxm->selected_track].channels);
+ audio_frame_count /=
+ fourxm->tracks[fourxm->selected_track].channels;
+ if (fourxm->tracks[fourxm->selected_track].adpcm)
+ audio_frame_count *= 2;
+ else
+ audio_frame_count /=
+ (fourxm->tracks[fourxm->selected_track].bits / 8);
+ fourxm->audio_pts += audio_frame_count;
+
+ } else {
+ url_fseek(pb, size, SEEK_CUR);
+ }
+ break;
+
+ default:
+ url_fseek(pb, size, SEEK_CUR);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int fourxm_read_close(AVFormatContext *s)
+{
+ FourxmDemuxContext *fourxm = (FourxmDemuxContext *)s->priv_data;
+
+ av_free(fourxm->tracks);
+
+ return 0;
+}
+
+AVInputFormat fourxm_demuxer = {
+ "4xm",
+ "4X Technologies format",
+ sizeof(FourxmDemuxContext),
+ fourxm_probe,
+ fourxm_read_header,
+ fourxm_read_packet,
+ fourxm_read_close,
+};
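
The AVInputFormat above is only ever exercised through the generic libavformat API. As a point of reference, a minimal caller for the pre-0.5 API used throughout this import might look like the sketch below; the function name and the printf diagnostics are illustrative, not part of the tree:

    #include <stdio.h>
    #include "avformat.h"

    /* Illustrative only: open a file, let libavformat probe it (fourxm_probe()
     * will claim .4xm input) and dump every packet it demuxes. */
    int dump_packets(const char *filename)
    {
        AVFormatContext *ic;
        AVPacket pkt;

        av_register_all();                      /* registers fourxm_demuxer among others */
        if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
            return -1;                          /* probe or I/O failure */
        if (av_find_stream_info(ic) < 0) {
            av_close_input_file(ic);
            return -1;
        }
        while (av_read_frame(ic, &pkt) >= 0) {  /* ends up in fourxm_read_packet() */
            printf("stream %d, pts %lld, %d bytes\n",
                   pkt.stream_index, (long long)pkt.pts, pkt.size);
            av_free_packet(&pkt);
        }
        av_close_input_file(ic);
        return 0;
    }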
diff --git a/contrib/ffmpeg/libavformat/Makefile b/contrib/ffmpeg/libavformat/Makefile
new file mode 100644
index 000000000..fd2ac2a29
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/Makefile
@@ -0,0 +1,197 @@
+#
+# libavformat Makefile
+# (c) 2000-2003 Fabrice Bellard
+#
+include ../config.mak
+
+CFLAGS+=-I$(SRC_PATH)/libavcodec
+
+OBJS= utils.o cutils.o os_support.o allformats.o
+
+HEADERS = avformat.h avio.h rtp.h rtsp.h rtspcodes.h
+
+# muxers/demuxers
+OBJS-$(CONFIG_FOURXM_DEMUXER) += 4xm.o
+OBJS-$(CONFIG_ADTS_MUXER) += adtsenc.o
+OBJS-$(CONFIG_AIFF_DEMUXER) += aiff.o riff.o
+OBJS-$(CONFIG_AIFF_MUXER) += aiff.o riff.o
+OBJS-$(CONFIG_AMR_DEMUXER) += amr.o
+OBJS-$(CONFIG_AMR_MUXER) += amr.o
+OBJS-$(CONFIG_ASF_DEMUXER) += asf.o riff.o
+OBJS-$(CONFIG_ASF_MUXER) += asf-enc.o riff.o
+OBJS-$(CONFIG_ASF_STREAM_MUXER) += asf-enc.o riff.o
+OBJS-$(CONFIG_AU_DEMUXER) += au.o riff.o
+OBJS-$(CONFIG_AU_MUXER) += au.o riff.o
+OBJS-$(CONFIG_AVI_DEMUXER) += avidec.o riff.o
+OBJS-$(CONFIG_AVI_MUXER) += avienc.o riff.o
+OBJS-$(CONFIG_AVISYNTH) += avisynth.o
+OBJS-$(CONFIG_AVS_DEMUXER) += avs.o vocdec.o voc.o riff.o
+OBJS-$(CONFIG_CRC_MUXER) += crc.o
+OBJS-$(CONFIG_FRAMECRC_MUXER) += crc.o
+OBJS-$(CONFIG_DAUD_DEMUXER) += daud.o
+OBJS-$(CONFIG_DSICIN_DEMUXER) += dsicin.o
+OBJS-$(CONFIG_DV_DEMUXER) += dv.o
+OBJS-$(CONFIG_DV_MUXER) += dvenc.o
+OBJS-$(CONFIG_EA_DEMUXER) += electronicarts.o
+OBJS-$(CONFIG_FFM_DEMUXER) += ffm.o
+OBJS-$(CONFIG_FFM_MUXER) += ffm.o
+OBJS-$(CONFIG_FLIC_DEMUXER) += flic.o
+OBJS-$(CONFIG_FLV_DEMUXER) += flvdec.o
+OBJS-$(CONFIG_FLV_MUXER) += flvenc.o
+OBJS-$(CONFIG_GIF_MUXER) += gif.o
+OBJS-$(CONFIG_GIF_DEMUXER) += gifdec.o
+OBJS-$(CONFIG_GXF_DEMUXER) += gxf.o
+OBJS-$(CONFIG_IDCIN_DEMUXER) += idcin.o
+OBJS-$(CONFIG_ROQ_DEMUXER) += idroq.o
+OBJS-$(CONFIG_IMAGE2_DEMUXER) += img2.o
+OBJS-$(CONFIG_IMAGE2PIPE_DEMUXER) += img2.o
+OBJS-$(CONFIG_IMAGE2_MUXER) += img2.o
+OBJS-$(CONFIG_IMAGE2PIPE_MUXER) += img2.o
+OBJS-$(CONFIG_IPMOVIE_DEMUXER) += ipmovie.o
+OBJS-$(CONFIG_MATROSKA_DEMUXER) += matroska.o riff.o
+OBJS-$(CONFIG_MM_DEMUXER) += mm.o
+OBJS-$(CONFIG_MMF_DEMUXER) += mmf.o riff.o
+OBJS-$(CONFIG_MMF_MUXER) += mmf.o riff.o
+OBJS-$(CONFIG_MOV_DEMUXER) += mov.o riff.o isom.o
+OBJS-$(CONFIG_MOV_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_MTV_DEMUXER) += mtv.o
+OBJS-$(CONFIG_TGP_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_MP4_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_PSP_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_TG2_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_MP3_DEMUXER) += mp3.o
+OBJS-$(CONFIG_MP2_MUXER) += mp3.o
+OBJS-$(CONFIG_MP3_MUXER) += mp3.o
+OBJS-$(CONFIG_MPEG1SYSTEM_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEG1VCD_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEG2VOB_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEG2SVCD_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEG2DVD_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEGPS_DEMUXER) += mpeg.o
+OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpegts.o
+OBJS-$(CONFIG_MPEGTS_MUXER) += mpegtsenc.o
+OBJS-$(CONFIG_MPJPEG_MUXER) += mpjpeg.o
+OBJS-$(CONFIG_MXF_DEMUXER) += mxf.o
+OBJS-$(CONFIG_NSV_DEMUXER) += nsvdec.o riff.o
+OBJS-$(CONFIG_NUV_DEMUXER) += nuv.o riff.o
+OBJS-$(CONFIG_OGG_DEMUXER) += ogg2.o \
+ oggparsevorbis.o \
+ oggparsetheora.o \
+ oggparseflac.o \
+ oggparseogm.o \
+ riff.o
+OBJS-$(CONFIG_STR_DEMUXER) += psxstr.o
+OBJS-$(CONFIG_SHORTEN_DEMUXER) += raw.o
+OBJS-$(CONFIG_FLAC_DEMUXER) += raw.o
+OBJS-$(CONFIG_FLAC_MUXER) += raw.o
+OBJS-$(CONFIG_AC3_DEMUXER) += raw.o
+OBJS-$(CONFIG_AC3_MUXER) += raw.o
+OBJS-$(CONFIG_DTS_DEMUXER) += raw.o
+OBJS-$(CONFIG_AAC_DEMUXER) += raw.o
+OBJS-$(CONFIG_H261_DEMUXER) += raw.o
+OBJS-$(CONFIG_H261_MUXER) += raw.o
+OBJS-$(CONFIG_H263_DEMUXER) += raw.o
+OBJS-$(CONFIG_H263_MUXER) += raw.o
+OBJS-$(CONFIG_M4V_DEMUXER) += raw.o
+OBJS-$(CONFIG_M4V_MUXER) += raw.o
+OBJS-$(CONFIG_H264_DEMUXER) += raw.o
+OBJS-$(CONFIG_H264_MUXER) += raw.o
+OBJS-$(CONFIG_MPEGVIDEO_DEMUXER) += raw.o
+OBJS-$(CONFIG_MPEG1VIDEO_MUXER) += raw.o
+OBJS-$(CONFIG_MPEG2VIDEO_MUXER) += raw.o
+OBJS-$(CONFIG_MJPEG_DEMUXER) += raw.o
+OBJS-$(CONFIG_INGENIENT_DEMUXER) += raw.o
+OBJS-$(CONFIG_MJPEG_MUXER) += raw.o
+OBJS-$(CONFIG_RAWVIDEO_DEMUXER) += raw.o
+OBJS-$(CONFIG_RAWVIDEO_MUXER) += raw.o
+OBJS-$(CONFIG_NULL_MUXER) += raw.o
+OBJS-$(CONFIG_RM_DEMUXER) += rm.o
+OBJS-$(CONFIG_RM_MUXER) += rm.o
+OBJS-$(CONFIG_SEGAFILM_DEMUXER) += segafilm.o
+OBJS-$(CONFIG_VMD_DEMUXER) += sierravmd.o
+OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o
+OBJS-$(CONFIG_SOL_DEMUXER) += sol.o
+OBJS-$(CONFIG_SWF_DEMUXER) += swf.o
+OBJS-$(CONFIG_SWF_MUXER) += swf.o
+OBJS-$(CONFIG_TIERTEXSEQ_DEMUXER) += tiertexseq.o
+OBJS-$(CONFIG_TTA_DEMUXER) += tta.o
+OBJS-$(CONFIG_VOC_DEMUXER) += vocdec.o voc.o riff.o
+OBJS-$(CONFIG_VOC_MUXER) += vocenc.o voc.o riff.o
+OBJS-$(CONFIG_WAV_DEMUXER) += wav.o riff.o
+OBJS-$(CONFIG_WAV_MUXER) += wav.o riff.o
+OBJS-$(CONFIG_WC3_DEMUXER) += wc3movie.o
+OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood.o
+OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood.o
+OBJS-$(CONFIG_WV_DEMUXER) += wv.o
+OBJS-$(CONFIG_YUV4MPEGPIPE_MUXER) += yuv4mpeg.o
+OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER) += yuv4mpeg.o
+
+OBJS+= framehook.o
+
+ifeq ($(CONFIG_VIDEO4LINUX),yes)
+OBJS-$(CONFIG_VIDEO_GRAB_DEVICE_DEMUXER) += grab.o
+endif
+
+ifeq ($(CONFIG_VIDEO4LINUX2),yes)
+OBJS-$(CONFIG_V4L2_DEMUXER) += v4l2.o
+endif
+
+ifeq ($(CONFIG_BKTR),yes)
+OBJS-$(CONFIG_VIDEO_GRAB_DEVICE_DEMUXER) += grab_bktr.o
+endif
+
+ifeq ($(CONFIG_DV1394),yes)
+OBJS-$(CONFIG_DV1394_DEMUXER) += dv1394.o
+endif
+
+ifeq ($(CONFIG_DC1394),yes)
+OBJS-$(CONFIG_DC1394_DEMUXER) += dc1394.o
+endif
+
+ifeq ($(CONFIG_AUDIO_OSS),yes)
+OBJS-$(CONFIG_AUDIO_DEMUXER) += audio.o
+OBJS-$(CONFIG_AUDIO_MUXER) += audio.o
+endif
+
+EXTRALIBS := -L$(BUILD_ROOT)/libavutil -lavutil$(BUILDSUF) \
+ -lavcodec$(BUILDSUF) -L$(BUILD_ROOT)/libavcodec $(EXTRALIBS)
+
+ifeq ($(CONFIG_AUDIO_BEOS),yes)
+CPPOBJS+= beosaudio.o
+endif
+
+# protocols I/O
+OBJS+= avio.o aviobuf.o
+
+ifeq ($(CONFIG_PROTOCOLS),yes)
+OBJS+= file.o
+ifeq ($(CONFIG_NETWORK),yes)
+OBJS+= udp.o tcp.o http.o rtsp.o rtp.o rtpproto.o mpegts.o base64.o rtp_h264.o
+endif
+endif
+
+ifeq ($(CONFIG_LIBNUT),yes)
+OBJS-$(CONFIG_NUT_DEMUXER) += libnut.o riff.o
+OBJS-$(CONFIG_NUT_MUXER) += libnut.o riff.o
+else
+OBJS-$(CONFIG_NUT_DEMUXER) += nutdec.o riff.o
+#OBJS-$(CONFIG_NUT_MUXER) += nutenc.o riff.o
+endif
+
+ifeq ($(CONFIG_LIBOGG),yes)
+OBJS-$(CONFIG_OGG_MUXER) += ogg.o
+endif
+
+ifeq ($(CONFIG_GPL),yes)
+OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o
+endif
+
+OBJS += $(OBJS-yes)
+
+NAME=avformat
+ifeq ($(BUILD_SHARED),yes)
+LIBVERSION=$(LAVFVERSION)
+LIBMAJOR=$(LAVFMAJOR)
+endif
+
+include ../common.mak
diff --git a/contrib/ffmpeg/libavformat/adtsenc.c b/contrib/ffmpeg/libavformat/adtsenc.c
new file mode 100644
index 000000000..1ef683838
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/adtsenc.c
@@ -0,0 +1,123 @@
+/*
+ * ADTS muxer.
+ * Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@smartjog.com>
+ * Mans Rullgard <mru@inprovide.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+
+#define ADTS_HEADER_SIZE 7
+
+typedef struct {
+ int write_adts;
+ int objecttype;
+ int sample_rate_index;
+ int channel_conf;
+} ADTSContext;
+
+static int decode_extradata(ADTSContext *adts, uint8_t *buf, int size)
+{
+ GetBitContext gb;
+
+ init_get_bits(&gb, buf, size * 8);
+ adts->objecttype = get_bits(&gb, 5) - 1;
+ adts->sample_rate_index = get_bits(&gb, 4);
+ adts->channel_conf = get_bits(&gb, 4);
+
+ adts->write_adts = 1;
+
+ return 0;
+}
+
+static int adts_write_header(AVFormatContext *s)
+{
+ ADTSContext *adts = s->priv_data;
+ AVCodecContext *avc = s->streams[0]->codec;
+
+ if(avc->extradata_size > 0)
+ decode_extradata(adts, avc->extradata, avc->extradata_size);
+
+ return 0;
+}
+
+static int adts_write_frame_header(AVFormatContext *s, int size)
+{
+ ADTSContext *ctx = s->priv_data;
+ PutBitContext pb;
+ uint8_t buf[ADTS_HEADER_SIZE];
+
+ init_put_bits(&pb, buf, ADTS_HEADER_SIZE);
+
+ /* adts_fixed_header */
+ put_bits(&pb, 12, 0xfff); /* syncword */
+ put_bits(&pb, 1, 0); /* ID */
+ put_bits(&pb, 2, 0); /* layer */
+ put_bits(&pb, 1, 1); /* protection_absent */
+ put_bits(&pb, 2, ctx->objecttype); /* profile_objecttype */
+ put_bits(&pb, 4, ctx->sample_rate_index);
+ put_bits(&pb, 1, 0); /* private_bit */
+ put_bits(&pb, 3, ctx->channel_conf); /* channel_configuration */
+ put_bits(&pb, 1, 0); /* original_copy */
+ put_bits(&pb, 1, 0); /* home */
+
+ /* adts_variable_header */
+ put_bits(&pb, 1, 0); /* copyright_identification_bit */
+ put_bits(&pb, 1, 0); /* copyright_identification_start */
+ put_bits(&pb, 13, ADTS_HEADER_SIZE + size); /* aac_frame_length */
+ put_bits(&pb, 11, 0x7ff); /* adts_buffer_fullness */
+ put_bits(&pb, 2, 0); /* number_of_raw_data_blocks_in_frame */
+
+ flush_put_bits(&pb);
+ put_buffer(&s->pb, buf, ADTS_HEADER_SIZE);
+
+ return 0;
+}
+
+static int adts_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int adts_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ADTSContext *adts = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+
+ if (!pkt->size)
+ return 0;
+ if(adts->write_adts)
+ adts_write_frame_header(s, pkt->size);
+ put_buffer(pb, pkt->data, pkt->size);
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+AVOutputFormat adts_muxer = {
+ "adts",
+ "ADTS AAC",
+ "audio/aac",
+ "aac",
+ sizeof(ADTSContext),
+ CODEC_ID_AAC,
+ CODEC_ID_NONE,
+ adts_write_header,
+ adts_write_packet,
+ adts_write_trailer,
+};
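
adts_write_frame_header() above emits a fixed 56-bit (7-byte) header through PutBitContext. As a hedged cross-check of that byte layout, the sketch below packs the same fields with plain shifts, assuming the muxer's defaults (ID = 0, layer = 0, protection_absent = 1, buffer fullness 0x7ff, number_of_raw_data_blocks_in_frame = 0); pack_adts_header is an illustrative name, not part of the tree:

    #include <stdint.h>

    /* objecttype, sr_index and chan_cfg mirror the ADTSContext fields above;
     * payload_size is the size of the raw AAC frame that follows the header. */
    static void pack_adts_header(uint8_t out[7], int objecttype, int sr_index,
                                 int chan_cfg, int payload_size)
    {
        int frame_len = payload_size + 7;       /* aac_frame_length includes the header */

        out[0] = 0xFF;                          /* syncword 0xfff, high 8 bits */
        out[1] = 0xF1;                          /* sync low 4 bits, ID=0, layer=0, protection_absent=1 */
        out[2] = (objecttype << 6) | (sr_index << 2) | ((chan_cfg >> 2) & 1);
        out[3] = ((chan_cfg & 3) << 6) | ((frame_len >> 11) & 3);
        out[4] = (frame_len >> 3) & 0xFF;
        out[5] = ((frame_len & 7) << 5) | 0x1F; /* buffer fullness 0x7ff, high 5 bits */
        out[6] = 0xFC;                          /* fullness low 6 bits, 0 extra raw data blocks */
    }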
diff --git a/contrib/ffmpeg/libavformat/aiff.c b/contrib/ffmpeg/libavformat/aiff.c
new file mode 100644
index 000000000..e4cf66c3b
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/aiff.c
@@ -0,0 +1,436 @@
+/*
+ * AIFF/AIFF-C muxer and demuxer
+ * Copyright (c) 2006 Patrick Guimond
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+#include "intfloat_readwrite.h"
+
+static const CodecTag codec_aiff_tags[] = {
+ { CODEC_ID_PCM_S16BE, MKTAG('N','O','N','E') },
+ { CODEC_ID_PCM_S8, MKTAG('N','O','N','E') },
+ { CODEC_ID_PCM_S24BE, MKTAG('N','O','N','E') },
+ { CODEC_ID_PCM_S32BE, MKTAG('N','O','N','E') },
+ { CODEC_ID_PCM_ALAW, MKTAG('a','l','a','w') },
+ { CODEC_ID_PCM_ALAW, MKTAG('A','L','A','W') },
+ { CODEC_ID_PCM_MULAW, MKTAG('u','l','a','w') },
+ { CODEC_ID_PCM_MULAW, MKTAG('U','L','A','W') },
+ { CODEC_ID_MACE3, MKTAG('M','A','C','3') },
+ { CODEC_ID_MACE6, MKTAG('M','A','C','6') },
+ { CODEC_ID_GSM, MKTAG('G','S','M',' ') },
+ { CODEC_ID_ADPCM_G726, MKTAG('G','7','2','6') },
+ { 0, 0 },
+};
+
+#define AIFF 0
+#define AIFF_C_VERSION1 0xA2805140
+
+static int aiff_codec_get_id (int bps)
+{
+ if (bps <= 8)
+ return CODEC_ID_PCM_S8;
+ if (bps <= 16)
+ return CODEC_ID_PCM_S16BE;
+ if (bps <= 24)
+ return CODEC_ID_PCM_S24BE;
+ if (bps <= 32)
+ return CODEC_ID_PCM_S32BE;
+
+ /* bigger than 32 isn't allowed */
+ return 0;
+}
+
+/* returns the size of the found tag */
+static int get_tag(ByteIOContext *pb, uint32_t * tag)
+{
+ int size;
+
+ if (url_feof(pb))
+ return AVERROR_IO;
+
+ *tag = get_le32(pb);
+ size = get_be32(pb);
+
+ if (size < 0)
+ size = 0x7fffffff;
+
+ return size;
+}
+
+/* Metadata string read */
+static void get_meta(ByteIOContext *pb, char * str, int strsize, int size)
+{
+ int res;
+
+ if (size > strsize-1)
+ res = get_buffer(pb, (uint8_t*)str, strsize-1);
+ else
+ res = get_buffer(pb, (uint8_t*)str, size);
+
+ if (res < 0)
+ return;
+
+ str[res] = 0;
+ if (size & 1)
+ size++;
+ size -= res;
+ if (size)
+ url_fskip(pb, size);
+}
+
+/* Returns the number of sound data frames or negative on error */
+static unsigned int get_aiff_header(ByteIOContext *pb, AVCodecContext *codec,
+ int size, unsigned version)
+{
+ AVExtFloat ext;
+ double sample_rate;
+ unsigned int num_frames;
+
+
+ if (size & 1)
+ size++;
+
+ codec->codec_type = CODEC_TYPE_AUDIO;
+ codec->channels = get_be16(pb);
+ num_frames = get_be32(pb);
+ codec->bits_per_sample = get_be16(pb);
+
+ get_buffer(pb, (uint8_t*)&ext, sizeof(ext));/* Sample rate is in */
+ sample_rate = av_ext2dbl(ext); /* 80 bits BE IEEE extended float */
+ codec->sample_rate = sample_rate;
+ size -= 18;
+
+ /* Got an AIFF-C? */
+ if (version == AIFF_C_VERSION1) {
+ codec->codec_tag = get_le32(pb);
+ codec->codec_id = codec_get_id (codec_aiff_tags, codec->codec_tag);
+
+ if (codec->codec_id == CODEC_ID_PCM_S16BE) {
+ codec->codec_id = aiff_codec_get_id (codec->bits_per_sample);
+ codec->bits_per_sample = av_get_bits_per_sample(codec->codec_id);
+ }
+
+ size -= 4;
+ } else {
+ /* Need the codec type */
+ codec->codec_id = aiff_codec_get_id (codec->bits_per_sample);
+ codec->bits_per_sample = av_get_bits_per_sample(codec->codec_id);
+ }
+
+ if (!codec->codec_id)
+ return AVERROR_INVALIDDATA;
+
+ /* Block align needs to be computed in all cases, as the definition
+ * is specific to applications -> here we use the WAVE format definition */
+ codec->block_align = (codec->bits_per_sample * codec->channels) >> 3;
+
+ codec->bit_rate = codec->sample_rate * (codec->block_align << 3);
+
+ /* Chunk is over */
+ if (size)
+ url_fseek(pb, size, SEEK_CUR);
+
+ return num_frames;
+}
+
+#ifdef CONFIG_MUXERS
+typedef struct {
+ offset_t form;
+ offset_t frames;
+ offset_t ssnd;
+} AIFFOutputContext;
+
+static int aiff_write_header(AVFormatContext *s)
+{
+ AIFFOutputContext *aiff = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc = s->streams[0]->codec;
+ AVExtFloat sample_rate;
+
+ /* First verify if format is ok */
+ enc->codec_tag = codec_get_tag(codec_aiff_tags, enc->codec_id);
+ if (!enc->codec_tag) {
+ av_free(aiff);
+ return -1;
+ }
+
+ /* FORM AIFF header */
+ put_tag(pb, "FORM");
+ aiff->form = url_ftell(pb);
+ put_be32(pb, 0); /* file length */
+ put_tag(pb, "AIFC");
+
+ /* Version chunk */
+ put_tag(pb, "FVER");
+ put_be32(pb, 4);
+ put_be32(pb, 0xA2805140);
+
+ /* Common chunk */
+ put_tag(pb, "COMM");
+ put_be32(pb, 24); /* size */
+ put_be16(pb, enc->channels); /* Number of channels */
+
+ aiff->frames = url_ftell(pb);
+ put_be32(pb, 0); /* Number of frames */
+
+ if (!enc->bits_per_sample)
+ enc->bits_per_sample = av_get_bits_per_sample(enc->codec_id);
+ if (!enc->bits_per_sample) {
+ av_log(s, AV_LOG_ERROR, "could not compute bits per sample\n");
+ return -1;
+ }
+ if (!enc->block_align)
+ enc->block_align = (enc->bits_per_sample * enc->channels) >> 3;
+
+ put_be16(pb, enc->bits_per_sample); /* Sample size */
+
+ sample_rate = av_dbl2ext((double)enc->sample_rate);
+ put_buffer(pb, (uint8_t*)&sample_rate, sizeof(sample_rate));
+
+ put_le32(pb, enc->codec_tag);
+ put_be16(pb, 0);
+
+ /* Sound data chunk */
+ put_tag(pb, "SSND");
+ aiff->ssnd = url_ftell(pb); /* Sound chunk size */
+ put_be32(pb, 0); /* Sound samples data size */
+ put_be32(pb, 0); /* Data offset */
+ put_be32(pb, 0); /* Block-size (block align) */
+
+ av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
+
+ /* Data is starting here */
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int aiff_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int aiff_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ AIFFOutputContext *aiff = s->priv_data;
+ AVCodecContext *enc = s->streams[0]->codec;
+
+ /* Chunk sizes must be even */
+ offset_t file_size, end_size;
+ end_size = file_size = url_ftell(pb);
+ if (file_size & 1) {
+ put_byte(pb, 0);
+ end_size++;
+ }
+
+ if (!url_is_streamed(&s->pb)) {
+ /* File length */
+ url_fseek(pb, aiff->form, SEEK_SET);
+ put_be32(pb, (uint32_t)(file_size - aiff->form - 4));
+
+ /* Number of sample frames */
+ url_fseek(pb, aiff->frames, SEEK_SET);
+ put_be32(pb, ((uint32_t)(file_size-aiff->ssnd-12))/enc->block_align);
+
+ /* Sound Data chunk size */
+ url_fseek(pb, aiff->ssnd, SEEK_SET);
+ put_be32(pb, (uint32_t)(file_size - aiff->ssnd - 4));
+
+ /* return to the end */
+ url_fseek(pb, end_size, SEEK_SET);
+
+ put_flush_packet(pb);
+ }
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+static int aiff_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size < 16)
+ return 0;
+ if (p->buf[0] == 'F' && p->buf[1] == 'O' &&
+ p->buf[2] == 'R' && p->buf[3] == 'M' &&
+ p->buf[8] == 'A' && p->buf[9] == 'I' &&
+ p->buf[10] == 'F' && (p->buf[11] == 'F' || p->buf[11] == 'C'))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* aiff input */
+static int aiff_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int size, filesize, offset;
+ uint32_t tag;
+ unsigned version = AIFF_C_VERSION1;
+ ByteIOContext *pb = &s->pb;
+ AVStream * st = s->streams[0];
+
+ /* check FORM header */
+ filesize = get_tag(pb, &tag);
+ if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M'))
+ return AVERROR_INVALIDDATA;
+
+ /* AIFF data type */
+ tag = get_le32(pb);
+ if (tag == MKTAG('A', 'I', 'F', 'F')) /* Got an AIFF file */
+ version = AIFF;
+ else if (tag != MKTAG('A', 'I', 'F', 'C')) /* An AIFF-C file then */
+ return AVERROR_INVALIDDATA;
+
+ filesize -= 4;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ while (filesize > 0) {
+ /* parse different chunks */
+ size = get_tag(pb, &tag);
+ if (size < 0)
+ return size;
+
+ filesize -= size + 8;
+
+ switch (tag) {
+ case MKTAG('C', 'O', 'M', 'M'): /* Common chunk */
+ /* Then for the complete header info */
+ st->nb_frames = get_aiff_header (pb, st->codec, size, version);
+ if (st->nb_frames < 0)
+ return st->nb_frames;
+ break;
+
+ case MKTAG('F', 'V', 'E', 'R'): /* Version chunk */
+ version = get_be32(pb);
+ break;
+
+ case MKTAG('N', 'A', 'M', 'E'): /* Sample name chunk */
+ get_meta (pb, s->title, sizeof(s->title), size);
+ break;
+
+ case MKTAG('A', 'U', 'T', 'H'): /* Author chunk */
+ get_meta (pb, s->author, sizeof(s->author), size);
+ break;
+
+ case MKTAG('(', 'c', ')', ' '): /* Copyright chunk */
+ get_meta (pb, s->copyright, sizeof(s->copyright), size);
+ break;
+
+ case MKTAG('A', 'N', 'N', 'O'): /* Annotation chunk */
+ get_meta (pb, s->comment, sizeof(s->comment), size);
+ break;
+
+ case MKTAG('S', 'S', 'N', 'D'): /* Sampled sound chunk */
+ get_be32(pb); /* Block align... don't care */
+ offset = get_be32(pb); /* Offset of sound data */
+ goto got_sound;
+
+ default: /* Jump */
+ if (size & 1) /* Always even aligned */
+ size++;
+ url_fskip (pb, size);
+ }
+ }
+
+ /* End of loop and didn't get sound */
+ return AVERROR_INVALIDDATA;
+
+got_sound:
+ /* Now positioned, get the sound data start and end */
+ if (st->nb_frames)
+ s->file_size = st->nb_frames * st->codec->block_align;
+
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+ st->start_time = 0;
+ st->duration = st->nb_frames;
+
+ /* Position the stream at the first block */
+ url_fskip(pb, offset);
+
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int aiff_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ AVStream *st = s->streams[0];
+ int res;
+
+ /* End of stream may be reached */
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+
+ /* Now for that packet */
+ res = av_get_packet(&s->pb, pkt, (MAX_SIZE / st->codec->block_align) * st->codec->block_align);
+ if (res < 0)
+ return res;
+
+ /* Only one stream in an AIFF file */
+ pkt->stream_index = 0;
+ return 0;
+}
+
+static int aiff_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int aiff_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ return pcm_read_seek(s, stream_index, timestamp, flags);
+}
+
+#ifdef CONFIG_AIFF_DEMUXER
+AVInputFormat aiff_demuxer = {
+ "aiff",
+ "Audio IFF",
+ 0,
+ aiff_probe,
+ aiff_read_header,
+ aiff_read_packet,
+ aiff_read_close,
+ aiff_read_seek,
+};
+#endif
+
+#ifdef CONFIG_AIFF_MUXER
+AVOutputFormat aiff_muxer = {
+ "aiff",
+ "Audio IFF",
+ "audio/aiff",
+ "aif,aiff,afc,aifc",
+ sizeof(AIFFOutputContext),
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_NONE,
+ aiff_write_header,
+ aiff_write_packet,
+ aiff_write_trailer,
+};
+#endif
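
The COMM parser above reads the sample rate as a 10-byte big-endian IEEE 754 extended float and hands it to av_ext2dbl(). A hedged sketch of that conversion, assuming a normalized, finite value (ext80_to_double is an illustrative name, not part of intfloat_readwrite):

    #include <math.h>
    #include <stdint.h>

    /* Sign + 15-bit exponent (bias 16383) in the first two bytes, then a
     * 64-bit mantissa with an explicit integer bit. */
    static double ext80_to_double(const uint8_t b[10])
    {
        uint64_t mant = 0;
        int exp = ((b[0] & 0x7F) << 8) | b[1];
        int i;

        for (i = 0; i < 8; i++)
            mant = (mant << 8) | b[2 + i];
        if (!exp && !mant)
            return 0.0;
        /* value = (-1)^sign * mantissa * 2^(exponent - 16383 - 63) */
        return (b[0] & 0x80 ? -1.0 : 1.0) * ldexp((double)mant, exp - 16383 - 63);
    }

For example, 44100 Hz is stored as exponent 0x400E and mantissa 0xAC44000000000000, i.e. 44100 * 2^48 scaled back by 2^(16398 - 16383 - 63) = 2^-48.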
diff --git a/contrib/ffmpeg/libavformat/allformats.c b/contrib/ffmpeg/libavformat/allformats.c
new file mode 100644
index 000000000..f4b16adff
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/allformats.c
@@ -0,0 +1,182 @@
+/*
+ * Register all the formats and protocols
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+
+#define REGISTER_MUXER(X,x) \
+ if(ENABLE_##X##_MUXER) av_register_output_format(&x##_muxer)
+#define REGISTER_DEMUXER(X,x) \
+ if(ENABLE_##X##_DEMUXER) av_register_input_format(&x##_demuxer)
+#define REGISTER_MUXDEMUX(X,x) REGISTER_MUXER(X,x); REGISTER_DEMUXER(X,x)
+
+/* If you do not call this function, then you can select exactly which
+ formats you want to support */
+
+/**
+ * Initialize libavcodec and register all the codecs and formats.
+ */
+void av_register_all(void)
+{
+ static int inited = 0;
+
+ if (inited != 0)
+ return;
+ inited = 1;
+
+ avcodec_init();
+ avcodec_register_all();
+
+ REGISTER_DEMUXER (AAC, aac);
+ REGISTER_MUXDEMUX(AC3, ac3);
+ REGISTER_MUXER (ADTS, adts);
+ REGISTER_MUXDEMUX(AIFF, aiff);
+ REGISTER_MUXDEMUX(AMR, amr);
+ REGISTER_MUXDEMUX(ASF, asf);
+ REGISTER_MUXER (ASF_STREAM, asf_stream);
+ REGISTER_MUXDEMUX(AU, au);
+#if defined(CONFIG_AUDIO_OSS) || defined(CONFIG_AUDIO_BEOS)
+ REGISTER_MUXDEMUX(AUDIO, audio);
+#endif
+ REGISTER_MUXDEMUX(AVI, avi);
+#ifdef CONFIG_AVISYNTH
+ av_register_input_format(&avisynth_demuxer);
+#endif
+ REGISTER_DEMUXER (AVS, avs);
+ REGISTER_MUXER (CRC, crc);
+ REGISTER_DEMUXER (DAUD, daud);
+#ifdef CONFIG_DC1394
+ REGISTER_DEMUXER (DC1394, dc1394);
+#endif
+ REGISTER_DEMUXER (DSICIN, dsicin);
+ REGISTER_DEMUXER (DTS, dts);
+ REGISTER_MUXDEMUX(DV, dv);
+#ifdef CONFIG_DV1394
+ REGISTER_DEMUXER (DV1394, dv1394);
+#endif
+ REGISTER_DEMUXER (EA, ea);
+ REGISTER_MUXDEMUX(FFM, ffm);
+ REGISTER_MUXDEMUX(FLAC, flac);
+ REGISTER_DEMUXER (FLIC, flic);
+ REGISTER_MUXDEMUX(FLV, flv);
+ REGISTER_DEMUXER (FOURXM, fourxm);
+ REGISTER_MUXER (FRAMECRC, framecrc);
+ REGISTER_MUXDEMUX(GIF, gif);
+ REGISTER_DEMUXER (GXF, gxf);
+#ifdef CONFIG_GPL
+ REGISTER_MUXER (GXF, gxf);
+#endif
+ REGISTER_MUXDEMUX(H261, h261);
+ REGISTER_MUXDEMUX(H263, h263);
+ REGISTER_MUXDEMUX(H264, h264);
+ REGISTER_DEMUXER (IDCIN, idcin);
+ REGISTER_MUXDEMUX(IMAGE2, image2);
+ REGISTER_MUXDEMUX(IMAGE2PIPE, image2pipe);
+ REGISTER_DEMUXER (INGENIENT, ingenient);
+ REGISTER_DEMUXER (IPMOVIE, ipmovie);
+ REGISTER_MUXDEMUX(M4V, m4v);
+ REGISTER_DEMUXER (MATROSKA, matroska);
+ REGISTER_MUXDEMUX(MJPEG, mjpeg);
+ REGISTER_DEMUXER (MM, mm);
+ REGISTER_MUXDEMUX(MMF, mmf);
+ REGISTER_MUXDEMUX(MOV, mov);
+ REGISTER_MUXER (MP2, mp2);
+ REGISTER_MUXDEMUX(MP3, mp3);
+ REGISTER_MUXER (MP4, mp4);
+ REGISTER_MUXER (MPEG1SYSTEM, mpeg1system);
+ REGISTER_MUXER (MPEG1VCD, mpeg1vcd);
+ REGISTER_MUXER (MPEG1VIDEO, mpeg1video);
+ REGISTER_MUXER (MPEG2DVD, mpeg2dvd);
+ REGISTER_MUXER (MPEG2SVCD, mpeg2svcd);
+ REGISTER_MUXER (MPEG2VIDEO, mpeg2video);
+ REGISTER_MUXER (MPEG2VOB, mpeg2vob);
+ REGISTER_DEMUXER (MPEGPS, mpegps);
+ REGISTER_MUXDEMUX(MPEGTS, mpegts);
+ REGISTER_DEMUXER (MPEGVIDEO, mpegvideo);
+ REGISTER_MUXER (MPJPEG, mpjpeg);
+ REGISTER_DEMUXER (MTV, mtv);
+ REGISTER_DEMUXER (MXF, mxf);
+ REGISTER_DEMUXER (NSV, nsv);
+ REGISTER_MUXER (NULL, null);
+ REGISTER_DEMUXER (NUT, nut);
+#ifdef CONFIG_LIBNUT
+ REGISTER_MUXER (NUT, nut);
+#endif
+ REGISTER_DEMUXER (NUV, nuv);
+ REGISTER_DEMUXER (OGG, ogg);
+#ifdef CONFIG_LIBOGG
+ REGISTER_MUXER (OGG, ogg);
+#endif
+ REGISTER_MUXDEMUX(PCM_ALAW, pcm_alaw);
+ REGISTER_MUXDEMUX(PCM_MULAW, pcm_mulaw);
+ REGISTER_MUXDEMUX(PCM_S16BE, pcm_s16be);
+ REGISTER_MUXDEMUX(PCM_S16LE, pcm_s16le);
+ REGISTER_MUXDEMUX(PCM_S8, pcm_s8);
+ REGISTER_MUXDEMUX(PCM_U16BE, pcm_u16be);
+ REGISTER_MUXDEMUX(PCM_U16LE, pcm_u16le);
+ REGISTER_MUXDEMUX(PCM_U8, pcm_u8);
+ REGISTER_MUXER (PSP, psp);
+ REGISTER_MUXDEMUX(RAWVIDEO, rawvideo);
+ REGISTER_MUXDEMUX(RM, rm);
+ REGISTER_DEMUXER (ROQ, roq);
+#ifdef CONFIG_NETWORK
+ REGISTER_DEMUXER (REDIR, redir);
+ REGISTER_MUXER (RTP, rtp);
+ REGISTER_DEMUXER (RTSP, rtsp);
+ REGISTER_DEMUXER (SDP, sdp);
+ av_register_rtp_dynamic_payload_handlers();
+#endif
+ REGISTER_DEMUXER (SEGAFILM, segafilm);
+ REGISTER_DEMUXER (SHORTEN, shorten);
+ REGISTER_DEMUXER (SMACKER, smacker);
+ REGISTER_DEMUXER (SOL, sol);
+ REGISTER_DEMUXER (STR, str);
+ REGISTER_MUXDEMUX(SWF, swf);
+ REGISTER_MUXER (TG2, tg2);
+ REGISTER_MUXER (TGP, tgp);
+ REGISTER_DEMUXER (TIERTEXSEQ, tiertexseq);
+ REGISTER_DEMUXER (TTA, tta);
+#ifdef CONFIG_VIDEO4LINUX2
+ REGISTER_DEMUXER (V4L2, v4l2);
+#endif
+#if defined(CONFIG_VIDEO4LINUX) || defined(CONFIG_BKTR)
+ REGISTER_DEMUXER (VIDEO_GRAB_DEVICE, video_grab_device);
+#endif
+ REGISTER_DEMUXER (VMD, vmd);
+ REGISTER_MUXDEMUX(VOC, voc);
+ REGISTER_MUXDEMUX(WAV, wav);
+ REGISTER_DEMUXER (WC3, wc3);
+ REGISTER_DEMUXER (WSAUD, wsaud);
+ REGISTER_DEMUXER (WSVQA, wsvqa);
+ REGISTER_DEMUXER (WV, wv);
+ REGISTER_MUXDEMUX(YUV4MPEGPIPE, yuv4mpegpipe);
+
+#ifdef CONFIG_PROTOCOLS
+ /* file protocols */
+ register_protocol(&file_protocol);
+ register_protocol(&pipe_protocol);
+#ifdef CONFIG_NETWORK
+ register_protocol(&udp_protocol);
+ register_protocol(&rtp_protocol);
+ register_protocol(&tcp_protocol);
+ register_protocol(&http_protocol);
+#endif
+#endif
+}
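
The comment above av_register_all() notes that an application can skip it and register only the formats it needs. A minimal sketch of that selective path, assuming the caller also registers whichever codecs it uses on the libavcodec side (register_wav_only is an illustrative name):

    #include "avformat.h"
    #include "allformats.h"

    void register_wav_only(void)
    {
        avcodec_init();                         /* still required before any codec use */
        av_register_input_format(&wav_demuxer);
        av_register_output_format(&wav_muxer);
        register_protocol(&file_protocol);      /* so plain file paths can be opened */
    }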
diff --git a/contrib/ffmpeg/libavformat/allformats.h b/contrib/ffmpeg/libavformat/allformats.h
new file mode 100644
index 000000000..a138841c9
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/allformats.h
@@ -0,0 +1,176 @@
+/*
+ * Register all the formats and protocols.
+ * copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef ALLFORMATS_H
+#define ALLFORMATS_H
+
+extern AVInputFormat fourxm_demuxer;
+extern AVOutputFormat adts_muxer;
+extern AVInputFormat aiff_demuxer;
+extern AVOutputFormat aiff_muxer;
+extern AVInputFormat amr_demuxer;
+extern AVOutputFormat amr_muxer;
+extern AVInputFormat asf_demuxer;
+extern AVOutputFormat asf_muxer;
+extern AVOutputFormat asf_stream_muxer;
+extern AVInputFormat au_demuxer;
+extern AVOutputFormat au_muxer;
+extern AVInputFormat audio_demuxer;
+extern AVOutputFormat audio_muxer;
+extern AVInputFormat avi_demuxer;
+extern AVOutputFormat avi_muxer;
+extern AVInputFormat avisynth_demuxer;
+extern AVInputFormat avs_demuxer;
+extern AVOutputFormat crc_muxer;
+extern AVOutputFormat framecrc_muxer;
+extern AVInputFormat daud_demuxer;
+extern AVInputFormat dc1394_demuxer;
+extern AVInputFormat dsicin_demuxer;
+extern AVInputFormat dv1394_demuxer;
+extern AVInputFormat dv_demuxer;
+extern AVOutputFormat dv_muxer;
+extern AVInputFormat ea_demuxer;
+extern AVInputFormat ffm_demuxer;
+extern AVOutputFormat ffm_muxer;
+extern AVInputFormat flic_demuxer;
+extern AVInputFormat flv_demuxer;
+extern AVOutputFormat flv_muxer;
+extern AVOutputFormat gif_muxer;
+extern AVInputFormat gif_demuxer;
+extern AVInputFormat video_grab_device_demuxer;
+extern AVInputFormat gxf_demuxer;
+extern AVOutputFormat gxf_muxer;
+extern AVInputFormat idcin_demuxer;
+extern AVInputFormat roq_demuxer;
+extern AVInputFormat image2_demuxer;
+extern AVInputFormat image2pipe_demuxer;
+extern AVOutputFormat image2_muxer;
+extern AVOutputFormat image2pipe_muxer;
+extern AVInputFormat image_demuxer;
+extern AVInputFormat imagepipe_demuxer;
+extern AVOutputFormat image_muxer;
+extern AVOutputFormat imagepipe_muxer;
+extern AVInputFormat ipmovie_demuxer;
+extern AVInputFormat matroska_demuxer;
+extern AVInputFormat mm_demuxer;
+extern AVInputFormat mmf_demuxer;
+extern AVOutputFormat mmf_muxer;
+extern AVInputFormat mov_demuxer;
+extern AVOutputFormat mov_muxer;
+extern AVOutputFormat tgp_muxer;
+extern AVOutputFormat mp4_muxer;
+extern AVOutputFormat psp_muxer;
+extern AVOutputFormat tg2_muxer;
+extern AVInputFormat mp3_demuxer;
+extern AVOutputFormat mp2_muxer;
+extern AVOutputFormat mp3_muxer;
+extern AVOutputFormat mpeg1system_muxer;
+extern AVOutputFormat mpeg1vcd_muxer;
+extern AVOutputFormat mpeg2vob_muxer;
+extern AVOutputFormat mpeg2svcd_muxer;
+extern AVOutputFormat mpeg2dvd_muxer;
+extern AVInputFormat mpegps_demuxer;
+extern AVInputFormat mpegts_demuxer;
+extern AVOutputFormat mpegts_muxer;
+extern AVOutputFormat mpjpeg_muxer;
+extern AVInputFormat mtv_demuxer;
+extern AVInputFormat mxf_demuxer;
+extern AVInputFormat nsv_demuxer;
+extern AVInputFormat nut_demuxer;
+extern AVOutputFormat nut_muxer;
+extern AVInputFormat nuv_demuxer;
+extern AVInputFormat ogg_demuxer;
+extern AVOutputFormat ogg_muxer;
+extern AVInputFormat str_demuxer;
+extern AVInputFormat shorten_demuxer;
+extern AVInputFormat flac_demuxer;
+extern AVOutputFormat flac_muxer;
+extern AVInputFormat ac3_demuxer;
+extern AVOutputFormat ac3_muxer;
+extern AVInputFormat dts_demuxer;
+extern AVInputFormat aac_demuxer;
+extern AVInputFormat h261_demuxer;
+extern AVOutputFormat h261_muxer;
+extern AVInputFormat h263_demuxer;
+extern AVOutputFormat h263_muxer;
+extern AVInputFormat m4v_demuxer;
+extern AVOutputFormat m4v_muxer;
+extern AVInputFormat h264_demuxer;
+extern AVOutputFormat h264_muxer;
+extern AVInputFormat mpegvideo_demuxer;
+extern AVOutputFormat mpeg1video_muxer;
+extern AVOutputFormat mpeg2video_muxer;
+extern AVInputFormat mjpeg_demuxer;
+extern AVInputFormat ingenient_demuxer;
+extern AVOutputFormat mjpeg_muxer;
+extern AVInputFormat pcm_s16le_demuxer;
+extern AVOutputFormat pcm_s16le_muxer;
+extern AVInputFormat pcm_s16be_demuxer;
+extern AVOutputFormat pcm_s16be_muxer;
+extern AVInputFormat pcm_u16le_demuxer;
+extern AVOutputFormat pcm_u16le_muxer;
+extern AVInputFormat pcm_u16be_demuxer;
+extern AVOutputFormat pcm_u16be_muxer;
+extern AVInputFormat pcm_s8_demuxer;
+extern AVOutputFormat pcm_s8_muxer;
+extern AVInputFormat pcm_u8_demuxer;
+extern AVOutputFormat pcm_u8_muxer;
+extern AVInputFormat pcm_mulaw_demuxer;
+extern AVOutputFormat pcm_mulaw_muxer;
+extern AVInputFormat pcm_alaw_demuxer;
+extern AVOutputFormat pcm_alaw_muxer;
+extern AVInputFormat rawvideo_demuxer;
+extern AVOutputFormat rawvideo_muxer;
+extern AVOutputFormat null_muxer;
+extern AVInputFormat rm_demuxer;
+extern AVOutputFormat rm_muxer;
+extern AVInputFormat sdp_demuxer;
+extern AVInputFormat redir_demuxer;
+extern AVInputFormat segafilm_demuxer;
+extern AVInputFormat vmd_demuxer;
+extern AVInputFormat smacker_demuxer;
+extern AVInputFormat sol_demuxer;
+extern AVInputFormat swf_demuxer;
+extern AVOutputFormat swf_muxer;
+extern AVInputFormat tta_demuxer;
+extern AVInputFormat v4l2_demuxer;
+extern AVInputFormat voc_demuxer;
+extern AVOutputFormat voc_muxer;
+extern AVInputFormat wav_demuxer;
+extern AVOutputFormat wav_muxer;
+extern AVInputFormat wc3_demuxer;
+extern AVInputFormat wsaud_demuxer;
+extern AVInputFormat wsvqa_demuxer;
+extern AVInputFormat wv_demuxer;
+extern AVOutputFormat yuv4mpegpipe_muxer;
+extern AVInputFormat yuv4mpegpipe_demuxer;
+extern AVInputFormat tiertexseq_demuxer;
+
+/* raw.c */
+int pcm_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags);
+
+/* rtsp.c */
+int redir_open(AVFormatContext **ic_ptr, ByteIOContext *f);
+/* rtp.c */
+void av_register_rtp_dynamic_payload_handlers();
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/amr.c b/contrib/ffmpeg/libavformat/amr.c
new file mode 100644
index 000000000..635a898fa
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/amr.c
@@ -0,0 +1,201 @@
+/*
+ * amr file format
+ * Copyright (c) 2001 ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+Write and read amr data according to RFC3267, http://www.ietf.org/rfc/rfc3267.txt?number=3267
+
+Only mono files are supported.
+
+*/
+#include "avformat.h"
+
+static const char AMR_header [] = "#!AMR\n";
+static const char AMRWB_header [] = "#!AMR-WB\n";
+
+#ifdef CONFIG_MUXERS
+static int amr_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc = s->streams[0]->codec;
+
+ s->priv_data = NULL;
+
+ if (enc->codec_id == CODEC_ID_AMR_NB)
+ {
+ put_tag(pb, AMR_header); /* magic number */
+ }
+ else if(enc->codec_id == CODEC_ID_AMR_WB)
+ {
+ put_tag(pb, AMRWB_header); /* magic number */
+ }
+ else
+ {
+ return -1;
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int amr_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ put_buffer(&s->pb, pkt->data, pkt->size);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int amr_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+#endif /* CONFIG_MUXERS */
+
+static int amr_probe(AVProbeData *p)
+{
+ //Only check for "#!AMR" which could be amr-wb, amr-nb.
+ //This will also trigger multichannel files: "#!AMR_MC1.0\n" and
+ //"#!AMR-WB_MC1.0\n" (not supported)
+
+ if (p->buf_size < 5)
+ return 0;
+ if(memcmp(p->buf,AMR_header,5)==0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* amr input */
+static int amr_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ uint8_t header[9];
+
+ get_buffer(pb, header, 6);
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ {
+ return AVERROR_NOMEM;
+ }
+ if(memcmp(header,AMR_header,6)!=0)
+ {
+ get_buffer(pb, header+6, 3);
+ if(memcmp(header,AMRWB_header,9)!=0)
+ {
+ return -1;
+ }
+
+ st->codec->codec_tag = MKTAG('s', 'a', 'w', 'b');
+ st->codec->codec_id = CODEC_ID_AMR_WB;
+ st->codec->sample_rate = 16000;
+ }
+ else
+ {
+ st->codec->codec_tag = MKTAG('s', 'a', 'm', 'r');
+ st->codec->codec_id = CODEC_ID_AMR_NB;
+ st->codec->sample_rate = 8000;
+ }
+ st->codec->channels = 1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+
+ return 0;
+}
+
+static int amr_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ AVCodecContext *enc = s->streams[0]->codec;
+ int read, size, toc, mode;
+
+ if (url_feof(&s->pb))
+ {
+ return AVERROR_IO;
+ }
+
+//FIXME this is wrong, this should rather be in an AVParser
+ toc=get_byte(&s->pb);
+ mode = (toc >> 3) & 0x0F;
+
+ if (enc->codec_id == CODEC_ID_AMR_NB)
+ {
+ static const uint8_t packed_size[16] = {12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0};
+
+ size=packed_size[mode]+1;
+ }
+ else if(enc->codec_id == CODEC_ID_AMR_WB)
+ {
+ static uint8_t packed_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1};
+
+ size=packed_size[mode];
+ }
+ else
+ {
+ assert(0);
+ }
+
+ if ( (size==0) || av_new_packet(pkt, size))
+ {
+ return AVERROR_IO;
+ }
+
+ pkt->stream_index = 0;
+ pkt->pos= url_ftell(&s->pb);
+ pkt->data[0]=toc;
+ pkt->duration= enc->codec_id == CODEC_ID_AMR_NB ? 160 : 320;
+ read = get_buffer(&s->pb, pkt->data+1, size-1);
+
+ if (read != size-1)
+ {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_AMR_DEMUXER
+AVInputFormat amr_demuxer = {
+ "amr",
+ "3gpp amr file format",
+ 0, /*priv_data_size*/
+ amr_probe,
+ amr_read_header,
+ amr_read_packet,
+ NULL,
+};
+#endif
+
+#ifdef CONFIG_AMR_MUXER
+AVOutputFormat amr_muxer = {
+ "amr",
+ "3gpp amr file format",
+ "audio/amr",
+ "amr",
+ 0,
+ CODEC_ID_AMR_NB,
+ CODEC_ID_NONE,
+ amr_write_header,
+ amr_write_packet,
+ amr_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/asf-enc.c b/contrib/ffmpeg/libavformat/asf-enc.c
new file mode 100644
index 000000000..3ef67507f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/asf-enc.c
@@ -0,0 +1,866 @@
+/*
+ * ASF (Advanced Streaming Format) muxer
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+#include "asf.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+
+#define ASF_INDEXED_INTERVAL 10000000
+#define ASF_INDEX_BLOCK 600
+
+#define ASF_PACKET_ERROR_CORRECTION_DATA_SIZE 0x2
+#define ASF_PACKET_ERROR_CORRECTION_FLAGS (\
+ ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT | \
+ ASF_PACKET_ERROR_CORRECTION_DATA_SIZE\
+ )
+
+#if (ASF_PACKET_ERROR_CORRECTION_FLAGS != 0)
+# define ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE 1
+#else
+# define ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE 0
+#endif
+
+#define ASF_PPI_PROPERTY_FLAGS (\
+ ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE | \
+ ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD | \
+ ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE | \
+ ASF_PL_FLAG_STREAM_NUMBER_LENGTH_FIELD_IS_BYTE \
+ )
+
+#define ASF_PPI_LENGTH_TYPE_FLAGS 0
+
+#define ASF_PAYLOAD_FLAGS ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD
+
+#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
+# define ASF_PPI_SEQUENCE_FIELD_SIZE 1
+#endif
+#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
+# define ASF_PPI_SEQUENCE_FIELD_SIZE 2
+#endif
+#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
+# define ASF_PPI_SEQUENCE_FIELD_SIZE 4
+#endif
+#ifndef ASF_PPI_SEQUENCE_FIELD_SIZE
+# define ASF_PPI_SEQUENCE_FIELD_SIZE 0
+#endif
+
+
+#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 1
+#endif
+#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 2
+#endif
+#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 4
+#endif
+#ifndef ASF_PPI_PACKET_LENGTH_FIELD_SIZE
+# define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 0
+#endif
+
+#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 1
+#endif
+#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 2
+#endif
+#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 4
+#endif
+#ifndef ASF_PPI_PADDING_LENGTH_FIELD_SIZE
+# define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 0
+#endif
+
+#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 1
+#endif
+#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 2
+#endif
+#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 4
+#endif
+#ifndef ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE
+# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 0
+#endif
+
+#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 1
+#endif
+#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 2
+#endif
+#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 4
+#endif
+#ifndef ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE
+# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 0
+#endif
+
+#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 1
+#endif
+#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 2
+#endif
+#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 4
+#endif
+#ifndef ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE
+# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 0
+#endif
+
+#if (ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_BYTE == (ASF_PAYLOAD_FLAGS & ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 1
+#endif
+#if (ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD == (ASF_PAYLOAD_FLAGS & ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 2
+#endif
+#ifndef ASF_PAYLOAD_LENGTH_FIELD_SIZE
+# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 0
+#endif
+
+#define PACKET_HEADER_MIN_SIZE (\
+ ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE + \
+ ASF_PACKET_ERROR_CORRECTION_DATA_SIZE + \
+ 1 + /*Length Type Flags*/ \
+ 1 + /*Property Flags*/ \
+ ASF_PPI_PACKET_LENGTH_FIELD_SIZE + \
+ ASF_PPI_SEQUENCE_FIELD_SIZE + \
+ ASF_PPI_PADDING_LENGTH_FIELD_SIZE + \
+ 4 + /*Send Time Field*/ \
+ 2 /*Duration Field*/ \
+ )
+
+
+// Replicated Data shall be at least 8 bytes long.
+#define ASF_PAYLOAD_REPLICATED_DATA_LENGTH 0x08
+
+#define PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD (\
+ 1 + /*Stream Number*/ \
+ ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE + \
+ ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE + \
+ ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE + \
+ ASF_PAYLOAD_REPLICATED_DATA_LENGTH \
+ )
+
+#define PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS (\
+ 1 + /*Stream Number*/ \
+ ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE + \
+ ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE + \
+ ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE + \
+ ASF_PAYLOAD_REPLICATED_DATA_LENGTH + \
+ ASF_PAYLOAD_LENGTH_FIELD_SIZE \
+ )
+
+#define SINGLE_PAYLOAD_DATA_LENGTH (\
+ PACKET_SIZE - \
+ PACKET_HEADER_MIN_SIZE - \
+ PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD \
+ )
+
+#define MULTI_PAYLOAD_CONSTANT (\
+ PACKET_SIZE - \
+ PACKET_HEADER_MIN_SIZE - \
+ 1 - /*Payload Flags*/ \
+ 2*PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS \
+ )
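+
+/* Illustrative arithmetic (assuming the flag values chosen above and
+ * PACKET_SIZE 3200 from asf.h): the field size macros resolve to
+ *   PACKET_HEADER_MIN_SIZE                = 1+2+1+1+0+0+0+4+2 = 11
+ *   PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD    = 1+1+4+1+8         = 15
+ *   PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS = 15+2              = 17
+ * so a single payload packet carries SINGLE_PAYLOAD_DATA_LENGTH =
+ * 3200-11-15 = 3174 data bytes and MULTI_PAYLOAD_CONSTANT =
+ * 3200-11-1-2*17 = 3154. */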
+
+static int preroll_time = 2000;
+
+static const uint8_t error_spread_ADPCM_G726[] = { 0x01, 0x90, 0x01, 0x90, 0x01, 0x01, 0x00, 0x00 };
+
+static void put_guid(ByteIOContext *s, const GUID *g)
+{
+ int i;
+
+ put_le32(s, g->v1);
+ put_le16(s, g->v2);
+ put_le16(s, g->v3);
+ for(i=0;i<8;i++)
+ put_byte(s, g->v4[i]);
+}
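+
+/* The v1/v2/v3 members are written little-endian, so e.g. the asf_header
+ * GUID from asf.h shows up on disk as the familiar byte sequence
+ * 30 26 B2 75 8E 66 CF 11 A6 D9 00 AA 00 62 CE 6C. */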
+
+static void put_str16(ByteIOContext *s, const char *tag)
+{
+ int c;
+
+ put_le16(s,strlen(tag) + 1);
+ for(;;) {
+ c = (uint8_t)*tag++;
+ put_le16(s, c);
+ if (c == '\0')
+ break;
+ }
+}
+
+static void put_str16_nolen(ByteIOContext *s, const char *tag)
+{
+ int c;
+
+ for(;;) {
+ c = (uint8_t)*tag++;
+ put_le16(s, c);
+ if (c == '\0')
+ break;
+ }
+}
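+
+/* Both helpers emit the tag as little-endian UTF-16 code units (each input
+ * byte is zero-extended, so only ASCII/Latin-1 input survives), including
+ * the terminating NUL; put_str16() additionally prefixes the character
+ * count (strlen + 1) as a 16-bit LE value. */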
+
+static int64_t put_header(ByteIOContext *pb, const GUID *g)
+{
+ int64_t pos;
+
+ pos = url_ftell(pb);
+ put_guid(pb, g);
+ put_le64(pb, 24);
+ return pos;
+}
+
+/* update header size */
+static void end_header(ByteIOContext *pb, int64_t pos)
+{
+ int64_t pos1;
+
+ pos1 = url_ftell(pb);
+ url_fseek(pb, pos + 16, SEEK_SET);
+ put_le64(pb, pos1 - pos);
+ url_fseek(pb, pos1, SEEK_SET);
+}
+
+/* write an asf chunk (only used in streaming case) */
+static void put_chunk(AVFormatContext *s, int type, int payload_length, int flags)
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int length;
+
+ length = payload_length + 8;
+ put_le16(pb, type);
+ put_le16(pb, length); //size
+ put_le32(pb, asf->seqno);//sequence number
+ put_le16(pb, flags); /* unknown bytes */
+ put_le16(pb, length); //size_confirm
+ asf->seqno++;
+}
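+
+/* The chunk wrapper written above is 12 bytes: le16 type, le16 length
+ * (payload_length + 8), le32 sequence number, le16 flags and le16 length
+ * again as size_confirm. */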
+
+/* convert from unix to windows time */
+static int64_t unix_to_file_time(int ti)
+{
+ int64_t t;
+
+ t = ti * int64_t_C(10000000);
+ t += int64_t_C(116444736000000000);
+ return t;
+}
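+
+/* e.g. unix_to_file_time(0) == 116444736000000000, the number of 100 ns
+ * intervals between 1601-01-01 (the Windows FILETIME epoch) and the Unix
+ * epoch. */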
+
+/* write the header (used two times if non streamed) */
+static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data_chunk_size)
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
+ int has_title;
+ AVCodecContext *enc;
+ int64_t header_offset, cur_pos, hpos;
+ int bit_rate;
+ int64_t duration;
+
+ duration = asf->duration + preroll_time * 10000;
+ has_title = (s->title[0] || s->author[0] || s->copyright[0] || s->comment[0]);
+
+ bit_rate = 0;
+ for(n=0;n<s->nb_streams;n++) {
+ enc = s->streams[n]->codec;
+
+ av_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */
+
+ bit_rate += enc->bit_rate;
+ }
+
+ if (asf->is_streamed) {
+ put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
+ }
+
+ put_guid(pb, &asf_header);
+ put_le64(pb, -1); /* header length, will be patched after */
+ put_le32(pb, 3 + has_title + s->nb_streams); /* number of chunks in header */
+ put_byte(pb, 1); /* ??? */
+ put_byte(pb, 2); /* ??? */
+
+ /* file header */
+ header_offset = url_ftell(pb);
+ hpos = put_header(pb, &file_header);
+ put_guid(pb, &my_guid);
+ put_le64(pb, file_size);
+ file_time = 0;
+ put_le64(pb, unix_to_file_time(file_time));
+ put_le64(pb, asf->nb_packets); /* number of packets */
+ put_le64(pb, duration); /* end time stamp (in 100ns units) */
+ put_le64(pb, duration); /* duration (in 100ns units) */
+ put_le32(pb, preroll_time); /* start time stamp */
+ put_le32(pb, 0); /* ??? */
+ put_le32(pb, asf->is_streamed ? 1 : 0); /* ??? */
+ put_le32(pb, asf->packet_size); /* packet size */
+ put_le32(pb, asf->packet_size); /* packet size */
+ put_le32(pb, bit_rate); /* Nominal data rate in bps */
+ end_header(pb, hpos);
+
+ /* unknown headers */
+ hpos = put_header(pb, &head1_guid);
+ put_guid(pb, &head2_guid);
+ put_le32(pb, 6);
+ put_le16(pb, 0);
+ end_header(pb, hpos);
+
+ /* title and other infos */
+ if (has_title) {
+ hpos = put_header(pb, &comment_header);
+ if ( s->title[0] ) { put_le16(pb, 2 * (strlen(s->title ) + 1)); } else { put_le16(pb, 0); }
+ if ( s->author[0] ) { put_le16(pb, 2 * (strlen(s->author ) + 1)); } else { put_le16(pb, 0); }
+ if ( s->copyright[0] ) { put_le16(pb, 2 * (strlen(s->copyright) + 1)); } else { put_le16(pb, 0); }
+ if ( s->comment[0] ) { put_le16(pb, 2 * (strlen(s->comment ) + 1)); } else { put_le16(pb, 0); }
+ put_le16(pb, 0);
+ if ( s->title[0] ) put_str16_nolen(pb, s->title);
+ if ( s->author[0] ) put_str16_nolen(pb, s->author);
+ if ( s->copyright[0] ) put_str16_nolen(pb, s->copyright);
+ if ( s->comment[0] ) put_str16_nolen(pb, s->comment);
+ end_header(pb, hpos);
+ }
+
+ /* stream headers */
+ for(n=0;n<s->nb_streams;n++) {
+ int64_t es_pos;
+ const uint8_t *er_spr = NULL;
+ int er_spr_len = 0;
+ // ASFStream *stream = &asf->streams[n];
+
+ enc = s->streams[n]->codec;
+ asf->streams[n].num = n + 1;
+ asf->streams[n].seq = 0;
+
+
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ if (enc->codec_id == CODEC_ID_ADPCM_G726) {
+ er_spr = error_spread_ADPCM_G726;
+ er_spr_len = sizeof(error_spread_ADPCM_G726);
+ }
+ }
+
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ wav_extra_size = 0;
+ extra_size = 18 + wav_extra_size;
+ extra_size2 = er_spr_len;
+ break;
+ default:
+ case CODEC_TYPE_VIDEO:
+ wav_extra_size = enc->extradata_size;
+ extra_size = 0x33 + wav_extra_size;
+ extra_size2 = 0;
+ break;
+ }
+
+ hpos = put_header(pb, &stream_header);
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ put_guid(pb, &audio_stream);
+ if ((er_spr != NULL) && (er_spr_len != 0)) {
+ put_guid(pb, &audio_conceal_spread);
+ } else {
+ put_guid(pb, &video_conceal_none);
+ }
+ } else {
+ put_guid(pb, &video_stream);
+ put_guid(pb, &video_conceal_none);
+ }
+ put_le64(pb, 0); /* ??? */
+ es_pos = url_ftell(pb);
+ put_le32(pb, extra_size); /* wav header len */
+ put_le32(pb, extra_size2); /* additional data len */
+ put_le16(pb, n + 1); /* stream number */
+ put_le32(pb, 0); /* ??? */
+
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ /* WAVEFORMATEX header */
+ int wavsize = put_wav_header(pb, enc);
+ if ((enc->codec_id != CODEC_ID_MP3) && (enc->codec_id != CODEC_ID_MP2) && (enc->codec_id != CODEC_ID_ADPCM_IMA_WAV) && (enc->extradata_size==0)) {
+ wavsize += 2;
+ put_le16(pb, 0);
+ }
+
+ if (wavsize < 0)
+ return -1;
+ if (wavsize != extra_size) {
+ cur_pos = url_ftell(pb);
+ url_fseek(pb, es_pos, SEEK_SET);
+ put_le32(pb, wavsize); /* wav header len */
+ url_fseek(pb, cur_pos, SEEK_SET);
+ }
+ /* ERROR Correction */
+ if ((er_spr != NULL) && (er_spr_len != 0))
+ put_buffer(pb, er_spr, er_spr_len);
+ } else {
+ put_le32(pb, enc->width);
+ put_le32(pb, enc->height);
+ put_byte(pb, 2); /* ??? */
+ put_le16(pb, 40 + enc->extradata_size); /* size */
+
+ /* BITMAPINFOHEADER header */
+ put_bmp_header(pb, enc, codec_bmp_tags, 1);
+ }
+ end_header(pb, hpos);
+ }
+
+ /* media comments */
+
+ hpos = put_header(pb, &codec_comment_header);
+ put_guid(pb, &codec_comment1_header);
+ put_le32(pb, s->nb_streams);
+ for(n=0;n<s->nb_streams;n++) {
+ AVCodec *p;
+
+ enc = s->streams[n]->codec;
+ p = avcodec_find_encoder(enc->codec_id);
+
+ put_le16(pb, asf->streams[n].num);
+ put_str16(pb, p ? p->name : enc->codec_name);
+ put_le16(pb, 0); /* no parameters */
+
+
+ /* id */
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ put_le16(pb, 2);
+ if(!enc->codec_tag)
+ enc->codec_tag = codec_get_tag(codec_wav_tags, enc->codec_id);
+ if(!enc->codec_tag)
+ return -1;
+ put_le16(pb, enc->codec_tag);
+ } else {
+ put_le16(pb, 4);
+ if(!enc->codec_tag)
+ enc->codec_tag = codec_get_tag(codec_bmp_tags, enc->codec_id);
+ if(!enc->codec_tag)
+ return -1;
+ put_le32(pb, enc->codec_tag);
+ }
+ }
+ end_header(pb, hpos);
+
+ /* patch the header size fields */
+
+ cur_pos = url_ftell(pb);
+ header_size = cur_pos - header_offset;
+ if (asf->is_streamed) {
+ header_size += 8 + 30 + 50;
+
+ url_fseek(pb, header_offset - 10 - 30, SEEK_SET);
+ put_le16(pb, header_size);
+ url_fseek(pb, header_offset - 2 - 30, SEEK_SET);
+ put_le16(pb, header_size);
+
+ header_size -= 8 + 30 + 50;
+ }
+ header_size += 24 + 6;
+ url_fseek(pb, header_offset - 14, SEEK_SET);
+ put_le64(pb, header_size);
+ url_fseek(pb, cur_pos, SEEK_SET);
+
+ /* movie chunk, followed by packets of packet_size */
+ asf->data_offset = cur_pos;
+ put_guid(pb, &data_header);
+ put_le64(pb, data_chunk_size);
+ put_guid(pb, &my_guid);
+ put_le64(pb, asf->nb_packets); /* nb packets */
+ put_byte(pb, 1); /* ??? */
+ put_byte(pb, 1); /* ??? */
+ return 0;
+}
+
+static int asf_write_header(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+
+ asf->packet_size = PACKET_SIZE;
+ asf->nb_packets = 0;
+
+ asf->last_indexed_pts = 0;
+ asf->index_ptr = (ASFIndex*)av_malloc( sizeof(ASFIndex) * ASF_INDEX_BLOCK );
+ asf->nb_index_memory_alloc = ASF_INDEX_BLOCK;
+ asf->nb_index_count = 0;
+ asf->maximum_packet = 0;
+
+ if (asf_write_header1(s, 0, 50) < 0) {
+ //av_free(asf);
+ return -1;
+ }
+
+ put_flush_packet(&s->pb);
+
+ asf->packet_nb_payloads = 0;
+ asf->prev_packet_sent_time = 0;
+ asf->packet_timestamp_start = -1;
+ asf->packet_timestamp_end = -1;
+ init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
+ NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+static int asf_write_stream_header(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+
+ asf->is_streamed = 1;
+
+ return asf_write_header(s);
+}
+
+static int put_payload_parsing_info(
+ AVFormatContext *s,
+ unsigned int sendtime,
+ unsigned int duration,
+ int nb_payloads,
+ int padsize
+ )
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ppi_size, i;
+ unsigned char *start_ppi_ptr = pb->buf_ptr;
+
+ int iLengthTypeFlags = ASF_PPI_LENGTH_TYPE_FLAGS;
+
+ put_byte(pb, ASF_PACKET_ERROR_CORRECTION_FLAGS);
+ for (i = 0; i < ASF_PACKET_ERROR_CORRECTION_DATA_SIZE; i++){
+ put_byte(pb, 0x0);
+ }
+
+ if (asf->multi_payloads_present)
+ iLengthTypeFlags |= ASF_PPI_FLAG_MULTIPLE_PAYLOADS_PRESENT;
+
+ if (padsize > 0) {
+ if (padsize < 256)
+ iLengthTypeFlags |= ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE;
+ else
+ iLengthTypeFlags |= ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD;
+ }
+ put_byte(pb, iLengthTypeFlags);
+
+ put_byte(pb, ASF_PPI_PROPERTY_FLAGS);
+
+ if (iLengthTypeFlags & ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD)
+ put_le16(pb, padsize - 2);
+ if (iLengthTypeFlags & ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE)
+ put_byte(pb, padsize - 1);
+
+ put_le32(pb, sendtime);
+ put_le16(pb, duration);
+ if (asf->multi_payloads_present)
+ put_byte(pb, nb_payloads | ASF_PAYLOAD_FLAGS);
+
+ ppi_size = pb->buf_ptr - start_ppi_ptr;
+
+ return ppi_size;
+}
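+
+/* The payload parsing information written above is: the error correction
+ * flags byte, ASF_PACKET_ERROR_CORRECTION_DATA_SIZE zero bytes, the length
+ * type flags byte, the property flags byte, an optional padding length
+ * (byte or word), the 32 bit send time in ms, the 16 bit duration and,
+ * for multi payload packets, one payload count/flags byte. */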
+
+static void flush_packet(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+ int packet_hdr_size, packet_filled_size;
+
+ if (asf->is_streamed) {
+ put_chunk(s, 0x4424, asf->packet_size, 0);
+ }
+
+ packet_hdr_size = put_payload_parsing_info(
+ s,
+ asf->packet_timestamp_start,
+ asf->packet_timestamp_end - asf->packet_timestamp_start,
+ asf->packet_nb_payloads,
+ asf->packet_size_left
+ );
+
+ packet_filled_size = PACKET_SIZE - packet_hdr_size - asf->packet_size_left;
+ memset(asf->packet_buf + packet_filled_size, 0, asf->packet_size_left);
+
+ put_buffer(&s->pb, asf->packet_buf, asf->packet_size - packet_hdr_size);
+
+ put_flush_packet(&s->pb);
+ asf->nb_packets++;
+ asf->packet_nb_payloads = 0;
+ asf->prev_packet_sent_time = asf->packet_timestamp_start;
+ asf->packet_timestamp_start = -1;
+ asf->packet_timestamp_end = -1;
+ init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
+ NULL, NULL, NULL, NULL);
+}
+
+static void put_payload_header(
+ AVFormatContext *s,
+ ASFStream *stream,
+ int presentation_time,
+ int m_obj_size,
+ int m_obj_offset,
+ int payload_len,
+ int flags
+ )
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &asf->pb;
+ int val;
+
+ val = stream->num;
+ if (flags & PKT_FLAG_KEY)
+ val |= ASF_PL_FLAG_KEY_FRAME;
+ put_byte(pb, val);
+
+ put_byte(pb, stream->seq); //Media object number
+ put_le32(pb, m_obj_offset); //Offset Into Media Object
+
+ // Replicated Data shall be at least 8 bytes long.
+ // The first 4 bytes of data shall contain the
+ // Size of the Media Object that the payload belongs to.
+ // The next 4 bytes of data shall contain the
+ // Presentation Time for the media object that the payload belongs to.
+ put_byte(pb, ASF_PAYLOAD_REPLICATED_DATA_LENGTH);
+
+ put_le32(pb, m_obj_size); //Replicated Data - Media Object Size
+ put_le32(pb, presentation_time);//Replicated Data - Presentation Time
+
+ if (asf->multi_payloads_present){
+ put_le16(pb, payload_len); //payload length
+ }
+}
+
+static void put_frame(
+ AVFormatContext *s,
+ ASFStream *stream,
+ int timestamp,
+ const uint8_t *buf,
+ int m_obj_size,
+ int flags
+ )
+{
+ ASFContext *asf = s->priv_data;
+ int m_obj_offset, payload_len, frag_len1;
+
+ m_obj_offset = 0;
+ while (m_obj_offset < m_obj_size) {
+ payload_len = m_obj_size - m_obj_offset;
+ if (asf->packet_timestamp_start == -1) {
+ asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT);
+
+ if (asf->multi_payloads_present){
+ asf->packet_size_left = PACKET_SIZE; //For debug
+ asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE - 1;
+ frag_len1 = MULTI_PAYLOAD_CONSTANT - 1;
+ }
+ else {
+ asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE;
+ frag_len1 = SINGLE_PAYLOAD_DATA_LENGTH;
+ }
+ if (asf->prev_packet_sent_time > timestamp)
+ asf->packet_timestamp_start = asf->prev_packet_sent_time;
+ else
+ asf->packet_timestamp_start = timestamp;
+ }
+ else {
+ // multi payloads
+ frag_len1 = asf->packet_size_left - PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS;
+
+ if (asf->prev_packet_sent_time > timestamp)
+ asf->packet_timestamp_start = asf->prev_packet_sent_time;
+ else if (asf->packet_timestamp_start >= timestamp)
+ asf->packet_timestamp_start = timestamp;
+ }
+ if (frag_len1 > 0) {
+ if (payload_len > frag_len1)
+ payload_len = frag_len1;
+ else if (payload_len == (frag_len1 - 1))
+                payload_len = frag_len1 - 2; // an additional byte is needed to store the padding length
+
+ put_payload_header(s, stream, timestamp+preroll_time, m_obj_size, m_obj_offset, payload_len, flags);
+ put_buffer(&asf->pb, buf, payload_len);
+
+ if (asf->multi_payloads_present)
+ asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS);
+ else
+ asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD);
+ asf->packet_timestamp_end = timestamp;
+
+ asf->packet_nb_payloads++;
+ } else {
+ payload_len = 0;
+ }
+ m_obj_offset += payload_len;
+ buf += payload_len;
+
+ if (!asf->multi_payloads_present)
+ flush_packet(s);
+ else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + 1))
+ flush_packet(s);
+ }
+ stream->seq++;
+}
+
+static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ASFContext *asf = s->priv_data;
+ ASFStream *stream;
+ int64_t duration;
+ AVCodecContext *codec;
+ int64_t packet_st,pts;
+ int start_sec,i;
+
+ codec = s->streams[pkt->stream_index]->codec;
+ stream = &asf->streams[pkt->stream_index];
+
+ //XXX /FIXME use duration from AVPacket (quick hack by)
+ pts = (pkt->pts != AV_NOPTS_VALUE) ? pkt->pts : pkt->dts;
+ if (pts == AV_NOPTS_VALUE) {
+ if (codec->codec_type == CODEC_TYPE_AUDIO) {
+ duration = (codec->frame_number * (int64_t)codec->frame_size * int64_t_C(10000000)) /
+ codec->sample_rate;
+ } else {
+ duration = av_rescale(codec->frame_number * (int64_t)codec->time_base.num, 10000000, codec->time_base.den);
+ }
+ } else {
+ duration = pts * 10000;
+ }
+ if (duration > asf->duration)
+ asf->duration = duration;
+
+ packet_st = asf->nb_packets;
+ put_frame(s, stream, pkt->pts, pkt->data, pkt->size, pkt->flags);
+
+ /* check index */
+ if ((!asf->is_streamed) && (codec->codec_type == CODEC_TYPE_VIDEO) && (pkt->flags & PKT_FLAG_KEY)) {
+ start_sec = (int)(duration / int64_t_C(10000000));
+ if (start_sec != (int)(asf->last_indexed_pts / int64_t_C(10000000))) {
+ for(i=asf->nb_index_count;i<start_sec;i++) {
+ if (i>=asf->nb_index_memory_alloc) {
+ asf->nb_index_memory_alloc += ASF_INDEX_BLOCK;
+ asf->index_ptr = (ASFIndex*)av_realloc( asf->index_ptr, sizeof(ASFIndex) * asf->nb_index_memory_alloc );
+ }
+ // store
+ asf->index_ptr[i].packet_number = (uint32_t)packet_st;
+ asf->index_ptr[i].packet_count = (uint16_t)(asf->nb_packets-packet_st);
+ if (asf->maximum_packet < (uint16_t)(asf->nb_packets-packet_st))
+ asf->maximum_packet = (uint16_t)(asf->nb_packets-packet_st);
+ }
+ asf->nb_index_count = start_sec;
+ asf->last_indexed_pts = duration;
+ }
+ }
+ return 0;
+}
+
+//
+static int asf_write_index(AVFormatContext *s, ASFIndex *index, uint16_t max, uint32_t count)
+{
+ ByteIOContext *pb = &s->pb;
+ int i;
+
+ put_guid(pb, &simple_index_header);
+ put_le64(pb, 24 + 16 + 8 + 4 + 4 + (4 + 2)*count);
+ put_guid(pb, &my_guid);
+ put_le64(pb, ASF_INDEXED_INTERVAL);
+ put_le32(pb, max);
+ put_le32(pb, count);
+ for(i=0; i<count; i++) {
+ put_le32(pb, index[i].packet_number);
+ put_le16(pb, index[i].packet_count);
+ }
+
+ return 0;
+}
+
+static int asf_write_trailer(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+ int64_t file_size,data_size;
+
+ /* flush the current packet */
+ if (asf->pb.buf_ptr > asf->pb.buffer)
+ flush_packet(s);
+
+ /* write index */
+ data_size = url_ftell(&s->pb);
+ if ((!asf->is_streamed) && (asf->nb_index_count != 0)) {
+ asf_write_index(s, asf->index_ptr, asf->maximum_packet, asf->nb_index_count);
+ }
+ put_flush_packet(&s->pb);
+
+ if (asf->is_streamed) {
+ put_chunk(s, 0x4524, 0, 0); /* end of stream */
+ } else {
+ /* rewrite an updated header */
+ file_size = url_ftell(&s->pb);
+ url_fseek(&s->pb, 0, SEEK_SET);
+ asf_write_header1(s, file_size, data_size - asf->data_offset);
+ }
+
+ put_flush_packet(&s->pb);
+ av_free(asf->index_ptr);
+ return 0;
+}
+
+#ifdef CONFIG_ASF_MUXER
+AVOutputFormat asf_muxer = {
+ "asf",
+ "asf format",
+ "video/x-ms-asf",
+ "asf,wmv,wma",
+ sizeof(ASFContext),
+#ifdef CONFIG_MP3LAME
+ CODEC_ID_MP3,
+#else
+ CODEC_ID_MP2,
+#endif
+ CODEC_ID_MSMPEG4V3,
+ asf_write_header,
+ asf_write_packet,
+ asf_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+
+#ifdef CONFIG_ASF_STREAM_MUXER
+AVOutputFormat asf_stream_muxer = {
+ "asf_stream",
+ "asf format",
+ "video/x-ms-asf",
+ "asf,wmv,wma",
+ sizeof(ASFContext),
+#ifdef CONFIG_MP3LAME
+ CODEC_ID_MP3,
+#else
+ CODEC_ID_MP2,
+#endif
+ CODEC_ID_MSMPEG4V3,
+ asf_write_stream_header,
+ asf_write_packet,
+ asf_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif //CONFIG_ASF_STREAM_MUXER
diff --git a/contrib/ffmpeg/libavformat/asf.c b/contrib/ffmpeg/libavformat/asf.c
new file mode 100644
index 000000000..f63e4b695
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/asf.c
@@ -0,0 +1,951 @@
+/*
+ * ASF compatible demuxer
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+#include "mpegaudio.h"
+#include "asf.h"
+#include "common.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define FRAME_HEADER_SIZE 17
+// Fix Me! FRAME_HEADER_SIZE may be different.
+
+static const GUID index_guid = {
+ 0x33000890, 0xe5b1, 0x11cf, { 0x89, 0xf4, 0x00, 0xa0, 0xc9, 0x03, 0x49, 0xcb },
+};
+
+/**********************************/
+/* decoding */
+
+//#define DEBUG
+
+#ifdef DEBUG
+#define PRINT_IF_GUID(g,cmp) \
+if (!memcmp(g, &cmp, sizeof(GUID))) \
+ printf("(GUID: %s) ", #cmp)
+
+static void print_guid(const GUID *g)
+{
+ int i;
+ PRINT_IF_GUID(g, asf_header);
+ else PRINT_IF_GUID(g, file_header);
+ else PRINT_IF_GUID(g, stream_header);
+ else PRINT_IF_GUID(g, audio_stream);
+ else PRINT_IF_GUID(g, audio_conceal_none);
+ else PRINT_IF_GUID(g, video_stream);
+ else PRINT_IF_GUID(g, video_conceal_none);
+ else PRINT_IF_GUID(g, command_stream);
+ else PRINT_IF_GUID(g, comment_header);
+ else PRINT_IF_GUID(g, codec_comment_header);
+ else PRINT_IF_GUID(g, codec_comment1_header);
+ else PRINT_IF_GUID(g, data_header);
+ else PRINT_IF_GUID(g, index_guid);
+ else PRINT_IF_GUID(g, head1_guid);
+ else PRINT_IF_GUID(g, head2_guid);
+ else PRINT_IF_GUID(g, my_guid);
+ else PRINT_IF_GUID(g, ext_stream_header);
+ else PRINT_IF_GUID(g, extended_content_header);
+ else PRINT_IF_GUID(g, ext_stream_embed_stream_header);
+ else PRINT_IF_GUID(g, ext_stream_audio_stream);
+ else
+ printf("(GUID: unknown) ");
+ printf("0x%08x, 0x%04x, 0x%04x, {", g->v1, g->v2, g->v3);
+ for(i=0;i<8;i++)
+ printf(" 0x%02x,", g->v4[i]);
+ printf("}\n");
+}
+#undef PRINT_IF_GUID
+#endif
+
+static void get_guid(ByteIOContext *s, GUID *g)
+{
+ int i;
+
+ g->v1 = get_le32(s);
+ g->v2 = get_le16(s);
+ g->v3 = get_le16(s);
+ for(i=0;i<8;i++)
+ g->v4[i] = get_byte(s);
+}
+
+#if 0
+static void get_str16(ByteIOContext *pb, char *buf, int buf_size)
+{
+ int len, c;
+ char *q;
+
+ len = get_le16(pb);
+ q = buf;
+ while (len > 0) {
+ c = get_le16(pb);
+ if ((q - buf) < buf_size - 1)
+ *q++ = c;
+ len--;
+ }
+ *q = '\0';
+}
+#endif
+
+static void get_str16_nolen(ByteIOContext *pb, int len, char *buf, int buf_size)
+{
+ char* q = buf;
+ len /= 2;
+ while (len--) {
+ uint8_t tmp;
+ PUT_UTF8(get_le16(pb), tmp, if (q - buf < buf_size - 1) *q++ = tmp;)
+ }
+ *q = '\0';
+}
+
+static int asf_probe(AVProbeData *pd)
+{
+ GUID g;
+ const unsigned char *p;
+ int i;
+
+ /* check file header */
+ if (pd->buf_size <= 32)
+ return 0;
+ p = pd->buf;
+ g.v1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
+ p += 4;
+ g.v2 = p[0] | (p[1] << 8);
+ p += 2;
+ g.v3 = p[0] | (p[1] << 8);
+ p += 2;
+ for(i=0;i<8;i++)
+ g.v4[i] = *p++;
+
+ if (!memcmp(&g, &asf_header, sizeof(GUID)))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ ASFContext *asf = s->priv_data;
+ GUID g;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ ASFStream *asf_st;
+ int size, i;
+ int64_t gsize;
+
+ get_guid(pb, &g);
+ if (memcmp(&g, &asf_header, sizeof(GUID)))
+ goto fail;
+ get_le64(pb);
+ get_le32(pb);
+ get_byte(pb);
+ get_byte(pb);
+ memset(&asf->asfid2avid, -1, sizeof(asf->asfid2avid));
+ for(;;) {
+ get_guid(pb, &g);
+ gsize = get_le64(pb);
+#ifdef DEBUG
+ printf("%08"PRIx64": ", url_ftell(pb) - 24);
+ print_guid(&g);
+ printf(" size=0x%"PRIx64"\n", gsize);
+#endif
+ if (gsize < 24)
+ goto fail;
+ if (!memcmp(&g, &file_header, sizeof(GUID))) {
+ get_guid(pb, &asf->hdr.guid);
+ asf->hdr.file_size = get_le64(pb);
+ asf->hdr.create_time = get_le64(pb);
+ asf->hdr.packets_count = get_le64(pb);
+ asf->hdr.send_time = get_le64(pb);
+ asf->hdr.play_time = get_le64(pb);
+ asf->hdr.preroll = get_le32(pb);
+ asf->hdr.ignore = get_le32(pb);
+ asf->hdr.flags = get_le32(pb);
+ asf->hdr.min_pktsize = get_le32(pb);
+ asf->hdr.max_pktsize = get_le32(pb);
+ asf->hdr.max_bitrate = get_le32(pb);
+ asf->packet_size = asf->hdr.max_pktsize;
+ asf->nb_packets = asf->hdr.packets_count;
+ } else if (!memcmp(&g, &stream_header, sizeof(GUID))) {
+ int type, type_specific_size, sizeX;
+ uint64_t total_size;
+ unsigned int tag1;
+ int64_t pos1, pos2;
+ int test_for_ext_stream_audio;
+
+ pos1 = url_ftell(pb);
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto fail;
+ av_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
+ asf_st = av_mallocz(sizeof(ASFStream));
+ if (!asf_st)
+ goto fail;
+ st->priv_data = asf_st;
+ st->start_time = asf->hdr.preroll;
+ st->duration = asf->hdr.send_time /
+ (10000000 / 1000) - st->start_time;
+ get_guid(pb, &g);
+
+ test_for_ext_stream_audio = 0;
+ if (!memcmp(&g, &audio_stream, sizeof(GUID))) {
+ type = CODEC_TYPE_AUDIO;
+ } else if (!memcmp(&g, &video_stream, sizeof(GUID))) {
+ type = CODEC_TYPE_VIDEO;
+ } else if (!memcmp(&g, &command_stream, sizeof(GUID))) {
+ type = CODEC_TYPE_UNKNOWN;
+ } else if (!memcmp(&g, &ext_stream_embed_stream_header, sizeof(GUID))) {
+ test_for_ext_stream_audio = 1;
+ type = CODEC_TYPE_UNKNOWN;
+ } else {
+ goto fail;
+ }
+ get_guid(pb, &g);
+ total_size = get_le64(pb);
+ type_specific_size = get_le32(pb);
+ get_le32(pb);
+ st->id = get_le16(pb) & 0x7f; /* stream id */
+ // mapping of asf ID to AV stream ID;
+ asf->asfid2avid[st->id] = s->nb_streams - 1;
+
+ get_le32(pb);
+
+ if (test_for_ext_stream_audio) {
+ get_guid(pb, &g);
+ if (!memcmp(&g, &ext_stream_audio_stream, sizeof(GUID))) {
+ type = CODEC_TYPE_AUDIO;
+ get_guid(pb, &g);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_guid(pb, &g);
+ get_le32(pb);
+ }
+ }
+
+ st->codec->codec_type = type;
+ if (type == CODEC_TYPE_AUDIO) {
+ get_wav_header(pb, st->codec, type_specific_size);
+ st->need_parsing = 1;
+ /* We have to init the frame size at some point .... */
+ pos2 = url_ftell(pb);
+ if (gsize > (pos2 + 8 - pos1 + 24)) {
+ asf_st->ds_span = get_byte(pb);
+ asf_st->ds_packet_size = get_le16(pb);
+ asf_st->ds_chunk_size = get_le16(pb);
+ asf_st->ds_data_size = get_le16(pb);
+ asf_st->ds_silence_data = get_byte(pb);
+ }
+ //printf("Descrambling: ps:%d cs:%d ds:%d s:%d sd:%d\n",
+ // asf_st->ds_packet_size, asf_st->ds_chunk_size,
+ // asf_st->ds_data_size, asf_st->ds_span, asf_st->ds_silence_data);
+ if (asf_st->ds_span > 1) {
+ if (!asf_st->ds_chunk_size
+ || (asf_st->ds_packet_size/asf_st->ds_chunk_size <= 1))
+ asf_st->ds_span = 0; // disable descrambling
+ }
+ switch (st->codec->codec_id) {
+ case CODEC_ID_MP3:
+ st->codec->frame_size = MPA_FRAME_SIZE;
+ break;
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_U16LE:
+ case CODEC_ID_PCM_U16BE:
+ case CODEC_ID_PCM_S8:
+ case CODEC_ID_PCM_U8:
+ case CODEC_ID_PCM_ALAW:
+ case CODEC_ID_PCM_MULAW:
+ st->codec->frame_size = 1;
+ break;
+ default:
+ /* This is probably wrong, but it prevents a crash later */
+ st->codec->frame_size = 1;
+ break;
+ }
+ } else if (type == CODEC_TYPE_VIDEO) {
+ get_le32(pb);
+ get_le32(pb);
+ get_byte(pb);
+ size = get_le16(pb); /* size */
+ sizeX= get_le32(pb); /* size */
+ st->codec->width = get_le32(pb);
+ st->codec->height = get_le32(pb);
+ /* not available for asf */
+ get_le16(pb); /* panes */
+ st->codec->bits_per_sample = get_le16(pb); /* depth */
+ tag1 = get_le32(pb);
+ url_fskip(pb, 20);
+// av_log(NULL, AV_LOG_DEBUG, "size:%d tsize:%d sizeX:%d\n", size, total_size, sizeX);
+ size= sizeX;
+ if (size > 40) {
+ st->codec->extradata_size = size - 40;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
+ }
+
+ /* Extract palette from extradata if bpp <= 8 */
+ /* This code assumes that extradata contains only palette */
+ /* This is true for all paletted codecs implemented in ffmpeg */
+ if (st->codec->extradata_size && (st->codec->bits_per_sample <= 8)) {
+ st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
+#ifdef WORDS_BIGENDIAN
+ for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
+ st->codec->palctrl->palette[i] = bswap_32(((uint32_t*)st->codec->extradata)[i]);
+#else
+ memcpy(st->codec->palctrl->palette, st->codec->extradata,
+ FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
+#endif
+ st->codec->palctrl->palette_changed = 1;
+ }
+
+ st->codec->codec_tag = tag1;
+ st->codec->codec_id = codec_get_id(codec_bmp_tags, tag1);
+ if(tag1 == MKTAG('D', 'V', 'R', ' '))
+ st->need_parsing = 1;
+ }
+ pos2 = url_ftell(pb);
+ url_fskip(pb, gsize - (pos2 - pos1 + 24));
+ } else if (!memcmp(&g, &data_header, sizeof(GUID))) {
+ asf->data_object_offset = url_ftell(pb);
+ if (gsize != (uint64_t)-1 && gsize >= 24) {
+ asf->data_object_size = gsize - 24;
+ } else {
+ asf->data_object_size = (uint64_t)-1;
+ }
+ break;
+ } else if (!memcmp(&g, &comment_header, sizeof(GUID))) {
+ int len1, len2, len3, len4, len5;
+
+ len1 = get_le16(pb);
+ len2 = get_le16(pb);
+ len3 = get_le16(pb);
+ len4 = get_le16(pb);
+ len5 = get_le16(pb);
+ get_str16_nolen(pb, len1, s->title, sizeof(s->title));
+ get_str16_nolen(pb, len2, s->author, sizeof(s->author));
+ get_str16_nolen(pb, len3, s->copyright, sizeof(s->copyright));
+ get_str16_nolen(pb, len4, s->comment, sizeof(s->comment));
+ url_fskip(pb, len5);
+ } else if (!memcmp(&g, &extended_content_header, sizeof(GUID))) {
+ int desc_count, i;
+
+ desc_count = get_le16(pb);
+ for(i=0;i<desc_count;i++)
+ {
+ int name_len,value_type,value_len;
+ uint64_t value_num = 0;
+ char *name, *value;
+
+ name_len = get_le16(pb);
+ name = (char *)av_malloc(name_len * 2);
+ get_str16_nolen(pb, name_len, name, name_len * 2);
+ value_type = get_le16(pb);
+ value_len = get_le16(pb);
+ if ((value_type == 0) || (value_type == 1)) // unicode or byte
+ {
+ value = (char *)av_malloc(value_len * 2);
+ get_str16_nolen(pb, value_len, value,
+ value_len * 2);
+ if (strcmp(name,"WM/AlbumTitle")==0) { pstrcpy(s->album, sizeof(s->album), value); }
+ av_free(value);
+ }
+ if ((value_type >= 2) && (value_type <= 5)) // boolean or DWORD or QWORD or WORD
+ {
+ if (value_type==2) value_num = get_le32(pb);
+ if (value_type==3) value_num = get_le32(pb);
+ if (value_type==4) value_num = get_le64(pb);
+ if (value_type==5) value_num = get_le16(pb);
+ if (strcmp(name,"WM/Track")==0) s->track = value_num + 1;
+ if (strcmp(name,"WM/TrackNumber")==0) s->track = value_num;
+ }
+ av_free(name);
+ }
+ } else if (!memcmp(&g, &ext_stream_header, sizeof(GUID))) {
+ int ext_len, payload_ext_ct, stream_ct;
+ uint32_t ext_d;
+ int64_t pos_ex_st;
+ pos_ex_st = url_ftell(pb);
+
+ get_le64(pb);
+ get_le64(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le16(pb);
+ get_le16(pb);
+ get_le64(pb);
+ stream_ct = get_le16(pb);
+ payload_ext_ct = get_le16(pb);
+
+ for (i=0; i<stream_ct; i++){
+ get_le16(pb);
+ ext_len = get_le16(pb);
+ url_fseek(pb, ext_len, SEEK_CUR);
+ }
+
+ for (i=0; i<payload_ext_ct; i++){
+ get_guid(pb, &g);
+ ext_d=get_le16(pb);
+ ext_len=get_le32(pb);
+ url_fseek(pb, ext_len, SEEK_CUR);
+ }
+
+            // there could be an optional stream properties object to follow
+ // if so the next iteration will pick it up
+ } else if (!memcmp(&g, &head1_guid, sizeof(GUID))) {
+ int v1, v2;
+ get_guid(pb, &g);
+ v1 = get_le32(pb);
+ v2 = get_le16(pb);
+#if 0
+ } else if (!memcmp(&g, &codec_comment_header, sizeof(GUID))) {
+ int len, v1, n, num;
+ char str[256], *q;
+ char tag[16];
+
+ get_guid(pb, &g);
+ print_guid(&g);
+
+ n = get_le32(pb);
+ for(i=0;i<n;i++) {
+ num = get_le16(pb); /* stream number */
+ get_str16(pb, str, sizeof(str));
+ get_str16(pb, str, sizeof(str));
+ len = get_le16(pb);
+ q = tag;
+ while (len > 0) {
+ v1 = get_byte(pb);
+ if ((q - tag) < sizeof(tag) - 1)
+ *q++ = v1;
+ len--;
+ }
+ *q = '\0';
+ }
+#endif
+ } else if (url_feof(pb)) {
+ goto fail;
+ } else {
+ url_fseek(pb, gsize - 24, SEEK_CUR);
+ }
+ }
+ get_guid(pb, &g);
+ get_le64(pb);
+ get_byte(pb);
+ get_byte(pb);
+ if (url_feof(pb))
+ goto fail;
+ asf->data_offset = url_ftell(pb);
+ asf->packet_size_left = 0;
+
+ return 0;
+
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st = s->streams[i];
+ if (st) {
+ av_free(st->priv_data);
+ av_free(st->codec->extradata);
+ }
+ av_free(st);
+ }
+ return -1;
+}
+
+#define DO_2BITS(bits, var, defval) \
+ switch (bits & 3) \
+ { \
+ case 3: var = get_le32(pb); rsize += 4; break; \
+ case 2: var = get_le16(pb); rsize += 2; break; \
+ case 1: var = get_byte(pb); rsize++; break; \
+ default: var = defval; break; \
+ }
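+
+/* DO_2BITS reads a field whose width is encoded in two bits of a flags
+ * byte (3 -> le32, 2 -> le16, 1 -> byte, 0 -> absent, use the default)
+ * and bumps rsize by the number of bytes consumed.  E.g.
+ * DO_2BITS(asf->packet_flags >> 5, packet_length, asf->packet_size)
+ * reads the packet length field sized by bits 5-6 of the packet flags. */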
+
+static int asf_get_packet(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t packet_length, padsize;
+ int rsize = 9;
+ int c;
+
+ assert((url_ftell(&s->pb) - s->data_offset) % asf->packet_size == 0);
+
+ c = get_byte(pb);
+ if (c != 0x82) {
+ if (!url_feof(pb))
+ av_log(s, AV_LOG_ERROR, "ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb));
+ }
+ if ((c & 0x0f) == 2) { // always true for now
+ if (get_le16(pb) != 0) {
+ if (!url_feof(pb))
+ av_log(s, AV_LOG_ERROR, "ff asf bad non zero\n");
+ return AVERROR_IO;
+ }
+ rsize+=2;
+/* }else{
+ if (!url_feof(pb))
+ printf("ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb));
+ return AVERROR_IO;*/
+ }
+
+ asf->packet_flags = get_byte(pb);
+ asf->packet_property = get_byte(pb);
+
+ DO_2BITS(asf->packet_flags >> 5, packet_length, asf->packet_size);
+ DO_2BITS(asf->packet_flags >> 1, padsize, 0); // sequence ignored
+ DO_2BITS(asf->packet_flags >> 3, padsize, 0); // padding length
+
+ //the following checks prevent overflows and infinite loops
+ if(packet_length >= (1U<<29)){
+ av_log(s, AV_LOG_ERROR, "invalid packet_length %d at:%"PRId64"\n", packet_length, url_ftell(pb));
+ return 0; // FIXME this should be -1
+ }
+ if(padsize >= (1U<<29)){
+ av_log(s, AV_LOG_ERROR, "invalid padsize %d at:%"PRId64"\n", padsize, url_ftell(pb));
+ return 0; // FIXME this should be -1
+ }
+
+ asf->packet_timestamp = get_le32(pb);
+ get_le16(pb); /* duration */
+    // rsize now counts at least the 11 header bytes that have to be present
+
+ if (asf->packet_flags & 0x01) {
+ asf->packet_segsizetype = get_byte(pb); rsize++;
+ asf->packet_segments = asf->packet_segsizetype & 0x3f;
+ } else {
+ asf->packet_segments = 1;
+ asf->packet_segsizetype = 0x80;
+ }
+ asf->packet_size_left = packet_length - padsize - rsize;
+ if (packet_length < asf->hdr.min_pktsize)
+ padsize += asf->hdr.min_pktsize - packet_length;
+ asf->packet_padsize = padsize;
+#ifdef DEBUG
+ printf("packet: size=%d padsize=%d left=%d\n", asf->packet_size, asf->packet_padsize, asf->packet_size_left);
+#endif
+ return 0;
+}
+
+static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ASFContext *asf = s->priv_data;
+ ASFStream *asf_st = 0;
+ ByteIOContext *pb = &s->pb;
+ //static int pc = 0;
+ for (;;) {
+ int rsize = 0;
+ if (asf->packet_size_left < FRAME_HEADER_SIZE
+ || asf->packet_segments < 1) {
+ //asf->packet_size_left <= asf->packet_padsize) {
+ int ret = asf->packet_size_left + asf->packet_padsize;
+ //printf("PacketLeftSize:%d Pad:%d Pos:%"PRId64"\n", asf->packet_size_left, asf->packet_padsize, url_ftell(pb));
+ if((url_ftell(&s->pb) + ret - s->data_offset) % asf->packet_size)
+ ret += asf->packet_size - ((url_ftell(&s->pb) + ret - s->data_offset) % asf->packet_size);
+ assert(ret>=0);
+ /* fail safe */
+ url_fskip(pb, ret);
+ asf->packet_pos= url_ftell(&s->pb);
+ if (asf->data_object_size != (uint64_t)-1 &&
+ (asf->packet_pos - asf->data_object_offset >= asf->data_object_size))
+ return AVERROR_IO; /* Do not exceed the size of the data object */
+ ret = asf_get_packet(s);
+ //printf("READ ASF PACKET %d r:%d c:%d\n", ret, asf->packet_size_left, pc++);
+ if (ret < 0 || url_feof(pb))
+ return AVERROR_IO;
+ asf->packet_time_start = 0;
+ continue;
+ }
+ if (asf->packet_time_start == 0) {
+ /* read frame header */
+ int num = get_byte(pb);
+ asf->packet_segments--;
+ rsize++;
+ asf->packet_key_frame = (num & 0x80) >> 7;
+ asf->stream_index = asf->asfid2avid[num & 0x7f];
+ // sequence should be ignored!
+ DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0);
+ DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0);
+ DO_2BITS(asf->packet_property, asf->packet_replic_size, 0);
+//printf("key:%d stream:%d seq:%d offset:%d replic_size:%d\n", asf->packet_key_frame, asf->stream_index, asf->packet_seq, //asf->packet_frag_offset, asf->packet_replic_size);
+ if (asf->packet_replic_size > 1) {
+ assert(asf->packet_replic_size >= 8);
+                // it should always be at least 8 bytes - FIXME validate
+ asf->packet_obj_size = get_le32(pb);
+ asf->packet_frag_timestamp = get_le32(pb); // timestamp
+ if (asf->packet_replic_size > 8)
+ url_fskip(pb, asf->packet_replic_size - 8);
+ rsize += asf->packet_replic_size; // FIXME - check validity
+ } else if (asf->packet_replic_size==1){
+                // multipacket - frag_offset is the beginning timestamp
+ asf->packet_time_start = asf->packet_frag_offset;
+ asf->packet_frag_offset = 0;
+ asf->packet_frag_timestamp = asf->packet_timestamp;
+
+ asf->packet_time_delta = get_byte(pb);
+ rsize++;
+ }else{
+ assert(asf->packet_replic_size==0);
+ }
+ if (asf->packet_flags & 0x01) {
+ DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
+#undef DO_2BITS
+ //printf("Fragsize %d\n", asf->packet_frag_size);
+ } else {
+ asf->packet_frag_size = asf->packet_size_left - rsize;
+ //printf("Using rest %d %d %d\n", asf->packet_frag_size, asf->packet_size_left, rsize);
+ }
+ if (asf->packet_replic_size == 1) {
+ asf->packet_multi_size = asf->packet_frag_size;
+ if (asf->packet_multi_size > asf->packet_size_left) {
+ asf->packet_segments = 0;
+ continue;
+ }
+ }
+ asf->packet_size_left -= rsize;
+ //printf("___objsize____ %d %d rs:%d\n", asf->packet_obj_size, asf->packet_frag_offset, rsize);
+
+ if (asf->stream_index < 0
+ || s->streams[asf->stream_index]->discard >= AVDISCARD_ALL
+ || (!asf->packet_key_frame && s->streams[asf->stream_index]->discard >= AVDISCARD_NONKEY)
+ ) {
+ asf->packet_time_start = 0;
+ /* unhandled packet (should not happen) */
+ url_fskip(pb, asf->packet_frag_size);
+ asf->packet_size_left -= asf->packet_frag_size;
+ if(asf->stream_index < 0)
+ av_log(s, AV_LOG_ERROR, "ff asf skip %d %d\n", asf->packet_frag_size, num & 0x7f);
+ continue;
+ }
+ asf->asf_st = s->streams[asf->stream_index]->priv_data;
+ }
+ asf_st = asf->asf_st;
+
+ if ((asf->packet_frag_offset != asf_st->frag_offset
+ || (asf->packet_frag_offset
+ && asf->packet_seq != asf_st->seq)) // seq should be ignored
+ ) {
+ /* cannot continue current packet: free it */
+ // FIXME better check if packet was already allocated
+ av_log(s, AV_LOG_INFO, "ff asf parser skips: %d - %d o:%d - %d %d %d fl:%d\n",
+ asf_st->pkt.size,
+ asf->packet_obj_size,
+ asf->packet_frag_offset, asf_st->frag_offset,
+ asf->packet_seq, asf_st->seq, asf->packet_frag_size);
+ if (asf_st->pkt.size)
+ av_free_packet(&asf_st->pkt);
+ asf_st->frag_offset = 0;
+ if (asf->packet_frag_offset != 0) {
+ url_fskip(pb, asf->packet_frag_size);
+ av_log(s, AV_LOG_INFO, "ff asf parser skipping %db\n", asf->packet_frag_size);
+ asf->packet_size_left -= asf->packet_frag_size;
+ continue;
+ }
+ }
+ if (asf->packet_replic_size == 1) {
+            // frag_offset is used here as the beginning timestamp
+ asf->packet_frag_timestamp = asf->packet_time_start;
+ asf->packet_time_start += asf->packet_time_delta;
+ asf->packet_obj_size = asf->packet_frag_size = get_byte(pb);
+ asf->packet_size_left--;
+ asf->packet_multi_size--;
+ if (asf->packet_multi_size < asf->packet_obj_size)
+ {
+ asf->packet_time_start = 0;
+ url_fskip(pb, asf->packet_multi_size);
+ asf->packet_size_left -= asf->packet_multi_size;
+ continue;
+ }
+ asf->packet_multi_size -= asf->packet_obj_size;
+ //printf("COMPRESS size %d %d %d ms:%d\n", asf->packet_obj_size, asf->packet_frag_timestamp, asf->packet_size_left, asf->packet_multi_size);
+ }
+ if (asf_st->frag_offset == 0) {
+ /* new packet */
+ av_new_packet(&asf_st->pkt, asf->packet_obj_size);
+ asf_st->seq = asf->packet_seq;
+ asf_st->pkt.pts = asf->packet_frag_timestamp;
+ asf_st->pkt.stream_index = asf->stream_index;
+ asf_st->pkt.pos =
+ asf_st->packet_pos= asf->packet_pos;
+//printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
+//asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & PKT_FLAG_KEY,
+//s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO, asf->packet_obj_size);
+ if (s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO)
+ asf->packet_key_frame = 1;
+ if (asf->packet_key_frame)
+ asf_st->pkt.flags |= PKT_FLAG_KEY;
+ }
+
+ /* read data */
+ //printf("READ PACKET s:%d os:%d o:%d,%d l:%d DATA:%p\n",
+ // asf->packet_size, asf_st->pkt.size, asf->packet_frag_offset,
+ // asf_st->frag_offset, asf->packet_frag_size, asf_st->pkt.data);
+ asf->packet_size_left -= asf->packet_frag_size;
+ if (asf->packet_size_left < 0)
+ continue;
+ get_buffer(pb, asf_st->pkt.data + asf->packet_frag_offset,
+ asf->packet_frag_size);
+ asf_st->frag_offset += asf->packet_frag_size;
+ /* test if whole packet is read */
+ if (asf_st->frag_offset == asf_st->pkt.size) {
+ /* return packet */
+ if (asf_st->ds_span > 1) {
+ /* packet descrambling */
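+                /* The chunks of this packet were stored interleaved across
+                 * ds_span packets; copy them back into their natural order.
+                 * With ds_span == 2 and ds_packet_size/ds_chunk_size == 4,
+                 * output chunk k comes from input chunk (k/2) + (k%2)*4,
+                 * i.e. the order 0,4,1,5,2,6,3,7. */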
+ uint8_t *newdata = av_malloc(asf_st->pkt.size);
+ if (newdata) {
+ int offset = 0;
+ while (offset < asf_st->pkt.size) {
+ int off = offset / asf_st->ds_chunk_size;
+ int row = off / asf_st->ds_span;
+ int col = off % asf_st->ds_span;
+ int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size;
+ //printf("off:%d row:%d col:%d idx:%d\n", off, row, col, idx);
+ memcpy(newdata + offset,
+ asf_st->pkt.data + idx * asf_st->ds_chunk_size,
+ asf_st->ds_chunk_size);
+ offset += asf_st->ds_chunk_size;
+ }
+ av_free(asf_st->pkt.data);
+ asf_st->pkt.data = newdata;
+ }
+ }
+ asf_st->frag_offset = 0;
+ memcpy(pkt, &asf_st->pkt, sizeof(AVPacket));
+ //printf("packet %d %d\n", asf_st->pkt.size, asf->packet_frag_size);
+ asf_st->pkt.size = 0;
+ asf_st->pkt.data = 0;
+ break; // packet completed
+ }
+ }
+ return 0;
+}
+
+static int asf_read_close(AVFormatContext *s)
+{
+ int i;
+
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st = s->streams[i];
+ av_free(st->priv_data);
+ av_free(st->codec->palctrl);
+ }
+ return 0;
+}
+
+// Added to support seeking after packets have been read
+// If information is not reset, read_packet fails due to
+// leftover information from previous reads
+static void asf_reset_header(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+ ASFStream *asf_st;
+ int i;
+
+ asf->packet_nb_frames = 0;
+ asf->packet_timestamp_start = -1;
+ asf->packet_timestamp_end = -1;
+ asf->packet_size_left = 0;
+ asf->packet_segments = 0;
+ asf->packet_flags = 0;
+ asf->packet_property = 0;
+ asf->packet_timestamp = 0;
+ asf->packet_segsizetype = 0;
+ asf->packet_segments = 0;
+ asf->packet_seq = 0;
+ asf->packet_replic_size = 0;
+ asf->packet_key_frame = 0;
+ asf->packet_padsize = 0;
+ asf->packet_frag_offset = 0;
+ asf->packet_frag_size = 0;
+ asf->packet_frag_timestamp = 0;
+ asf->packet_multi_size = 0;
+ asf->packet_obj_size = 0;
+ asf->packet_time_delta = 0;
+ asf->packet_time_start = 0;
+
+ for(i=0; i<s->nb_streams; i++){
+ asf_st= s->streams[i]->priv_data;
+ av_free_packet(&asf_st->pkt);
+ asf_st->frag_offset=0;
+ asf_st->seq=0;
+ }
+ asf->asf_st= NULL;
+}
+
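+// read_timestamp callback used by av_seek_frame_binary() (see asf_read_seek):
+// round *ppos up to the next packet boundary, read frames until a key frame
+// of the requested stream is found (registering index entries on the way)
+// and return its pts, or AV_NOPTS_VALUE on failure.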
+static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit)
+{
+ ASFContext *asf = s->priv_data;
+ AVPacket pkt1, *pkt = &pkt1;
+ ASFStream *asf_st;
+ int64_t pts;
+ int64_t pos= *ppos;
+ int i;
+ int64_t start_pos[s->nb_streams];
+
+ for(i=0; i<s->nb_streams; i++){
+ start_pos[i]= pos;
+ }
+
+ pos= (pos+asf->packet_size-1-s->data_offset)/asf->packet_size*asf->packet_size+ s->data_offset;
+ *ppos= pos;
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+//printf("asf_read_pts\n");
+ asf_reset_header(s);
+ for(;;){
+ if (av_read_frame(s, pkt) < 0){
+ av_log(s, AV_LOG_INFO, "seek failed\n");
+ return AV_NOPTS_VALUE;
+ }
+
+ pts= pkt->pts;
+
+ av_free_packet(pkt);
+ if(pkt->flags&PKT_FLAG_KEY){
+ i= pkt->stream_index;
+
+ asf_st= s->streams[i]->priv_data;
+
+ assert((asf_st->packet_pos - s->data_offset) % asf->packet_size == 0);
+ pos= asf_st->packet_pos;
+
+ av_add_index_entry(s->streams[i], pos, pts, pkt->size, pos - start_pos[i] + 1, AVINDEX_KEYFRAME);
+ start_pos[i]= asf_st->packet_pos + 1;
+
+ if(pkt->stream_index == stream_index)
+ break;
+ }
+ }
+
+ *ppos= pos;
+//printf("found keyframe at %"PRId64" stream %d stamp:%"PRId64"\n", *ppos, stream_index, pts);
+
+ return pts;
+}
+
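+// If a simple index object follows the data object, use it: each entry holds
+// a packet number and a packet count for one index time interval (itime, in
+// 100 ns units), so entry i is registered at pts i*itime/10000 ms and file
+// offset data_offset + packet_size*packet_number.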
+static void asf_build_simple_index(AVFormatContext *s, int stream_index)
+{
+ GUID g;
+ ASFContext *asf = s->priv_data;
+ int64_t gsize, itime;
+ int64_t pos, current_pos, index_pts;
+ int i;
+ int pct,ict;
+
+ current_pos = url_ftell(&s->pb);
+
+ url_fseek(&s->pb, asf->data_object_offset + asf->data_object_size, SEEK_SET);
+ get_guid(&s->pb, &g);
+ if (!memcmp(&g, &index_guid, sizeof(GUID))) {
+ gsize = get_le64(&s->pb);
+ get_guid(&s->pb, &g);
+ itime=get_le64(&s->pb);
+ pct=get_le32(&s->pb);
+ ict=get_le32(&s->pb);
+ av_log(NULL, AV_LOG_DEBUG, "itime:0x%"PRIx64", pct:%d, ict:%d\n",itime,pct,ict);
+
+ for (i=0;i<ict;i++){
+ int pktnum=get_le32(&s->pb);
+ int pktct =get_le16(&s->pb);
+ av_log(NULL, AV_LOG_DEBUG, "pktnum:%d, pktct:%d\n", pktnum, pktct);
+
+ pos=s->data_offset + asf->packet_size*(int64_t)pktnum;
+ index_pts=av_rescale(itime, i, 10000);
+
+ av_add_index_entry(s->streams[stream_index], pos, index_pts, asf->packet_size, 0, AVINDEX_KEYFRAME);
+ }
+ asf->index_read= 1;
+ }
+ url_fseek(&s->pb, current_pos, SEEK_SET);
+}
+
+static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags)
+{
+ ASFContext *asf = s->priv_data;
+ AVStream *st = s->streams[stream_index];
+ int64_t pos;
+ int index;
+
+ if (asf->packet_size <= 0)
+ return -1;
+
+ if (!asf->index_read)
+ asf_build_simple_index(s, stream_index);
+
+ if(!(asf->index_read && st->index_entries)){
+ if(av_seek_frame_binary(s, stream_index, pts, flags)<0)
+ return -1;
+ }else{
+ index= av_index_search_timestamp(st, pts, flags);
+ if(index<0)
+ return -1;
+
+ /* find the position */
+ pos = st->index_entries[index].pos;
+ pts = st->index_entries[index].timestamp;
+
+ // various attempts to find key frame have failed so far
+ // asf_reset_header(s);
+ // url_fseek(&s->pb, pos, SEEK_SET);
+ // key_pos = pos;
+ // for(i=0;i<16;i++){
+ // pos = url_ftell(&s->pb);
+ // if (av_read_frame(s, &pkt) < 0){
+ // av_log(s, AV_LOG_INFO, "seek failed\n");
+ // return -1;
+ // }
+ // asf_st = s->streams[stream_index]->priv_data;
+ // pos += st->parser->frame_offset;
+ //
+ // if (pkt.size > b) {
+ // b = pkt.size;
+ // key_pos = pos;
+ // }
+ //
+ // av_free_packet(&pkt);
+ // }
+
+ /* do the seek */
+ av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos);
+ url_fseek(&s->pb, pos, SEEK_SET);
+ }
+ asf_reset_header(s);
+ return 0;
+}
+
+AVInputFormat asf_demuxer = {
+ "asf",
+ "asf format",
+ sizeof(ASFContext),
+ asf_probe,
+ asf_read_header,
+ asf_read_packet,
+ asf_read_close,
+ asf_read_seek,
+ asf_read_pts,
+};
diff --git a/contrib/ffmpeg/libavformat/asf.h b/contrib/ffmpeg/libavformat/asf.h
new file mode 100644
index 000000000..bbe88801a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/asf.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#define PACKET_SIZE 3200
+
+typedef struct {
+ int num;
+ unsigned char seq;
+ /* use for reading */
+ AVPacket pkt;
+ int frag_offset;
+ int timestamp;
+ int64_t duration;
+
+ int ds_span; /* descrambling */
+ int ds_packet_size;
+ int ds_chunk_size;
+ int ds_data_size;
+ int ds_silence_data;
+
+ int64_t packet_pos;
+
+} ASFStream;
+
+typedef struct {
+ uint32_t v1;
+ uint16_t v2;
+ uint16_t v3;
+ uint8_t v4[8];
+} GUID;
+
+typedef struct {
+ GUID guid; // generated by client computer
+ uint64_t file_size; // in bytes
+ // invalid if broadcasting
+ uint64_t create_time; // time of creation, in 100-nanosecond units since 1.1.1601
+ // invalid if broadcasting
+ uint64_t packets_count; // how many packets are there in the file
+ // invalid if broadcasting
+ uint64_t play_time; // play time, in 100-nanosecond units
+ // invalid if broadcasting
+ uint64_t send_time; // time to send file, in 100-nanosecond units
+ // invalid if broadcasting (could be ignored)
+ uint32_t preroll; // timestamp of the first packet, in milliseconds
+                                // if nonzero - subtract from time
+ uint32_t ignore; // preroll is 64bit - but let's just ignore it
+ uint32_t flags; // 0x01 - broadcast
+ // 0x02 - seekable
+                                // rest is reserved and should be 0
+ uint32_t min_pktsize; // size of a data packet
+ // invalid if broadcasting
+ uint32_t max_pktsize; // shall be the same as for min_pktsize
+ // invalid if broadcasting
+    uint32_t max_bitrate;       // bandwidth of stream in bps
+ // should be the sum of bitrates of the
+ // individual media streams
+} ASFMainHeader;
+
+
+typedef struct {
+ uint32_t packet_number;
+ uint16_t packet_count;
+} ASFIndex;
+
+
+typedef struct {
+ uint32_t seqno;
+ unsigned int packet_size;
+ int is_streamed;
+    int asfid2avid[128];                 /* conversion table from ASF stream ID to AVStream index */
+    ASFStream streams[128];              /* 128 is the maximum stream count and it's not that big */
+    /* additional info for non-streamed output */
+ int64_t nb_packets;
+ int64_t duration; /* in 100ns units */
+ /* packet filling */
+ unsigned char multi_payloads_present;
+ int packet_size_left;
+ int prev_packet_sent_time;
+ int packet_timestamp_start;
+ int packet_timestamp_end;
+ unsigned int packet_nb_payloads;
+ int packet_nb_frames;
+ uint8_t packet_buf[PACKET_SIZE];
+ ByteIOContext pb;
+ /* only for reading */
+    uint64_t data_offset;                /* beginning of the first data packet */
+    uint64_t data_object_offset;         /* data object offset (excl. GUID & size) */
+ uint64_t data_object_size; /* size of the data object */
+ int index_read;
+
+ ASFMainHeader hdr;
+
+ int packet_flags;
+ int packet_property;
+ int packet_timestamp;
+ int packet_segsizetype;
+ int packet_segments;
+ int packet_seq;
+ int packet_replic_size;
+ int packet_key_frame;
+ int packet_padsize;
+ int packet_frag_offset;
+ int packet_frag_size;
+ int packet_frag_timestamp;
+ int packet_multi_size;
+ int packet_obj_size;
+ int packet_time_delta;
+ int packet_time_start;
+ int64_t packet_pos;
+
+ int stream_index;
+
+
+ int64_t last_indexed_pts;
+ ASFIndex* index_ptr;
+ uint32_t nb_index_count;
+ uint32_t nb_index_memory_alloc;
+ uint16_t maximum_packet;
+
+ ASFStream* asf_st; /* currently decoded stream */
+} ASFContext;
+
+static const GUID asf_header = {
+ 0x75B22630, 0x668E, 0x11CF, { 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C },
+};
+
+static const GUID file_header = {
+ 0x8CABDCA1, 0xA947, 0x11CF, { 0x8E, 0xE4, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 },
+};
+
+static const GUID stream_header = {
+ 0xB7DC0791, 0xA9B7, 0x11CF, { 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 },
+};
+
+static const GUID ext_stream_header = {
+ 0x14E6A5CB, 0xC672, 0x4332, { 0x83, 0x99, 0xA9, 0x69, 0x52, 0x06, 0x5B, 0x5A },
+};
+
+static const GUID audio_stream = {
+ 0xF8699E40, 0x5B4D, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+};
+
+static const GUID audio_conceal_none = {
+ // 0x49f1a440, 0x4ece, 0x11d0, { 0xa3, 0xac, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6 },
+ // New value lifted from avifile
+ 0x20fb5700, 0x5b55, 0x11cf, { 0xa8, 0xfd, 0x00, 0x80, 0x5f, 0x5c, 0x44, 0x2b },
+};
+
+static const GUID audio_conceal_spread = {
+ 0xBFC3CD50, 0x618F, 0x11CF, { 0x8B, 0xB2, 0x00, 0xAA, 0x00, 0xB4, 0xE2, 0x20 },
+};
+
+static const GUID video_stream = {
+ 0xBC19EFC0, 0x5B4D, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+};
+
+static const GUID video_conceal_none = {
+ 0x20FB5700, 0x5B55, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+};
+
+static const GUID command_stream = {
+ 0x59DACFC0, 0x59E6, 0x11D0, { 0xA3, 0xAC, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6 },
+};
+
+static const GUID comment_header = {
+ 0x75b22633, 0x668e, 0x11cf, { 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c },
+};
+
+static const GUID codec_comment_header = {
+ 0x86D15240, 0x311D, 0x11D0, { 0xA3, 0xA4, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6 },
+};
+static const GUID codec_comment1_header = {
+ 0x86d15241, 0x311d, 0x11d0, { 0xa3, 0xa4, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6 },
+};
+
+static const GUID data_header = {
+ 0x75b22636, 0x668e, 0x11cf, { 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c },
+};
+
+static const GUID head1_guid = {
+ 0x5fbf03b5, 0xa92e, 0x11cf, { 0x8e, 0xe3, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65 },
+};
+
+static const GUID head2_guid = {
+ 0xabd3d211, 0xa9ba, 0x11cf, { 0x8e, 0xe6, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65 },
+};
+
+static const GUID extended_content_header = {
+ 0xD2D0A440, 0xE307, 0x11D2, { 0x97, 0xF0, 0x00, 0xA0, 0xC9, 0x5E, 0xA8, 0x50 },
+};
+
+static const GUID simple_index_header = {
+ 0x33000890, 0xE5B1, 0x11CF, { 0x89, 0xF4, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xCB },
+};
+
+static const GUID ext_stream_embed_stream_header = {
+ 0x3afb65e2, 0x47ef, 0x40f2, { 0xac, 0x2c, 0x70, 0xa9, 0x0d, 0x71, 0xd3, 0x43}
+};
+
+static const GUID ext_stream_audio_stream = {
+ 0x31178c9d, 0x03e1, 0x4528, { 0xb5, 0x82, 0x3d, 0xf9, 0xdb, 0x22, 0xf5, 0x03}
+};
+
+/* I am not a number !!! This GUID is the one found on the PC used to
+ generate the stream */
+static const GUID my_guid = {
+ 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+#define ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT 0x80 //1000 0000
+
+
+// ASF data packet structure
+// =========================
+//
+//
+// -----------------------------------
+// | Error Correction Data | Optional
+// -----------------------------------
+// | Payload Parsing Information (PPI) |
+// -----------------------------------
+// | Payload Data |
+// -----------------------------------
+// | Padding Data |
+// -----------------------------------
+
+
+// PPI_FLAG - Payload parsing information flags
+#define ASF_PPI_FLAG_MULTIPLE_PAYLOADS_PRESENT 1
+
+#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_BYTE 0x02 //0000 0010
+#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_WORD 0x04 //0000 0100
+#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_DWORD 0x06 //0000 0110
+#define ASF_PPI_MASK_SEQUENCE_FIELD_SIZE 0x06 //0000 0110
+
+#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE 0x08 //0000 1000
+#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD 0x10 //0001 0000
+#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_DWORD 0x18 //0001 1000
+#define ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE 0x18 //0001 1000
+
+#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_BYTE 0x20 //0010 0000
+#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_WORD 0x40 //0100 0000
+#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_DWORD 0x60 //0110 0000
+#define ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE 0x60 //0110 0000
+
+// PL_FLAG - Payload flags
+#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE 0x01 //0000 0001
+#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_WORD 0x02 //0000 0010
+#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_DWORD 0x03 //0000 0011
+#define ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE 0x03 //0000 0011
+
+#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_BYTE 0x04 //0000 0100
+#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_WORD 0x08 //0000 1000
+#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD 0x0c //0000 1100
+#define ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE 0x0c //0000 1100
+
+#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE 0x10 //0001 0000
+#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_WORD 0x20 //0010 0000
+#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_DWORD 0x30 //0011 0000
+#define ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE 0x30 //0011 0000
+
+#define ASF_PL_FLAG_STREAM_NUMBER_LENGTH_FIELD_IS_BYTE 0x40 //0100 0000
+#define ASF_PL_MASK_STREAM_NUMBER_LENGTH_FIELD_SIZE 0xc0 //1100 0000
+
+#define ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_BYTE 0x40 //0100 0000
+#define ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD 0x80 //1000 0000
+#define ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE 0xc0 //1100 0000
+
+#define ASF_PL_FLAG_KEY_FRAME 0x80 //1000 0000
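
The two-bit "field is byte/word/dword" encoding shared by the ASF_PPI_* and ASF_PL_* masks above can be summarised with a small helper. The sketch below is illustrative only and not part of the patch; the name asf_length_field_size is made up here.

#include <stdint.h>

/* Hypothetical helper (not part of the patch): each two-bit
 * "..._FIELD_IS_BYTE/WORD/DWORD" group above encodes the width of an
 * optional length field as 0 = absent, 1 = byte, 2 = word, 3 = dword. */
static int asf_length_field_size(uint8_t flags, uint8_t mask)
{
    int shift = 0;

    /* shift the masked bits down to bit 0; every mask defined above
       covers two contiguous bits */
    while (!((mask >> shift) & 1))
        shift++;

    switch ((flags & mask) >> shift) {
    case 1:  return 1;  /* ..._IS_BYTE  */
    case 2:  return 2;  /* ..._IS_WORD  */
    case 3:  return 4;  /* ..._IS_DWORD */
    default: return 0;  /* field not present */
    }
}

/* e.g. asf_length_field_size(flags, ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE)
   yields 0, 1, 2 or 4 depending on how the padding length is stored. */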
diff --git a/contrib/ffmpeg/libavformat/au.c b/contrib/ffmpeg/libavformat/au.c
new file mode 100644
index 000000000..27c7cdc85
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/au.c
@@ -0,0 +1,209 @@
+/*
+ * AU muxer and demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * First version by Francois Revol revol@free.fr
+ *
+ * Reference documents:
+ * http://www.opengroup.org/public/pubs/external/auformat.html
+ * http://www.goice.co.jp/member/mo/formats/au.html
+ */
+
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+
+/* if we don't know the size in advance */
+#define AU_UNKOWN_SIZE ((uint32_t)(~0))
+
+/* The ffmpeg codecs we support, and the IDs they have in the file */
+static const CodecTag codec_au_tags[] = {
+ { CODEC_ID_PCM_MULAW, 1 },
+ { CODEC_ID_PCM_S16BE, 3 },
+ { CODEC_ID_PCM_ALAW, 27 },
+ { 0, 0 },
+};
+
+#ifdef CONFIG_MUXERS
+/* AUDIO_FILE header */
+static int put_au_header(ByteIOContext *pb, AVCodecContext *enc)
+{
+ if(!enc->codec_tag)
+ enc->codec_tag = codec_get_tag(codec_au_tags, enc->codec_id);
+ if(!enc->codec_tag)
+ return -1;
+ put_tag(pb, ".snd"); /* magic number */
+ put_be32(pb, 24); /* header size */
+ put_be32(pb, AU_UNKOWN_SIZE); /* data size */
+ put_be32(pb, (uint32_t)enc->codec_tag); /* codec ID */
+ put_be32(pb, enc->sample_rate);
+ put_be32(pb, (uint32_t)enc->channels);
+ return 0;
+}
+
+static int au_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+
+ s->priv_data = NULL;
+
+ /* format header */
+ if (put_au_header(pb, s->streams[0]->codec) < 0) {
+ return -1;
+ }
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int au_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int au_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ offset_t file_size;
+
+ if (!url_is_streamed(&s->pb)) {
+
+ /* update file size */
+ file_size = url_ftell(pb);
+ url_fseek(pb, 8, SEEK_SET);
+ put_be32(pb, (uint32_t)(file_size - 24));
+ url_fseek(pb, file_size, SEEK_SET);
+
+ put_flush_packet(pb);
+ }
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+static int au_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 24)
+ return 0;
+ if (p->buf[0] == '.' && p->buf[1] == 's' &&
+ p->buf[2] == 'n' && p->buf[3] == 'd')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* au input */
+static int au_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int size;
+ unsigned int tag;
+ ByteIOContext *pb = &s->pb;
+ unsigned int id, codec, channels, rate;
+ AVStream *st;
+
+ /* check ".snd" header */
+ tag = get_le32(pb);
+ if (tag != MKTAG('.', 's', 'n', 'd'))
+ return -1;
+ size = get_be32(pb); /* header size */
+ get_be32(pb); /* data size */
+
+ id = get_be32(pb);
+ rate = get_be32(pb);
+ channels = get_be32(pb);
+
+ codec = codec_get_id(codec_au_tags, id);
+
+ if (size >= 24) {
+ /* skip unused data */
+ url_fseek(pb, size - 24, SEEK_CUR);
+ }
+
+ /* now we are ready: build format streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = id;
+ st->codec->codec_id = codec;
+ st->codec->channels = channels;
+ st->codec->sample_rate = rate;
+ av_set_pts_info(st, 64, 1, rate);
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int au_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ int ret;
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+ ret= av_get_packet(&s->pb, pkt, MAX_SIZE);
+ if (ret < 0)
+ return AVERROR_IO;
+ pkt->stream_index = 0;
+
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return 0;
+}
+
+static int au_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+#ifdef CONFIG_AU_DEMUXER
+AVInputFormat au_demuxer = {
+ "au",
+ "SUN AU Format",
+ 0,
+ au_probe,
+ au_read_header,
+ au_read_packet,
+ au_read_close,
+ pcm_read_seek,
+};
+#endif
+
+#ifdef CONFIG_AU_MUXER
+AVOutputFormat au_muxer = {
+ "au",
+ "SUN AU Format",
+ "audio/basic",
+ "au",
+ 0,
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_NONE,
+ au_write_header,
+ au_write_packet,
+ au_write_trailer,
+};
+#endif //CONFIG_AU_MUXER
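
As a companion to the muxer/demuxer above, here is a small stand-alone sketch (not part of the patch; dump_au_header and rd_be32 are names invented for illustration) that reads the 24-byte big-endian AU header in the same field order that put_au_header() writes and au_read_header() parses.

#include <stdint.h>
#include <stdio.h>

/* read one big-endian 32-bit value with plain stdio */
static uint32_t rd_be32(FILE *f)
{
    uint32_t v = 0;
    int i;
    for (i = 0; i < 4; i++)
        v = (v << 8) | (uint32_t)(fgetc(f) & 0xff);
    return v;
}

static int dump_au_header(const char *name)
{
    FILE *f = fopen(name, "rb");
    uint32_t hdr_size, data_size, encoding, rate, channels;

    if (!f)
        return -1;
    /* ".snd" magic, as checked in au_probe()/au_read_header() */
    if (fgetc(f) != '.' || fgetc(f) != 's' ||
        fgetc(f) != 'n' || fgetc(f) != 'd') {
        fclose(f);
        return -1;
    }
    hdr_size  = rd_be32(f); /* 24 when written by put_au_header() */
    data_size = rd_be32(f); /* ~0 (AU_UNKOWN_SIZE) if not known */
    encoding  = rd_be32(f); /* 1 = mu-law, 3 = 16-bit PCM, 27 = A-law */
    rate      = rd_be32(f);
    channels  = rd_be32(f);
    printf("hdr=%u data=%u enc=%u rate=%u ch=%u\n",
           hdr_size, data_size, encoding, rate, channels);
    fclose(f);
    return 0;
}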
diff --git a/contrib/ffmpeg/libavformat/audio.c b/contrib/ffmpeg/libavformat/audio.c
new file mode 100644
index 000000000..1dfccccb8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/audio.c
@@ -0,0 +1,352 @@
+/*
+ * Linux audio play and grab interface
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef __OpenBSD__
+#include <soundcard.h>
+#else
+#include <sys/soundcard.h>
+#endif
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+
+#define AUDIO_BLOCK_SIZE 4096
+
+typedef struct {
+ int fd;
+ int sample_rate;
+ int channels;
+ int frame_size; /* in bytes ! */
+ int codec_id;
+ int flip_left : 1;
+ uint8_t buffer[AUDIO_BLOCK_SIZE];
+ int buffer_ptr;
+} AudioData;
+
+static int audio_open(AudioData *s, int is_output, const char *audio_device)
+{
+ int audio_fd;
+ int tmp, err;
+ char *flip = getenv("AUDIO_FLIP_LEFT");
+
+ /* open linux audio device */
+ if (!audio_device)
+#ifdef __OpenBSD__
+ audio_device = "/dev/sound";
+#else
+ audio_device = "/dev/dsp";
+#endif
+
+ if (is_output)
+ audio_fd = open(audio_device, O_WRONLY);
+ else
+ audio_fd = open(audio_device, O_RDONLY);
+ if (audio_fd < 0) {
+ perror(audio_device);
+ return AVERROR_IO;
+ }
+
+ if (flip && *flip == '1') {
+ s->flip_left = 1;
+ }
+
+ /* non blocking mode */
+ if (!is_output)
+ fcntl(audio_fd, F_SETFL, O_NONBLOCK);
+
+ s->frame_size = AUDIO_BLOCK_SIZE;
+#if 0
+ tmp = (NB_FRAGMENTS << 16) | FRAGMENT_BITS;
+ err = ioctl(audio_fd, SNDCTL_DSP_SETFRAGMENT, &tmp);
+ if (err < 0) {
+ perror("SNDCTL_DSP_SETFRAGMENT");
+ }
+#endif
+
+ /* select format : favour native format */
+ err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
+
+#ifdef WORDS_BIGENDIAN
+ if (tmp & AFMT_S16_BE) {
+ tmp = AFMT_S16_BE;
+ } else if (tmp & AFMT_S16_LE) {
+ tmp = AFMT_S16_LE;
+ } else {
+ tmp = 0;
+ }
+#else
+ if (tmp & AFMT_S16_LE) {
+ tmp = AFMT_S16_LE;
+ } else if (tmp & AFMT_S16_BE) {
+ tmp = AFMT_S16_BE;
+ } else {
+ tmp = 0;
+ }
+#endif
+
+ switch(tmp) {
+ case AFMT_S16_LE:
+ s->codec_id = CODEC_ID_PCM_S16LE;
+ break;
+ case AFMT_S16_BE:
+ s->codec_id = CODEC_ID_PCM_S16BE;
+ break;
+ default:
+ av_log(NULL, AV_LOG_ERROR, "Soundcard does not support 16 bit sample format\n");
+ close(audio_fd);
+ return AVERROR_IO;
+ }
+ err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
+ if (err < 0) {
+ perror("SNDCTL_DSP_SETFMT");
+ goto fail;
+ }
+
+ tmp = (s->channels == 2);
+ err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
+ if (err < 0) {
+ perror("SNDCTL_DSP_STEREO");
+ goto fail;
+ }
+ if (tmp)
+ s->channels = 2;
+
+ tmp = s->sample_rate;
+ err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
+ if (err < 0) {
+ perror("SNDCTL_DSP_SPEED");
+ goto fail;
+ }
+ s->sample_rate = tmp; /* store real sample rate */
+ s->fd = audio_fd;
+
+ return 0;
+ fail:
+ close(audio_fd);
+ return AVERROR_IO;
+}
+
+static int audio_close(AudioData *s)
+{
+ close(s->fd);
+ return 0;
+}
+
+/* sound output support */
+static int audio_write_header(AVFormatContext *s1)
+{
+ AudioData *s = s1->priv_data;
+ AVStream *st;
+ int ret;
+
+ st = s1->streams[0];
+ s->sample_rate = st->codec->sample_rate;
+ s->channels = st->codec->channels;
+ ret = audio_open(s, 1, NULL);
+ if (ret < 0) {
+ return AVERROR_IO;
+ } else {
+ return 0;
+ }
+}
+
+static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ AudioData *s = s1->priv_data;
+ int len, ret;
+ int size= pkt->size;
+ uint8_t *buf= pkt->data;
+
+ while (size > 0) {
+ len = AUDIO_BLOCK_SIZE - s->buffer_ptr;
+ if (len > size)
+ len = size;
+ memcpy(s->buffer + s->buffer_ptr, buf, len);
+ s->buffer_ptr += len;
+ if (s->buffer_ptr >= AUDIO_BLOCK_SIZE) {
+ for(;;) {
+ ret = write(s->fd, s->buffer, AUDIO_BLOCK_SIZE);
+ if (ret > 0)
+ break;
+ if (ret < 0 && (errno != EAGAIN && errno != EINTR))
+ return AVERROR_IO;
+ }
+ s->buffer_ptr = 0;
+ }
+ buf += len;
+ size -= len;
+ }
+ return 0;
+}
+
+static int audio_write_trailer(AVFormatContext *s1)
+{
+ AudioData *s = s1->priv_data;
+
+ audio_close(s);
+ return 0;
+}
+
+/* grab support */
+
+static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ AudioData *s = s1->priv_data;
+ AVStream *st;
+ int ret;
+
+ if (ap->sample_rate <= 0 || ap->channels <= 0)
+ return -1;
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+ s->sample_rate = ap->sample_rate;
+ s->channels = ap->channels;
+
+ ret = audio_open(s, 0, ap->device);
+ if (ret < 0) {
+ av_free(st);
+ return AVERROR_IO;
+ }
+
+ /* take real parameters */
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = s->codec_id;
+ st->codec->sample_rate = s->sample_rate;
+ st->codec->channels = s->channels;
+
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+ return 0;
+}
+
+static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ AudioData *s = s1->priv_data;
+ int ret, bdelay;
+ int64_t cur_time;
+ struct audio_buf_info abufi;
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR_IO;
+ for(;;) {
+ struct timeval tv;
+ fd_set fds;
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 30 * 1000; /* 30 msecs -- a bit shorter than 1 frame at 30fps */
+
+ FD_ZERO(&fds);
+ FD_SET(s->fd, &fds);
+
+ /* This will block until data is available or we get a timeout */
+ (void) select(s->fd + 1, &fds, 0, 0, &tv);
+
+ ret = read(s->fd, pkt->data, pkt->size);
+ if (ret > 0)
+ break;
+ if (ret == -1 && (errno == EAGAIN || errno == EINTR)) {
+ av_free_packet(pkt);
+ pkt->size = 0;
+ pkt->pts = av_gettime();
+ return 0;
+ }
+ if (!(ret == 0 || (ret == -1 && (errno == EAGAIN || errno == EINTR)))) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ }
+ pkt->size = ret;
+
+ /* compute pts of the start of the packet */
+ cur_time = av_gettime();
+ bdelay = ret;
+ if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
+ bdelay += abufi.bytes;
+ }
+ /* subtract the time represented by the number of bytes in the audio fifo */
+ cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels);
+
+ /* convert to wanted units */
+ pkt->pts = cur_time;
+
+ if (s->flip_left && s->channels == 2) {
+ int i;
+ short *p = (short *) pkt->data;
+
+ for (i = 0; i < ret; i += 4) {
+ *p = ~*p;
+ p += 2;
+ }
+ }
+ return 0;
+}
+
+static int audio_read_close(AVFormatContext *s1)
+{
+ AudioData *s = s1->priv_data;
+
+ audio_close(s);
+ return 0;
+}
+
+#ifdef CONFIG_AUDIO_DEMUXER
+AVInputFormat audio_demuxer = {
+ "audio_device",
+ "audio grab and output",
+ sizeof(AudioData),
+ NULL,
+ audio_read_header,
+ audio_read_packet,
+ audio_read_close,
+ .flags = AVFMT_NOFILE,
+};
+#endif
+
+#ifdef CONFIG_AUDIO_MUXER
+AVOutputFormat audio_muxer = {
+ "audio_device",
+ "audio grab and output",
+ "",
+ "",
+ sizeof(AudioData),
+ /* XXX: we make the assumption that the soundcard accepts this format */
+ /* XXX: find better solution with "preinit" method, needed also in
+ other formats */
+#ifdef WORDS_BIGENDIAN
+ CODEC_ID_PCM_S16BE,
+#else
+ CODEC_ID_PCM_S16LE,
+#endif
+ CODEC_ID_NONE,
+ audio_write_header,
+ audio_write_packet,
+ audio_write_trailer,
+ .flags = AVFMT_NOFILE,
+};
+#endif
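
The 16-bit sample format negotiation in audio_open() above can be shown in isolation. The sketch below is illustrative only (oss_pick_s16 is a made-up name, a Linux-style <sys/soundcard.h> is assumed, and WORDS_BIGENDIAN is assumed to be FFmpeg's build-time endianness symbol); it uses the same SNDCTL_DSP_GETFMTS / SNDCTL_DSP_SETFMT ioctl sequence.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <unistd.h>

/* Illustrative: query supported formats, prefer the machine's native
 * 16-bit layout, then commit it, mirroring audio_open() above. */
static int oss_pick_s16(const char *dev)
{
    int fd, fmts, want;

    fd = open(dev, O_RDONLY);
    if (fd < 0) {
        perror(dev);
        return -1;
    }
    if (ioctl(fd, SNDCTL_DSP_GETFMTS, &fmts) < 0) {
        perror("SNDCTL_DSP_GETFMTS");
        close(fd);
        return -1;
    }
#ifdef WORDS_BIGENDIAN            /* FFmpeg config symbol, assumed here */
    want = (fmts & AFMT_S16_BE) ? AFMT_S16_BE : (fmts & AFMT_S16_LE) ? AFMT_S16_LE : 0;
#else
    want = (fmts & AFMT_S16_LE) ? AFMT_S16_LE : (fmts & AFMT_S16_BE) ? AFMT_S16_BE : 0;
#endif
    if (!want || ioctl(fd, SNDCTL_DSP_SETFMT, &want) < 0) {
        fprintf(stderr, "no usable 16-bit format on %s\n", dev);
        close(fd);
        return -1;
    }
    return fd;                    /* caller continues with STEREO/SPEED ioctls */
}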
diff --git a/contrib/ffmpeg/libavformat/avformat.h b/contrib/ffmpeg/libavformat/avformat.h
new file mode 100644
index 000000000..5dc41d273
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avformat.h
@@ -0,0 +1,539 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_H
+#define AVFORMAT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LIBAVFORMAT_VERSION_INT ((51<<16)+(6<<8)+0)
+#define LIBAVFORMAT_VERSION 51.6.0
+#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT
+
+#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION)
+
+#include <time.h>
+#include <stdio.h> /* FILE */
+#include "avcodec.h"
+
+#include "avio.h"
+
+/* packet functions */
+
+#ifndef MAXINT64
+#define MAXINT64 int64_t_C(0x7fffffffffffffff)
+#endif
+
+#ifndef MININT64
+#define MININT64 int64_t_C(0x8000000000000000)
+#endif
+
+typedef struct AVPacket {
+ int64_t pts; ///< presentation time stamp in time_base units
+ int64_t dts; ///< decompression time stamp in time_base units
+ uint8_t *data;
+ int size;
+ int stream_index;
+ int flags;
+ int duration; ///< presentation duration in time_base units (0 if not available)
+ void (*destruct)(struct AVPacket *);
+ void *priv;
+ int64_t pos; ///< byte position in stream, -1 if unknown
+} AVPacket;
+#define PKT_FLAG_KEY 0x0001
+
+void av_destruct_packet_nofree(AVPacket *pkt);
+void av_destruct_packet(AVPacket *pkt);
+
+/* initialize optional fields of a packet */
+static inline void av_init_packet(AVPacket *pkt)
+{
+ pkt->pts = AV_NOPTS_VALUE;
+ pkt->dts = AV_NOPTS_VALUE;
+ pkt->pos = -1;
+ pkt->duration = 0;
+ pkt->flags = 0;
+ pkt->stream_index = 0;
+ pkt->destruct= av_destruct_packet_nofree;
+}
+
+int av_new_packet(AVPacket *pkt, int size);
+int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size);
+int av_dup_packet(AVPacket *pkt);
+
+/**
+ * Free a packet
+ *
+ * @param pkt packet to free
+ */
+static inline void av_free_packet(AVPacket *pkt)
+{
+ if (pkt && pkt->destruct) {
+ pkt->destruct(pkt);
+ }
+}
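
A short usage sketch of the packet helpers declared above (illustrative only; example_packet_roundtrip is a made-up name). In this libavformat, av_new_packet() allocates the payload and sets a matching destructor, so av_free_packet() releases it through pkt->destruct.

/* Illustrative only: a minimal allocate/use/free round trip with the
 * helpers declared above. */
static void example_packet_roundtrip(void)
{
    AVPacket pkt;

    av_init_packet(&pkt);           /* pts/dts = AV_NOPTS_VALUE, no payload yet */
    if (av_new_packet(&pkt, 1024) < 0)
        return;                     /* allocation failed */
    pkt.stream_index = 0;
    /* ... fill pkt.data / pkt.size and hand the packet to a muxer ... */
    av_free_packet(&pkt);           /* invokes pkt.destruct() if one is set */
}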
+
+/*************************************************/
+/* fractional numbers for exact pts handling */
+
+/* the exact value of the fractional number is: 'val + num / den'. num
+ is assumed to be such that 0 <= num < den */
+typedef struct AVFrac {
+ int64_t val, num, den;
+} AVFrac attribute_deprecated;
+
+/*************************************************/
+/* input/output formats */
+
+struct AVFormatContext;
+
+/* this structure contains the data needed by a format to probe a file */
+typedef struct AVProbeData {
+ const char *filename;
+ unsigned char *buf;
+ int buf_size;
+} AVProbeData;
+
+#define AVPROBE_SCORE_MAX 100 ///< max score, half of that is used for file extension based detection
+
+typedef struct AVFormatParameters {
+ AVRational time_base;
+ int sample_rate;
+ int channels;
+ int width;
+ int height;
+ enum PixelFormat pix_fmt;
+ int channel; /* used to select dv channel */
+ const char *device; /* video, audio or DV device */
+ const char *standard; /* tv standard, NTSC, PAL, SECAM */
+ int mpeg2ts_raw:1; /* force raw MPEG2 transport stream output, if possible */
+ int mpeg2ts_compute_pcr:1; /* compute exact PCR for each transport
+ stream packet (only meaningful if
+ mpeg2ts_raw is TRUE) */
+ int initial_pause:1; /* do not begin to play the stream
+ immediately (RTSP only) */
+ int prealloced_context:1;
+ enum CodecID video_codec_id;
+ enum CodecID audio_codec_id;
+} AVFormatParameters;
+
+#define AVFMT_NOFILE 0x0001 /* no file should be opened */
+#define AVFMT_NEEDNUMBER 0x0002 /* needs '%d' in filename */
+#define AVFMT_SHOW_IDS 0x0008 /* show format stream ID numbers */
+#define AVFMT_RAWPICTURE 0x0020 /* format wants AVPicture structure for
+ raw picture data */
+#define AVFMT_GLOBALHEADER 0x0040 /* format wants global header */
+#define AVFMT_NOTIMESTAMPS 0x0080 /* format doesn't need / doesn't have any timestamps */
+
+typedef struct AVOutputFormat {
+ const char *name;
+ const char *long_name;
+ const char *mime_type;
+ const char *extensions; /* comma separated extensions */
+ /* size of private data so that it can be allocated in the wrapper */
+ int priv_data_size;
+ /* output support */
+ enum CodecID audio_codec; /* default audio codec */
+ enum CodecID video_codec; /* default video codec */
+ int (*write_header)(struct AVFormatContext *);
+ int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);
+ int (*write_trailer)(struct AVFormatContext *);
+ /* can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER */
+ int flags;
+ /* currently only used to set pixel format if not YUV420P */
+ int (*set_parameters)(struct AVFormatContext *, AVFormatParameters *);
+ int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, AVPacket *in, int flush);
+ /* private fields */
+ struct AVOutputFormat *next;
+} AVOutputFormat;
+
+typedef struct AVInputFormat {
+ const char *name;
+ const char *long_name;
+ /* size of private data so that it can be allocated in the wrapper */
+ int priv_data_size;
+ /* tell if a given file has a chance of being parsed by this format */
+ int (*read_probe)(AVProbeData *);
+ /* read the format header and initialize the AVFormatContext
+ structure. Return 0 if OK. 'ap', if non-NULL, contains
+ additional parameters. Only used in the raw format right
+ now. 'av_new_stream' should be called to create new streams. */
+ int (*read_header)(struct AVFormatContext *,
+ AVFormatParameters *ap);
+ /* read one packet and put it in 'pkt'. pts and flags are also
+ set. 'av_new_stream' can be called only if the flag
+ AVFMTCTX_NOHEADER is used. */
+ int (*read_packet)(struct AVFormatContext *, AVPacket *pkt);
+ /* close the stream. The AVFormatContext and AVStreams are not
+ freed by this function */
+ int (*read_close)(struct AVFormatContext *);
+ /**
+ * seek to a given timestamp relative to the frames in
+ * stream component stream_index
+ * @param stream_index must not be -1
+ * @param flags selects which direction should be preferred if no exact
+ * match is available
+ */
+ int (*read_seek)(struct AVFormatContext *,
+ int stream_index, int64_t timestamp, int flags);
+ /**
+ * gets the next timestamp in AV_TIME_BASE units.
+ */
+ int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,
+ int64_t *pos, int64_t pos_limit);
+ /* can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER */
+ int flags;
+ /* if extensions are defined, then no probe is done. You should
+ usually not use extension format guessing because it is not
+ reliable enough */
+ const char *extensions;
+ /* general purpose read only value that the format can use */
+ int value;
+
+ /* start/resume playing - only meaningful if using a network based format
+ (RTSP) */
+ int (*read_play)(struct AVFormatContext *);
+
+ /* pause playing - only meaningful if using a network based format
+ (RTSP) */
+ int (*read_pause)(struct AVFormatContext *);
+
+ /* private fields */
+ struct AVInputFormat *next;
+} AVInputFormat;
+
+typedef struct AVIndexEntry {
+ int64_t pos;
+ int64_t timestamp;
+#define AVINDEX_KEYFRAME 0x0001
+ int flags:2;
+ int size:30; //trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment)
+ int min_distance; /* min distance between this and the previous keyframe, used to avoid unneeded searching */
+} AVIndexEntry;
+
+typedef struct AVStream {
+ int index; /* stream index in AVFormatContext */
+ int id; /* format specific stream id */
+ AVCodecContext *codec; /* codec context */
+ /**
+ * real base frame rate of the stream.
+ * this is the lowest framerate with which all timestamps can be
+ * represented accurately (it is the least common multiple of all
+ * framerates in the stream). Note, this value is just a guess!
+ * for example, if the timebase is 1/90000 and all frames have either
+ * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1.
+ */
+ AVRational r_frame_rate;
+ void *priv_data;
+ /* internal data used in av_find_stream_info() */
+ int64_t codec_info_duration;
+ int codec_info_nb_frames;
+ /* encoding: PTS generation when outputting the stream */
+ AVFrac pts;
+
+ /**
+ * this is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. for fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ */
+ AVRational time_base;
+ int pts_wrap_bits; /* number of bits in pts (used for wrapping control) */
+ /* ffmpeg.c private use */
+ int stream_copy; /* if TRUE, just copy stream */
+ enum AVDiscard discard; ///< selects which packets can be discarded at will and don't need to be demuxed
+ //FIXME move stuff to a flags field?
+ /* quality, as it has been removed from AVCodecContext and put in AVVideoFrame
+ * MN: unsure if that's the right place for it */
+ float quality;
+ /* decoding: position of the first frame of the component, in
+ AV_TIME_BASE fractional seconds. */
+ int64_t start_time;
+ /* decoding: duration of the stream, in AV_TIME_BASE fractional
+ seconds. */
+ int64_t duration;
+
+ char language[4]; /* ISO 639 3-letter language code (empty string if undefined) */
+
+ /* av_read_frame() support */
+ int need_parsing; ///< 1->full parsing needed, 2->only parse headers, don't repack
+ struct AVCodecParserContext *parser;
+
+ int64_t cur_dts;
+ int last_IP_duration;
+ int64_t last_IP_pts;
+ /* av_seek_frame() support */
+ AVIndexEntry *index_entries; /* only used if the format does not
+ support seeking natively */
+ int nb_index_entries;
+ unsigned int index_entries_allocated_size;
+
+ int64_t nb_frames; ///< number of frames in this stream if known or 0
+
+#define MAX_REORDER_DELAY 4
+ int64_t pts_buffer[MAX_REORDER_DELAY+1];
+} AVStream;
+
+#define AVFMTCTX_NOHEADER 0x0001 /* signal that no header is present
+ (streams are added dynamically) */
+
+#define MAX_STREAMS 20
+
+/* format I/O context */
+typedef struct AVFormatContext {
+ const AVClass *av_class; /* set by av_alloc_format_context */
+ /* can only be iformat or oformat, not both at the same time */
+ struct AVInputFormat *iformat;
+ struct AVOutputFormat *oformat;
+ void *priv_data;
+ ByteIOContext pb;
+ int nb_streams;
+ AVStream *streams[MAX_STREAMS];
+ char filename[1024]; /* input or output filename */
+ /* stream info */
+ int64_t timestamp;
+ char title[512];
+ char author[512];
+ char copyright[512];
+ char comment[512];
+ char album[512];
+ int year; /* ID3 year, 0 if none */
+ int track; /* track number, 0 if none */
+ char genre[32]; /* ID3 genre */
+
+ int ctx_flags; /* format specific flags, see AVFMTCTX_xx */
+ /* private data for pts handling (do not modify directly) */
+ /* This buffer is only needed when packets were already buffered but
+ not decoded, for example to get the codec parameters in mpeg
+ streams */
+ struct AVPacketList *packet_buffer;
+
+ /* decoding: position of the first frame of the component, in
+ AV_TIME_BASE fractional seconds. NEVER set this value directly:
+ it is deduced from the AVStream values. */
+ int64_t start_time;
+ /* decoding: duration of the stream, in AV_TIME_BASE fractional
+ seconds. NEVER set this value directly: it is deduced from the
+ AVStream values. */
+ int64_t duration;
+ /* decoding: total file size. 0 if unknown */
+ int64_t file_size;
+ /* decoding: total stream bitrate in bit/s, 0 if not
+ available. Never set it directly if the file_size and the
+ duration are known as ffmpeg can compute it automatically. */
+ int bit_rate;
+
+ /* av_read_frame() support */
+ AVStream *cur_st;
+ const uint8_t *cur_ptr;
+ int cur_len;
+ AVPacket cur_pkt;
+
+ /* av_seek_frame() support */
+ int64_t data_offset; /* offset of the first packet */
+ int index_built;
+
+ int mux_rate;
+ int packet_size;
+ int preload;
+ int max_delay;
+
+#define AVFMT_NOOUTPUTLOOP -1
+#define AVFMT_INFINITEOUTPUTLOOP 0
+ /* number of times to loop output in formats that support it */
+ int loop_output;
+
+ int flags;
+#define AVFMT_FLAG_GENPTS 0x0001 ///< generate pts if missing even if it requires parsing future frames
+#define AVFMT_FLAG_IGNIDX 0x0002 ///< ignore index
+
+ int loop_input;
+ /* decoding: size of data to probe; encoding unused */
+ unsigned int probesize;
+} AVFormatContext;
+
+typedef struct AVPacketList {
+ AVPacket pkt;
+ struct AVPacketList *next;
+} AVPacketList;
+
+extern AVInputFormat *first_iformat;
+extern AVOutputFormat *first_oformat;
+
+enum CodecID av_guess_image2_codec(const char *filename);
+
+/* XXX: use automatic init with either ELF sections or C file parser */
+/* modules */
+
+#include "rtp.h"
+
+#include "rtsp.h"
+
+/* utils.c */
+void av_register_input_format(AVInputFormat *format);
+void av_register_output_format(AVOutputFormat *format);
+AVOutputFormat *guess_stream_format(const char *short_name,
+ const char *filename, const char *mime_type);
+AVOutputFormat *guess_format(const char *short_name,
+ const char *filename, const char *mime_type);
+enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+ const char *filename, const char *mime_type, enum CodecType type);
+
+void av_hex_dump(FILE *f, uint8_t *buf, int size);
+void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload);
+
+void av_register_all(void);
+
+/* media file input */
+AVInputFormat *av_find_input_format(const char *short_name);
+AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);
+int av_open_input_stream(AVFormatContext **ic_ptr,
+ ByteIOContext *pb, const char *filename,
+ AVInputFormat *fmt, AVFormatParameters *ap);
+int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
+ AVInputFormat *fmt,
+ int buf_size,
+ AVFormatParameters *ap);
+/* no av_open for output, so applications will need this: */
+AVFormatContext *av_alloc_format_context(void);
+
+#define AVERROR_UNKNOWN (-1) /* unknown error */
+#define AVERROR_IO (-2) /* i/o error */
+#define AVERROR_NUMEXPECTED (-3) /* number syntax expected in filename */
+#define AVERROR_INVALIDDATA (-4) /* invalid data found */
+#define AVERROR_NOMEM (-5) /* not enough memory */
+#define AVERROR_NOFMT (-6) /* unknown format */
+#define AVERROR_NOTSUPP (-7) /* operation not supported */
+
+int av_find_stream_info(AVFormatContext *ic);
+int av_read_packet(AVFormatContext *s, AVPacket *pkt);
+int av_read_frame(AVFormatContext *s, AVPacket *pkt);
+int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
+int av_read_play(AVFormatContext *s);
+int av_read_pause(AVFormatContext *s);
+void av_close_input_file(AVFormatContext *s);
+AVStream *av_new_stream(AVFormatContext *s, int id);
+void av_set_pts_info(AVStream *s, int pts_wrap_bits,
+ int pts_num, int pts_den);
+
+#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward
+#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes
+#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non keyframes
+
+int av_find_default_stream_index(AVFormatContext *s);
+int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
+int av_add_index_entry(AVStream *st,
+ int64_t pos, int64_t timestamp, int size, int distance, int flags);
+int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags);
+void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
+int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
+
+/* media file output */
+int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
+int av_write_header(AVFormatContext *s);
+int av_write_frame(AVFormatContext *s, AVPacket *pkt);
+int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
+int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush);
+
+int av_write_trailer(AVFormatContext *s);
+
+void dump_format(AVFormatContext *ic,
+ int index,
+ const char *url,
+ int is_output);
+int parse_image_size(int *width_ptr, int *height_ptr, const char *str);
+int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg);
+int64_t parse_date(const char *datestr, int duration);
+
+int64_t av_gettime(void);
+
+/* ffm specific for ffserver */
+#define FFM_PACKET_SIZE 4096
+offset_t ffm_read_write_index(int fd);
+void ffm_write_write_index(int fd, offset_t pos);
+void ffm_set_write_index(AVFormatContext *s, offset_t pos, offset_t file_size);
+
+int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
+
+int av_get_frame_filename(char *buf, int buf_size,
+ const char *path, int number);
+int av_filename_number_test(const char *filename);
+
+/* grab specific */
+int video_grab_init(void);
+int audio_init(void);
+
+/* DV1394 */
+int dv1394_init(void);
+int dc1394_init(void);
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "os_support.h"
+
+int strstart(const char *str, const char *val, const char **ptr);
+int stristart(const char *str, const char *val, const char **ptr);
+void pstrcpy(char *buf, int buf_size, const char *str);
+char *pstrcat(char *buf, int buf_size, const char *s);
+
+void __dynarray_add(unsigned long **tab_ptr, int *nb_ptr, unsigned long elem);
+
+#ifdef __GNUC__
+#define dynarray_add(tab, nb_ptr, elem)\
+do {\
+ typeof(tab) _tab = (tab);\
+ typeof(elem) _elem = (elem);\
+ (void)sizeof(**_tab == _elem); /* check that types are compatible */\
+ __dynarray_add((unsigned long **)_tab, nb_ptr, (unsigned long)_elem);\
+} while(0)
+#else
+#define dynarray_add(tab, nb_ptr, elem)\
+do {\
+ __dynarray_add((unsigned long **)(tab), nb_ptr, (unsigned long)(elem));\
+} while(0)
+#endif
+
+time_t mktimegm(struct tm *tm);
+struct tm *brktimegm(time_t secs, struct tm *tm);
+const char *small_strptime(const char *p, const char *fmt,
+ struct tm *dt);
+
+struct in_addr;
+int resolve_host(struct in_addr *sin_addr, const char *hostname);
+
+void url_split(char *proto, int proto_size,
+ char *authorization, int authorization_size,
+ char *hostname, int hostname_size,
+ int *port_ptr,
+ char *path, int path_size,
+ const char *url);
+
+int match_ext(const char *filename, const char *extensions);
+
+#endif /* HAVE_AV_CONFIG_H */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AVFORMAT_H */
+
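
To illustrate the public API declared in this header, here is a minimal demuxing sketch. It is not part of the patch; dump_packet_sizes is a made-up name, error handling is trimmed, and only functions declared above are used.

#include <stdio.h>
#include "avformat.h"

/* Illustrative sketch: open a file with the API above, then read
 * packets and print their sizes until end of file. */
int dump_packet_sizes(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;

    av_register_all();                               /* register all (de)muxers once */
    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;
    if (av_find_stream_info(ic) < 0) {               /* fill in codec parameters */
        av_close_input_file(ic);
        return -1;
    }
    dump_format(ic, 0, filename, 0);                 /* human-readable stream dump */
    while (av_read_frame(ic, &pkt) >= 0) {
        printf("stream %d: %d bytes\n", pkt.stream_index, pkt.size);
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
    return 0;
}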
diff --git a/contrib/ffmpeg/libavformat/avi.h b/contrib/ffmpeg/libavformat/avi.h
new file mode 100644
index 000000000..2c360689b
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avi.h
@@ -0,0 +1,39 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FFMPEG_AVI_H
+#define FFMPEG_AVI_H
+
+#include "avcodec.h"
+
+#define AVIF_HASINDEX 0x00000010 // Index at end of file?
+#define AVIF_MUSTUSEINDEX 0x00000020
+#define AVIF_ISINTERLEAVED 0x00000100
+#define AVIF_TRUSTCKTYPE 0x00000800 // Use CKType to find key frames?
+#define AVIF_WASCAPTUREFILE 0x00010000
+#define AVIF_COPYRIGHTED 0x00020000
+
+#define AVI_MAX_RIFF_SIZE 0x40000000LL
+#define AVI_MASTER_INDEX_SIZE 256
+
+/* index flags */
+#define AVIIF_INDEX 0x10
+
+#endif /* FFMPEG_AVI_H */
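
A tiny illustrative helper (describe_avih_flags is a made-up name) showing how the AVIF_* bits above are typically tested; the AVI demuxer below checks AVIF_MUSTUSEINDEX in the same way.

#include "avi.h"

/* Illustrative only: classify the dwFlags word of an 'avih' chunk
 * using the AVIF_* bits defined above. */
static const char *describe_avih_flags(unsigned int flags)
{
    if (flags & AVIF_MUSTUSEINDEX)
        return "index is required for correct playback";
    if (flags & AVIF_HASINDEX)
        return "an idx1 index is present at the end of the file";
    if (flags & AVIF_ISINTERLEAVED)
        return "file is interleaved, no index information";
    return "no index information";
}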
diff --git a/contrib/ffmpeg/libavformat/avidec.c b/contrib/ffmpeg/libavformat/avidec.c
new file mode 100644
index 000000000..d1af79fa3
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avidec.c
@@ -0,0 +1,989 @@
+/*
+ * AVI demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "avi.h"
+#include "dv.h"
+#include "riff.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+//#define DEBUG
+//#define DEBUG_SEEK
+
+typedef struct AVIStream {
+ int64_t frame_offset; /* current frame (video) or byte (audio) counter
+ (used to compute the pts) */
+ int remaining;
+ int packet_size;
+
+ int scale;
+ int rate;
+ int sample_size; /* size of one sample (or packet) (in the rate/scale sense) in bytes */
+
+ int64_t cum_len; /* temporary storage (used during seek) */
+
+ int prefix; ///< normally 'd'<<8 + 'c' or 'w'<<8 + 'b'
+ int prefix_count;
+} AVIStream;
+
+typedef struct {
+ int64_t riff_end;
+ int64_t movi_end;
+ offset_t movi_list;
+ int index_loaded;
+ int is_odml;
+ int non_interleaved;
+ int stream_index;
+ DVDemuxContext* dv_demux;
+} AVIContext;
+
+static int avi_load_index(AVFormatContext *s);
+static int guess_ni_flag(AVFormatContext *s);
+
+#ifdef DEBUG
+static void print_tag(const char *str, unsigned int tag, int size)
+{
+ printf("%s: tag=%c%c%c%c size=0x%x\n",
+ str, tag & 0xff,
+ (tag >> 8) & 0xff,
+ (tag >> 16) & 0xff,
+ (tag >> 24) & 0xff,
+ size);
+}
+#endif
+
+static int get_riff(AVIContext *avi, ByteIOContext *pb)
+{
+ uint32_t tag;
+ /* check RIFF header */
+ tag = get_le32(pb);
+
+ if (tag != MKTAG('R', 'I', 'F', 'F'))
+ return -1;
+ avi->riff_end = get_le32(pb); /* RIFF chunk size */
+ avi->riff_end += url_ftell(pb); /* RIFF chunk end */
+ tag = get_le32(pb);
+ if (tag != MKTAG('A', 'V', 'I', ' ') && tag != MKTAG('A', 'V', 'I', 'X'))
+ return -1;
+
+ return 0;
+}
+
+static int read_braindead_odml_indx(AVFormatContext *s, int frame_num){
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int longs_pre_entry= get_le16(pb);
+ int index_sub_type = get_byte(pb);
+ int index_type = get_byte(pb);
+ int entries_in_use = get_le32(pb);
+ int chunk_id = get_le32(pb);
+ int64_t base = get_le64(pb);
+ int stream_id= 10*((chunk_id&0xFF) - '0') + (((chunk_id>>8)&0xFF) - '0');
+ AVStream *st;
+ AVIStream *ast;
+ int i;
+ int64_t last_pos= -1;
+ int64_t filesize= url_fsize(&s->pb);
+
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_ERROR, "longs_pre_entry:%d index_type:%d entries_in_use:%d chunk_id:%X base:%16"PRIX64"\n",
+ longs_pre_entry,index_type, entries_in_use, chunk_id, base);
+#endif
+
+ if(stream_id > s->nb_streams || stream_id < 0)
+ return -1;
+ st= s->streams[stream_id];
+ ast = st->priv_data;
+
+ if(index_sub_type)
+ return -1;
+
+ get_le32(pb);
+
+ if(index_type && longs_pre_entry != 2)
+ return -1;
+ if(index_type>1)
+ return -1;
+
+ if(filesize > 0 && base >= filesize){
+ av_log(s, AV_LOG_ERROR, "ODML index invalid\n");
+ if(base>>32 == (base & 0xFFFFFFFF) && (base & 0xFFFFFFFF) < filesize && filesize <= 0xFFFFFFFF)
+ base &= 0xFFFFFFFF;
+ else
+ return -1;
+ }
+
+ for(i=0; i<entries_in_use; i++){
+ if(index_type){
+ int64_t pos= get_le32(pb) + base - 8;
+ int len = get_le32(pb);
+ int key= len >= 0;
+ len &= 0x7FFFFFFF;
+
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_ERROR, "pos:%"PRId64", len:%X\n", pos, len);
+#endif
+ if(last_pos == pos || pos == base - 8)
+ avi->non_interleaved= 1;
+ else
+ av_add_index_entry(st, pos, ast->cum_len / FFMAX(1, ast->sample_size), len, 0, key ? AVINDEX_KEYFRAME : 0);
+
+ if(ast->sample_size)
+ ast->cum_len += len;
+ else
+ ast->cum_len ++;
+ last_pos= pos;
+ }else{
+ int64_t offset, pos;
+ int duration;
+ offset = get_le64(pb);
+ get_le32(pb); /* size */
+ duration = get_le32(pb);
+ pos = url_ftell(pb);
+
+ url_fseek(pb, offset+8, SEEK_SET);
+ read_braindead_odml_indx(s, frame_num);
+ frame_num += duration;
+
+ url_fseek(pb, pos, SEEK_SET);
+ }
+ }
+ avi->index_loaded=1;
+ return 0;
+}
+
+static void clean_index(AVFormatContext *s){
+ int i;
+ int64_t j;
+
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st = s->streams[i];
+ AVIStream *ast = st->priv_data;
+ int n= st->nb_index_entries;
+ int max= ast->sample_size;
+ int64_t pos, size, ts;
+
+ if(n != 1 || ast->sample_size==0)
+ continue;
+
+ while(max < 1024) max+=max;
+
+ pos= st->index_entries[0].pos;
+ size= st->index_entries[0].size;
+ ts= st->index_entries[0].timestamp;
+
+ for(j=0; j<size; j+=max){
+ av_add_index_entry(st, pos+j, ts + j/ast->sample_size, FFMIN(max, size-j), 0, AVINDEX_KEYFRAME);
+ }
+ }
+}
+
+static int avi_read_tag(ByteIOContext *pb, char *buf, int maxlen, unsigned int size)
+{
+ offset_t i = url_ftell(pb);
+ size += (size & 1);
+ get_strz(pb, buf, maxlen);
+ url_fseek(pb, i+size, SEEK_SET);
+ return 0;
+}
+
+static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t tag, tag1, handler;
+ int codec_type, stream_index, frame_period, bit_rate;
+ unsigned int size, nb_frames;
+ int i, n;
+ AVStream *st;
+ AVIStream *ast = NULL;
+ char str_track[4];
+
+ avi->stream_index= -1;
+
+ if (get_riff(avi, pb) < 0)
+ return -1;
+
+ /* first list tag */
+ stream_index = -1;
+ codec_type = -1;
+ frame_period = 0;
+ for(;;) {
+ if (url_feof(pb))
+ goto fail;
+ tag = get_le32(pb);
+ size = get_le32(pb);
+#ifdef DEBUG
+ print_tag("tag", tag, size);
+#endif
+
+ switch(tag) {
+ case MKTAG('L', 'I', 'S', 'T'):
+ /* ignored, except when start of video packets */
+ tag1 = get_le32(pb);
+#ifdef DEBUG
+ print_tag("list", tag1, 0);
+#endif
+ if (tag1 == MKTAG('m', 'o', 'v', 'i')) {
+ avi->movi_list = url_ftell(pb) - 4;
+ if(size) avi->movi_end = avi->movi_list + size + (size & 1);
+ else avi->movi_end = url_fsize(pb);
+#ifdef DEBUG
+ printf("movi end=%"PRIx64"\n", avi->movi_end);
+#endif
+ goto end_of_header;
+ }
+ break;
+ case MKTAG('d', 'm', 'l', 'h'):
+ avi->is_odml = 1;
+ url_fskip(pb, size + (size & 1));
+ break;
+ case MKTAG('a', 'v', 'i', 'h'):
+ /* avi header */
+ /* using frame_period is a bad idea */
+ frame_period = get_le32(pb);
+ bit_rate = get_le32(pb) * 8;
+ get_le32(pb);
+ avi->non_interleaved |= get_le32(pb) & AVIF_MUSTUSEINDEX;
+
+ url_fskip(pb, 2 * 4);
+ n = get_le32(pb);
+ for(i=0;i<n;i++) {
+ AVIStream *ast;
+ st = av_new_stream(s, i);
+ if (!st)
+ goto fail;
+
+ ast = av_mallocz(sizeof(AVIStream));
+ if (!ast)
+ goto fail;
+ st->priv_data = ast;
+ }
+ url_fskip(pb, size - 7 * 4);
+ break;
+ case MKTAG('s', 't', 'r', 'h'):
+ /* stream header */
+ stream_index++;
+ tag1 = get_le32(pb);
+ handler = get_le32(pb); /* codec tag */
+#ifdef DEBUG
+ print_tag("strh", tag1, -1);
+#endif
+ if(tag1 == MKTAG('i', 'a', 'v', 's') || tag1 == MKTAG('i', 'v', 'a', 's')){
+ /*
+ * After some consideration -- I don't think we
+ * have to support anything but DV in type1 AVIs.
+ */
+ if (s->nb_streams != 1)
+ goto fail;
+
+ if (handler != MKTAG('d', 'v', 's', 'd') &&
+ handler != MKTAG('d', 'v', 'h', 'd') &&
+ handler != MKTAG('d', 'v', 's', 'l'))
+ goto fail;
+
+ ast = s->streams[0]->priv_data;
+ av_freep(&s->streams[0]->codec->extradata);
+ av_freep(&s->streams[0]);
+ s->nb_streams = 0;
+ if (ENABLE_DV_DEMUXER) {
+ avi->dv_demux = dv_init_demux(s);
+ if (!avi->dv_demux)
+ goto fail;
+ }
+ s->streams[0]->priv_data = ast;
+ url_fskip(pb, 3 * 4);
+ ast->scale = get_le32(pb);
+ ast->rate = get_le32(pb);
+ stream_index = s->nb_streams - 1;
+ url_fskip(pb, size - 7*4);
+ break;
+ }
+
+ if (stream_index >= s->nb_streams) {
+ url_fskip(pb, size - 8);
+ /* ignore padding stream */
+ if (tag1 == MKTAG('p', 'a', 'd', 's'))
+ stream_index--;
+ break;
+ }
+ st = s->streams[stream_index];
+ ast = st->priv_data;
+ st->codec->stream_codec_tag= handler;
+
+ get_le32(pb); /* flags */
+ get_le16(pb); /* priority */
+ get_le16(pb); /* language */
+ get_le32(pb); /* initial frame */
+ ast->scale = get_le32(pb);
+ ast->rate = get_le32(pb);
+ if(ast->scale && ast->rate){
+ }else if(frame_period){
+ ast->rate = 1000000;
+ ast->scale = frame_period;
+ }else{
+ ast->rate = 25;
+ ast->scale = 1;
+ }
+ av_set_pts_info(st, 64, ast->scale, ast->rate);
+
+ ast->cum_len=get_le32(pb); /* start */
+ nb_frames = get_le32(pb);
+
+ st->start_time = 0;
+ st->duration = nb_frames;
+ get_le32(pb); /* buffer size */
+ get_le32(pb); /* quality */
+ ast->sample_size = get_le32(pb); /* sample size */
+ ast->cum_len *= FFMAX(1, ast->sample_size);
+// av_log(NULL, AV_LOG_DEBUG, "%d %d %d %d\n", ast->rate, ast->scale, ast->start, ast->sample_size);
+
+ switch(tag1) {
+ case MKTAG('v', 'i', 'd', 's'):
+ codec_type = CODEC_TYPE_VIDEO;
+
+ ast->sample_size = 0;
+ break;
+ case MKTAG('a', 'u', 'd', 's'):
+ codec_type = CODEC_TYPE_AUDIO;
+ break;
+ case MKTAG('t', 'x', 't', 's'):
+ //FIXME
+ codec_type = CODEC_TYPE_DATA; //CODEC_TYPE_SUB ? FIXME
+ break;
+ case MKTAG('p', 'a', 'd', 's'):
+ codec_type = CODEC_TYPE_UNKNOWN;
+ stream_index--;
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "unknown stream type %X\n", tag1);
+ goto fail;
+ }
+ ast->frame_offset= ast->cum_len;
+ url_fskip(pb, size - 12 * 4);
+ break;
+ case MKTAG('s', 't', 'r', 'f'):
+ /* stream header */
+ if (stream_index >= s->nb_streams || avi->dv_demux) {
+ url_fskip(pb, size);
+ } else {
+ st = s->streams[stream_index];
+ switch(codec_type) {
+ case CODEC_TYPE_VIDEO:
+ get_le32(pb); /* size */
+ st->codec->width = get_le32(pb);
+ st->codec->height = get_le32(pb);
+ get_le16(pb); /* planes */
+ st->codec->bits_per_sample= get_le16(pb); /* depth */
+ tag1 = get_le32(pb);
+ get_le32(pb); /* ImageSize */
+ get_le32(pb); /* XPelsPerMeter */
+ get_le32(pb); /* YPelsPerMeter */
+ get_le32(pb); /* ClrUsed */
+ get_le32(pb); /* ClrImportant */
+
+ if(size > 10*4 && size<(1<<30)){
+ st->codec->extradata_size= size - 10*4;
+ st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
+ }
+
+ if(st->codec->extradata_size & 1) //FIXME check if the encoder really did this correctly
+ get_byte(pb);
+
+ /* Extract palette from extradata if bpp <= 8 */
+ /* This code assumes that extradata contains only palette */
+ /* This is true for all paletted codecs implemented in ffmpeg */
+ if (st->codec->extradata_size && (st->codec->bits_per_sample <= 8)) {
+ st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
+#ifdef WORDS_BIGENDIAN
+ for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
+ st->codec->palctrl->palette[i] = bswap_32(((uint32_t*)st->codec->extradata)[i]);
+#else
+ memcpy(st->codec->palctrl->palette, st->codec->extradata,
+ FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
+#endif
+ st->codec->palctrl->palette_changed = 1;
+ }
+
+#ifdef DEBUG
+ print_tag("video", tag1, 0);
+#endif
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_tag = tag1;
+ st->codec->codec_id = codec_get_id(codec_bmp_tags, tag1);
+ st->need_parsing = 2; //only parse headers, don't do slower repacketization; this is needed to get the picture type, which is needed for generating correct pts
+// url_fskip(pb, size - 5 * 4);
+ break;
+ case CODEC_TYPE_AUDIO:
+ get_wav_header(pb, st->codec, size);
+ if(ast->sample_size && st->codec->block_align && ast->sample_size % st->codec->block_align)
+ av_log(s, AV_LOG_DEBUG, "invalid sample size or block align detected\n");
+ if (size%2) /* 2-aligned (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
+ url_fskip(pb, 1);
+ /* Force parsing as several audio frames can be in
+ * one packet. */
+ st->need_parsing = 1;
+ /* the ADTS header is in extradata; headerless AAC must be stored as exact frames, so the parser is not needed and would fail */
+ if (st->codec->codec_id == CODEC_ID_AAC && st->codec->extradata_size)
+ st->need_parsing = 0;
+ /* AVI files with Xan DPCM audio (wrongly) declare PCM
+ * audio in the header but have Axan as stream_codec_tag. */
+ if (st->codec->stream_codec_tag == ff_get_fourcc("Axan")){
+ st->codec->codec_id = CODEC_ID_XAN_DPCM;
+ st->codec->codec_tag = 0;
+ }
+ break;
+ default:
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ st->codec->codec_id= CODEC_ID_NONE;
+ st->codec->codec_tag= 0;
+ url_fskip(pb, size);
+ break;
+ }
+ }
+ break;
+ case MKTAG('i', 'n', 'd', 'x'):
+ i= url_ftell(pb);
+ if(!url_is_streamed(pb) && !(s->flags & AVFMT_FLAG_IGNIDX)){
+ read_braindead_odml_indx(s, 0);
+ }
+ url_fseek(pb, i+size, SEEK_SET);
+ break;
+ case MKTAG('I', 'N', 'A', 'M'):
+ avi_read_tag(pb, s->title, sizeof(s->title), size);
+ break;
+ case MKTAG('I', 'A', 'R', 'T'):
+ avi_read_tag(pb, s->author, sizeof(s->author), size);
+ break;
+ case MKTAG('I', 'C', 'O', 'P'):
+ avi_read_tag(pb, s->copyright, sizeof(s->copyright), size);
+ break;
+ case MKTAG('I', 'C', 'M', 'T'):
+ avi_read_tag(pb, s->comment, sizeof(s->comment), size);
+ break;
+ case MKTAG('I', 'G', 'N', 'R'):
+ avi_read_tag(pb, s->genre, sizeof(s->genre), size);
+ break;
+ case MKTAG('I', 'P', 'R', 'D'):
+ avi_read_tag(pb, s->album, sizeof(s->album), size);
+ break;
+ case MKTAG('I', 'P', 'R', 'T'):
+ avi_read_tag(pb, str_track, sizeof(str_track), size);
+ sscanf(str_track, "%d", &s->track);
+ break;
+ default:
+ /* skip tag */
+ size += (size & 1);
+ url_fskip(pb, size);
+ break;
+ }
+ }
+ end_of_header:
+ /* check stream number */
+ if (stream_index != s->nb_streams - 1) {
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ av_freep(&s->streams[i]->codec->extradata);
+ av_freep(&s->streams[i]);
+ }
+ return -1;
+ }
+
+ if(!avi->index_loaded && !url_is_streamed(pb))
+ avi_load_index(s);
+ avi->index_loaded = 1;
+ avi->non_interleaved |= guess_ni_flag(s);
+ if(avi->non_interleaved)
+ clean_index(s);
+
+ return 0;
+}
+
+static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int n, d[8], size;
+ offset_t i, sync;
+ void* dstr;
+
+ if (ENABLE_DV_DEMUXER && avi->dv_demux) {
+ size = dv_get_packet(avi->dv_demux, pkt);
+ if (size >= 0)
+ return size;
+ }
+
+ if(avi->non_interleaved){
+ int best_stream_index = 0;
+ AVStream *best_st= NULL;
+ AVIStream *best_ast;
+ int64_t best_ts= INT64_MAX;
+ int i;
+
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st = s->streams[i];
+ AVIStream *ast = st->priv_data;
+ int64_t ts= ast->frame_offset;
+
+ if(ast->sample_size)
+ ts /= ast->sample_size;
+ ts= av_rescale(ts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
+
+// av_log(NULL, AV_LOG_DEBUG, "%"PRId64" %d/%d %"PRId64"\n", ts, st->time_base.num, st->time_base.den, ast->frame_offset);
+ if(ts < best_ts){
+ best_ts= ts;
+ best_st= st;
+ best_stream_index= i;
+ }
+ }
+ best_ast = best_st->priv_data;
+ best_ts= av_rescale(best_ts, best_st->time_base.den, AV_TIME_BASE * (int64_t)best_st->time_base.num); //FIXME a little ugly
+ if(best_ast->remaining)
+ i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
+ else
+ i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY);
+
+// av_log(NULL, AV_LOG_DEBUG, "%d\n", i);
+ if(i>=0){
+ int64_t pos= best_st->index_entries[i].pos;
+ pos += best_ast->packet_size - best_ast->remaining;
+ url_fseek(&s->pb, pos + 8, SEEK_SET);
+// av_log(NULL, AV_LOG_DEBUG, "pos=%"PRId64"\n", pos);
+
+ assert(best_ast->remaining <= best_ast->packet_size);
+
+ avi->stream_index= best_stream_index;
+ if(!best_ast->remaining)
+ best_ast->packet_size=
+ best_ast->remaining= best_st->index_entries[i].size;
+ }
+ }
+
+resync:
+ if(avi->stream_index >= 0){
+ AVStream *st= s->streams[ avi->stream_index ];
+ AVIStream *ast= st->priv_data;
+ int size;
+
+ if(ast->sample_size <= 1) // minorityreport.AVI block_align=1024 sample_size=1 IMA-ADPCM
+ size= INT_MAX;
+ else if(ast->sample_size < 32)
+ size= 64*ast->sample_size;
+ else
+ size= ast->sample_size;
+
+ if(size > ast->remaining)
+ size= ast->remaining;
+ av_get_packet(pb, pkt, size);
+
+ if (ENABLE_DV_DEMUXER && avi->dv_demux) {
+ dstr = pkt->destruct;
+ size = dv_produce_packet(avi->dv_demux, pkt,
+ pkt->data, pkt->size);
+ pkt->destruct = dstr;
+ pkt->flags |= PKT_FLAG_KEY;
+ } else {
+ /* XXX: how to handle B frames in avi ? */
+ pkt->dts = ast->frame_offset;
+// pkt->dts += ast->start;
+ if(ast->sample_size)
+ pkt->dts /= ast->sample_size;
+//av_log(NULL, AV_LOG_DEBUG, "dts:%"PRId64" offset:%"PRId64" %d/%d smpl_siz:%d base:%d st:%d size:%d\n", pkt->dts, ast->frame_offset, ast->scale, ast->rate, ast->sample_size, AV_TIME_BASE, avi->stream_index, size);
+ pkt->stream_index = avi->stream_index;
+
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if(st->index_entries){
+ AVIndexEntry *e;
+ int index;
+
+ index= av_index_search_timestamp(st, pkt->dts, 0);
+ e= &st->index_entries[index];
+
+ if(index >= 0 && e->timestamp == ast->frame_offset){
+ if (e->flags & AVINDEX_KEYFRAME)
+ pkt->flags |= PKT_FLAG_KEY;
+ }
+ } else {
+ /* if no index, better to say that all frames
+ are key frames */
+ pkt->flags |= PKT_FLAG_KEY;
+ }
+ } else {
+ pkt->flags |= PKT_FLAG_KEY;
+ }
+ if(ast->sample_size)
+ ast->frame_offset += pkt->size;
+ else
+ ast->frame_offset++;
+ }
+ ast->remaining -= size;
+ if(!ast->remaining){
+ avi->stream_index= -1;
+ ast->packet_size= 0;
+ if (size & 1) {
+ get_byte(pb);
+ size++;
+ }
+ }
+
+ return size;
+ }
+
+ memset(d, -1, sizeof(int)*8);
+ for(i=sync=url_ftell(pb); !url_feof(pb); i++) {
+ int j;
+
+ if (i >= avi->movi_end) {
+ if (avi->is_odml) {
+ url_fskip(pb, avi->riff_end - i);
+ avi->riff_end = avi->movi_end = url_fsize(pb);
+ } else
+ break;
+ }
+
+ for(j=0; j<7; j++)
+ d[j]= d[j+1];
+ d[7]= get_byte(pb);
+
+ size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24);
+
+ if( d[2] >= '0' && d[2] <= '9'
+ && d[3] >= '0' && d[3] <= '9'){
+ n= (d[2] - '0') * 10 + (d[3] - '0');
+ }else{
+ n= 100; //invalid stream id
+ }
+//av_log(NULL, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
+ if(i + size > avi->movi_end || d[0]<0)
+ continue;
+
+ //parse ix##
+ if( (d[0] == 'i' && d[1] == 'x' && n < s->nb_streams)
+ //parse JUNK
+ ||(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K')){
+ url_fskip(pb, size);
+//av_log(NULL, AV_LOG_DEBUG, "SKIP\n");
+ goto resync;
+ }
+
+ if( d[0] >= '0' && d[0] <= '9'
+ && d[1] >= '0' && d[1] <= '9'){
+ n= (d[0] - '0') * 10 + (d[1] - '0');
+ }else{
+ n= 100; //invalid stream id
+ }
+
+ //parse ##dc/##wb
+ if(n < s->nb_streams){
+ AVStream *st;
+ AVIStream *ast;
+ st = s->streams[n];
+ ast = st->priv_data;
+
+ if( (st->discard >= AVDISCARD_DEFAULT && size==0)
+ /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & PKT_FLAG_KEY))*/ //FIXME needs a little reordering
+ || st->discard >= AVDISCARD_ALL){
+ if(ast->sample_size) ast->frame_offset += pkt->size;
+ else ast->frame_offset++;
+ url_fskip(pb, size);
+ goto resync;
+ }
+
+ if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
+ d[2]*256+d[3] == ast->prefix /*||
+ (d[2] == 'd' && d[3] == 'c') ||
+ (d[2] == 'w' && d[3] == 'b')*/) {
+
+//av_log(NULL, AV_LOG_DEBUG, "OK\n");
+ if(d[2]*256+d[3] == ast->prefix)
+ ast->prefix_count++;
+ else{
+ ast->prefix= d[2]*256+d[3];
+ ast->prefix_count= 0;
+ }
+
+ avi->stream_index= n;
+ ast->packet_size= size + 8;
+ ast->remaining= size;
+ goto resync;
+ }
+ }
+        /* palette change chunk */
+ if ( d[0] >= '0' && d[0] <= '9'
+ && d[1] >= '0' && d[1] <= '9'
+ && ((d[2] == 'p' && d[3] == 'c'))
+ && n < s->nb_streams && i + size <= avi->movi_end) {
+
+ AVStream *st;
+ int first, clr, flags, k, p;
+
+ st = s->streams[n];
+
+ first = get_byte(pb);
+ clr = get_byte(pb);
+ if(!clr) /* all 256 colors used */
+ clr = 256;
+ flags = get_le16(pb);
+ p = 4;
+ for (k = first; k < clr + first; k++) {
+ int r, g, b;
+ r = get_byte(pb);
+ g = get_byte(pb);
+ b = get_byte(pb);
+ get_byte(pb);
+ st->codec->palctrl->palette[k] = b + (g << 8) + (r << 16);
+ }
+ st->codec->palctrl->palette_changed = 1;
+ goto resync;
+ }
+
+ }
+
+ return -1;
+}
+
+/* XXX: we make the implicit assumption that the positions are sorted
+   for each stream */
+static int avi_read_idx1(AVFormatContext *s, int size)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int nb_index_entries, i;
+ AVStream *st;
+ AVIStream *ast;
+ unsigned int index, tag, flags, pos, len;
+ unsigned last_pos= -1;
+
+ nb_index_entries = size / 16;
+ if (nb_index_entries <= 0)
+ return -1;
+
+ /* read the entries and sort them in each stream component */
+ for(i = 0; i < nb_index_entries; i++) {
+ tag = get_le32(pb);
+ flags = get_le32(pb);
+ pos = get_le32(pb);
+ len = get_le32(pb);
+#if defined(DEBUG_SEEK)
+ av_log(NULL, AV_LOG_DEBUG, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
+ i, tag, flags, pos, len);
+#endif
+ if(i==0 && pos > avi->movi_list)
+ avi->movi_list= 0; //FIXME better check
+ pos += avi->movi_list;
+
+ index = ((tag & 0xff) - '0') * 10;
+ index += ((tag >> 8) & 0xff) - '0';
+ if (index >= s->nb_streams)
+ continue;
+ st = s->streams[index];
+ ast = st->priv_data;
+
+#if defined(DEBUG_SEEK)
+ av_log(NULL, AV_LOG_DEBUG, "%d cum_len=%"PRId64"\n", len, ast->cum_len);
+#endif
+ if(last_pos == pos)
+ avi->non_interleaved= 1;
+ else
+ av_add_index_entry(st, pos, ast->cum_len / FFMAX(1, ast->sample_size), len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
+ if(ast->sample_size)
+ ast->cum_len += len;
+ else
+ ast->cum_len ++;
+ last_pos= pos;
+ }
+ return 0;
+}
+
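+/* Heuristic: the file is treated as non-interleaved if some stream's first
+ * indexed chunk lies after another stream's last indexed chunk, i.e. the
+ * streams are stored back to back rather than interleaved. */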
+static int guess_ni_flag(AVFormatContext *s){
+ int i;
+ int64_t last_start=0;
+ int64_t first_end= INT64_MAX;
+
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st = s->streams[i];
+ int n= st->nb_index_entries;
+
+ if(n <= 0)
+ continue;
+
+ if(st->index_entries[0].pos > last_start)
+ last_start= st->index_entries[0].pos;
+ if(st->index_entries[n-1].pos < first_end)
+ first_end= st->index_entries[n-1].pos;
+ }
+ return last_start > first_end;
+}
+
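+/* Scan the chunks following the movi list for an 'idx1' chunk and parse it;
+ * the original read position is restored afterwards. */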
+static int avi_load_index(AVFormatContext *s)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t tag, size;
+ offset_t pos= url_ftell(pb);
+
+ url_fseek(pb, avi->movi_end, SEEK_SET);
+#ifdef DEBUG_SEEK
+ printf("movi_end=0x%"PRIx64"\n", avi->movi_end);
+#endif
+ for(;;) {
+ if (url_feof(pb))
+ break;
+ tag = get_le32(pb);
+ size = get_le32(pb);
+#ifdef DEBUG_SEEK
+ printf("tag=%c%c%c%c size=0x%x\n",
+ tag & 0xff,
+ (tag >> 8) & 0xff,
+ (tag >> 16) & 0xff,
+ (tag >> 24) & 0xff,
+ size);
+#endif
+ switch(tag) {
+ case MKTAG('i', 'd', 'x', '1'):
+ if (avi_read_idx1(s, size) < 0)
+ goto skip;
+ else
+ goto the_end;
+ break;
+ default:
+ skip:
+ size += (size & 1);
+ url_fskip(pb, size);
+ break;
+ }
+ }
+ the_end:
+ url_fseek(pb, pos, SEEK_SET);
+ return 0;
+}
+
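+/* Seek: pick the index entry for the requested timestamp in the target
+ * stream, then resynchronize every other stream: by rescaled timestamp,
+ * and additionally by file position when the file is interleaved. */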
+static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+ AVIContext *avi = s->priv_data;
+ AVStream *st;
+ int i, index;
+ int64_t pos;
+
+ if (!avi->index_loaded) {
+ /* we only load the index on demand */
+ avi_load_index(s);
+ avi->index_loaded = 1;
+ }
+ assert(stream_index>= 0);
+
+ st = s->streams[stream_index];
+ index= av_index_search_timestamp(st, timestamp, flags);
+ if(index<0)
+ return -1;
+
+ /* find the position */
+ pos = st->index_entries[index].pos;
+ timestamp = st->index_entries[index].timestamp;
+
+// av_log(NULL, AV_LOG_DEBUG, "XX %"PRId64" %d %"PRId64"\n", timestamp, index, st->index_entries[index].timestamp);
+
+ for(i = 0; i < s->nb_streams; i++) {
+ AVStream *st2 = s->streams[i];
+ AVIStream *ast2 = st2->priv_data;
+
+ ast2->packet_size=
+ ast2->remaining= 0;
+
+ if (st2->nb_index_entries <= 0)
+ continue;
+
+// assert(st2->codec->block_align);
+ assert(st2->time_base.den == ast2->rate);
+ assert(st2->time_base.num == ast2->scale);
+ index = av_index_search_timestamp(
+ st2,
+ av_rescale(timestamp, st2->time_base.den*(int64_t)st->time_base.num, st->time_base.den * (int64_t)st2->time_base.num),
+ flags | AVSEEK_FLAG_BACKWARD);
+ if(index<0)
+ index=0;
+
+ if(!avi->non_interleaved){
+ while(index>0 && st2->index_entries[index].pos > pos)
+ index--;
+ while(index+1 < st2->nb_index_entries && st2->index_entries[index].pos < pos)
+ index++;
+ }
+
+// av_log(NULL, AV_LOG_DEBUG, "%"PRId64" %d %"PRId64"\n", timestamp, index, st2->index_entries[index].timestamp);
+ /* extract the current frame number */
+ ast2->frame_offset = st2->index_entries[index].timestamp;
+ if(ast2->sample_size)
+ ast2->frame_offset *=ast2->sample_size;
+ }
+
+ if (ENABLE_DV_DEMUXER && avi->dv_demux)
+ dv_flush_audio_packets(avi->dv_demux);
+ /* do the seek */
+ url_fseek(&s->pb, pos, SEEK_SET);
+ avi->stream_index= -1;
+ return 0;
+}
+
+static int avi_read_close(AVFormatContext *s)
+{
+ int i;
+ AVIContext *avi = s->priv_data;
+
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st = s->streams[i];
+ AVIStream *ast = st->priv_data;
+ av_free(ast);
+ av_free(st->codec->palctrl);
+ }
+
+ if (avi->dv_demux)
+ av_free(avi->dv_demux);
+
+ return 0;
+}
+
+static int avi_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'R' && p->buf[1] == 'I' &&
+ p->buf[2] == 'F' && p->buf[3] == 'F' &&
+ p->buf[8] == 'A' && p->buf[9] == 'V' &&
+ p->buf[10] == 'I' && p->buf[11] == ' ')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+AVInputFormat avi_demuxer = {
+ "avi",
+ "avi format",
+ sizeof(AVIContext),
+ avi_probe,
+ avi_read_header,
+ avi_read_packet,
+ avi_read_close,
+ avi_read_seek,
+};
diff --git a/contrib/ffmpeg/libavformat/avienc.c b/contrib/ffmpeg/libavformat/avienc.c
new file mode 100644
index 000000000..296608704
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avienc.c
@@ -0,0 +1,580 @@
+/*
+ * AVI muxer
+ * Copyright (c) 2000 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "avi.h"
+#include "riff.h"
+
+/*
+ * TODO:
+ * - fill all fields if not streamed (e.g. nb_frames)
+ */
+
+#ifdef CONFIG_AVI_MUXER
+typedef struct AVIIentry {
+ unsigned int flags, pos, len;
+} AVIIentry;
+
+#define AVI_INDEX_CLUSTER_SIZE 16384
+
+typedef struct AVIIndex {
+ offset_t indx_start;
+ int entry;
+ int ents_allocated;
+ AVIIentry** cluster;
+} AVIIndex;
+
+typedef struct {
+ offset_t riff_start, movi_list, odml_list;
+ offset_t frames_hdr_all, frames_hdr_strm[MAX_STREAMS];
+ int audio_strm_length[MAX_STREAMS];
+ int riff_id;
+ int packet_count[MAX_STREAMS];
+
+ AVIIndex indexes[MAX_STREAMS];
+} AVIContext;
+
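+/* Index entries are stored in fixed-size clusters of AVI_INDEX_CLUSTER_SIZE
+ * entries; map a flat entry id to its cluster and slot. */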
+static inline AVIIentry* avi_get_ientry(AVIIndex* idx, int ent_id)
+{
+ int cl = ent_id / AVI_INDEX_CLUSTER_SIZE;
+ int id = ent_id % AVI_INDEX_CLUSTER_SIZE;
+ return &idx->cluster[cl][id];
+}
+
+static offset_t avi_start_new_riff(AVIContext *avi, ByteIOContext *pb,
+ const char* riff_tag, const char* list_tag)
+{
+ offset_t loff;
+ int i;
+
+ avi->riff_id++;
+ for (i=0; i<MAX_STREAMS; i++)
+ avi->indexes[i].entry = 0;
+
+ avi->riff_start = start_tag(pb, "RIFF");
+ put_tag(pb, riff_tag);
+ loff = start_tag(pb, "LIST");
+ put_tag(pb, list_tag);
+ return loff;
+}
+
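+/* Build the chunk fourcc for a stream: the two-digit stream number followed
+ * by "dc" for video or "wb" for other streams, e.g. "00dc" or "01wb". */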
+static char* avi_stream2fourcc(char* tag, int index, enum CodecType type)
+{
+ tag[0] = '0';
+ tag[1] = '0' + index;
+ if (type == CODEC_TYPE_VIDEO) {
+ tag[2] = 'd';
+ tag[3] = 'c';
+ } else {
+ tag[2] = 'w';
+ tag[3] = 'b';
+ }
+ tag[4] = '\0';
+ return tag;
+}
+
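+/* Write one RIFF INFO sub-chunk: tag, little-endian size (string length plus
+ * terminating zero), the zero-terminated string, and a pad byte when that
+ * size is odd; empty strings are skipped entirely. */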
+static void avi_write_info_tag(ByteIOContext *pb, const char *tag, const char *str)
+{
+ int len = strlen(str);
+ if (len > 0) {
+ len++;
+ put_tag(pb, tag);
+ put_le32(pb, len);
+ put_strz(pb, str);
+ if (len & 1)
+ put_byte(pb, 0);
+ }
+}
+
+static int avi_write_counters(AVFormatContext* s, int riff_id)
+{
+ ByteIOContext *pb = &s->pb;
+ AVIContext *avi = s->priv_data;
+ int n, au_byterate, au_ssize, au_scale, nb_frames = 0;
+ offset_t file_size;
+ AVCodecContext* stream;
+
+ file_size = url_ftell(pb);
+ for(n = 0; n < s->nb_streams; n++) {
+ assert(avi->frames_hdr_strm[n]);
+ stream = s->streams[n]->codec;
+ url_fseek(pb, avi->frames_hdr_strm[n], SEEK_SET);
+ ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);
+ if(au_ssize == 0) {
+ put_le32(pb, avi->packet_count[n]);
+ } else {
+ put_le32(pb, avi->audio_strm_length[n] / au_ssize);
+ }
+ if(stream->codec_type == CODEC_TYPE_VIDEO)
+ nb_frames = FFMAX(nb_frames, avi->packet_count[n]);
+ }
+ if(riff_id == 1) {
+ assert(avi->frames_hdr_all);
+ url_fseek(pb, avi->frames_hdr_all, SEEK_SET);
+ put_le32(pb, nb_frames);
+ }
+ url_fseek(pb, file_size, SEEK_SET);
+
+ return 0;
+}
+
+static int avi_write_header(AVFormatContext *s)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
+ AVCodecContext *stream, *video_enc;
+ offset_t list1, list2, strh, strf;
+
+ /* header list */
+ avi->riff_id = 0;
+ list1 = avi_start_new_riff(avi, pb, "AVI ", "hdrl");
+
+ /* avi header */
+ put_tag(pb, "avih");
+ put_le32(pb, 14 * 4);
+ bitrate = 0;
+
+ video_enc = NULL;
+ for(n=0;n<s->nb_streams;n++) {
+ stream = s->streams[n]->codec;
+ bitrate += stream->bit_rate;
+ if (stream->codec_type == CODEC_TYPE_VIDEO)
+ video_enc = stream;
+ }
+
+ nb_frames = 0;
+
+ if(video_enc){
+ put_le32(pb, (uint32_t)(int64_t_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
+ } else {
+ put_le32(pb, 0);
+ }
+ put_le32(pb, bitrate / 8); /* XXX: not quite exact */
+ put_le32(pb, 0); /* padding */
+ if (url_is_streamed(pb))
+ put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
+ else
+ put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
+ avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */
+ put_le32(pb, nb_frames); /* nb frames, filled later */
+ put_le32(pb, 0); /* initial frame */
+ put_le32(pb, s->nb_streams); /* nb streams */
+ put_le32(pb, 1024 * 1024); /* suggested buffer size */
+ if(video_enc){
+ put_le32(pb, video_enc->width);
+ put_le32(pb, video_enc->height);
+ } else {
+ put_le32(pb, 0);
+ put_le32(pb, 0);
+ }
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+
+ /* stream list */
+ for(i=0;i<n;i++) {
+ list2 = start_tag(pb, "LIST");
+ put_tag(pb, "strl");
+
+ stream = s->streams[i]->codec;
+
+ /* FourCC should really be set by the codec itself */
+ if (! stream->codec_tag) {
+ stream->codec_tag = codec_get_bmp_tag(stream->codec_id);
+ }
+
+ /* stream generic header */
+ strh = start_tag(pb, "strh");
+ switch(stream->codec_type) {
+ case CODEC_TYPE_VIDEO: put_tag(pb, "vids"); break;
+ case CODEC_TYPE_AUDIO: put_tag(pb, "auds"); break;
+// case CODEC_TYPE_TEXT : put_tag(pb, "txts"); break;
+ case CODEC_TYPE_DATA : put_tag(pb, "dats"); break;
+ }
+ if(stream->codec_type == CODEC_TYPE_VIDEO)
+ put_le32(pb, stream->codec_tag);
+ else
+ put_le32(pb, 1);
+ put_le32(pb, 0); /* flags */
+ put_le16(pb, 0); /* priority */
+ put_le16(pb, 0); /* language */
+ put_le32(pb, 0); /* initial frame */
+
+ ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);
+
+ put_le32(pb, au_scale); /* scale */
+ put_le32(pb, au_byterate); /* rate */
+ av_set_pts_info(s->streams[i], 64, au_scale, au_byterate);
+
+ put_le32(pb, 0); /* start */
+ avi->frames_hdr_strm[i] = url_ftell(pb); /* remember this offset to fill later */
+ if (url_is_streamed(pb))
+ put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
+ else
+ put_le32(pb, 0); /* length, XXX: filled later */
+
+ /* suggested buffer size */ //FIXME set at the end to largest chunk
+ if(stream->codec_type == CODEC_TYPE_VIDEO)
+ put_le32(pb, 1024 * 1024);
+ else if(stream->codec_type == CODEC_TYPE_AUDIO)
+ put_le32(pb, 12 * 1024);
+ else
+ put_le32(pb, 0);
+ put_le32(pb, -1); /* quality */
+ put_le32(pb, au_ssize); /* sample size */
+ put_le32(pb, 0);
+ put_le16(pb, stream->width);
+ put_le16(pb, stream->height);
+ end_tag(pb, strh);
+
+ if(stream->codec_type != CODEC_TYPE_DATA){
+ strf = start_tag(pb, "strf");
+ switch(stream->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ put_bmp_header(pb, stream, codec_bmp_tags, 0);
+ break;
+ case CODEC_TYPE_AUDIO:
+ if (put_wav_header(pb, stream) < 0) {
+ av_free(avi);
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ }
+ end_tag(pb, strf);
+ }
+
+ if (!url_is_streamed(pb)) {
+ unsigned char tag[5];
+ int j;
+
+ /* Starting to lay out AVI OpenDML master index.
+             * We want to make it a JUNK entry for now, since we'd
+ * like to get away without making AVI an OpenDML one
+ * for compatibility reasons.
+ */
+ avi->indexes[i].entry = avi->indexes[i].ents_allocated = 0;
+ avi->indexes[i].indx_start = start_tag(pb, "JUNK");
+ put_le16(pb, 4); /* wLongsPerEntry */
+ put_byte(pb, 0); /* bIndexSubType (0 == frame index) */
+ put_byte(pb, 0); /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
+ put_le32(pb, 0); /* nEntriesInUse (will fill out later on) */
+ put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type));
+ /* dwChunkId */
+ put_le64(pb, 0); /* dwReserved[3]
+ put_le32(pb, 0); Must be 0. */
+ for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
+ put_le64(pb, 0);
+ end_tag(pb, avi->indexes[i].indx_start);
+ }
+
+ end_tag(pb, list2);
+ }
+
+ if (!url_is_streamed(pb)) {
+        /* the AVI could become an OpenDML one if it grows beyond the 2 GB limit */
+ avi->odml_list = start_tag(pb, "JUNK");
+ put_tag(pb, "odml");
+ put_tag(pb, "dmlh");
+ put_le32(pb, 248);
+ for (i = 0; i < 248; i+= 4)
+ put_le32(pb, 0);
+ end_tag(pb, avi->odml_list);
+ }
+
+ end_tag(pb, list1);
+
+ list2 = start_tag(pb, "LIST");
+ put_tag(pb, "INFO");
+ avi_write_info_tag(pb, "INAM", s->title);
+ avi_write_info_tag(pb, "IART", s->author);
+ avi_write_info_tag(pb, "ICOP", s->copyright);
+ avi_write_info_tag(pb, "ICMT", s->comment);
+ avi_write_info_tag(pb, "IPRD", s->album);
+ avi_write_info_tag(pb, "IGNR", s->genre);
+ if (s->track) {
+ char str_track[4];
+ snprintf(str_track, 4, "%d", s->track);
+ avi_write_info_tag(pb, "IPRT", str_track);
+ }
+ if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
+ avi_write_info_tag(pb, "ISFT", LIBAVFORMAT_IDENT);
+ end_tag(pb, list2);
+
+ /* some padding for easier tag editing */
+ list2 = start_tag(pb, "JUNK");
+ for (i = 0; i < 1016; i += 4)
+ put_le32(pb, 0);
+ end_tag(pb, list2);
+
+ avi->movi_list = start_tag(pb, "LIST");
+ put_tag(pb, "movi");
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int avi_write_ix(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ AVIContext *avi = s->priv_data;
+ char tag[5];
+ char ix_tag[] = "ix00";
+ int i, j;
+
+ assert(!url_is_streamed(pb));
+
+ if (avi->riff_id > AVI_MASTER_INDEX_SIZE)
+ return -1;
+
+ for (i=0;i<s->nb_streams;i++) {
+ offset_t ix, pos;
+
+ avi_stream2fourcc(&tag[0], i, s->streams[i]->codec->codec_type);
+ ix_tag[3] = '0' + i;
+
+ /* Writing AVI OpenDML leaf index chunk */
+ ix = url_ftell(pb);
+ put_tag(pb, &ix_tag[0]); /* ix?? */
+ put_le32(pb, avi->indexes[i].entry * 8 + 24);
+ /* chunk size */
+ put_le16(pb, 2); /* wLongsPerEntry */
+ put_byte(pb, 0); /* bIndexSubType (0 == frame index) */
+ put_byte(pb, 1); /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */
+ put_le32(pb, avi->indexes[i].entry);
+ /* nEntriesInUse */
+ put_tag(pb, &tag[0]); /* dwChunkId */
+ put_le64(pb, avi->movi_list);/* qwBaseOffset */
+ put_le32(pb, 0); /* dwReserved_3 (must be 0) */
+
+ for (j=0; j<avi->indexes[i].entry; j++) {
+ AVIIentry* ie = avi_get_ientry(&avi->indexes[i], j);
+ put_le32(pb, ie->pos + 8);
+ put_le32(pb, ((uint32_t)ie->len & ~0x80000000) |
+ (ie->flags & 0x10 ? 0 : 0x80000000));
+ }
+ put_flush_packet(pb);
+ pos = url_ftell(pb);
+
+ /* Updating one entry in the AVI OpenDML master index */
+ url_fseek(pb, avi->indexes[i].indx_start - 8, SEEK_SET);
+ put_tag(pb, "indx"); /* enabling this entry */
+ url_fskip(pb, 8);
+ put_le32(pb, avi->riff_id); /* nEntriesInUse */
+ url_fskip(pb, 16*avi->riff_id);
+ put_le64(pb, ix); /* qwOffset */
+ put_le32(pb, pos - ix); /* dwSize */
+ put_le32(pb, avi->indexes[i].entry); /* dwDuration */
+
+ url_fseek(pb, pos, SEEK_SET);
+ }
+ return 0;
+}
+
+static int avi_write_idx1(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ AVIContext *avi = s->priv_data;
+ offset_t idx_chunk;
+ int i;
+ char tag[5];
+
+ if (!url_is_streamed(pb)) {
+ AVIIentry* ie = 0, *tie;
+ int entry[MAX_STREAMS];
+ int empty, stream_id = -1;
+
+ idx_chunk = start_tag(pb, "idx1");
+ memset(&entry[0], 0, sizeof(entry));
+ do {
+ empty = 1;
+ for (i=0; i<s->nb_streams; i++) {
+ if (avi->indexes[i].entry <= entry[i])
+ continue;
+
+ tie = avi_get_ientry(&avi->indexes[i], entry[i]);
+ if (empty || tie->pos < ie->pos) {
+ ie = tie;
+ stream_id = i;
+ }
+ empty = 0;
+ }
+ if (!empty) {
+ avi_stream2fourcc(&tag[0], stream_id,
+ s->streams[stream_id]->codec->codec_type);
+ put_tag(pb, &tag[0]);
+ put_le32(pb, ie->flags);
+ put_le32(pb, ie->pos);
+ put_le32(pb, ie->len);
+ entry[stream_id]++;
+ }
+ } while (!empty);
+ end_tag(pb, idx_chunk);
+
+ avi_write_counters(s, avi->riff_id);
+ }
+ return 0;
+}
+
+static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned char tag[5];
+ unsigned int flags=0;
+ const int stream_index= pkt->stream_index;
+ AVCodecContext *enc= s->streams[stream_index]->codec;
+ int size= pkt->size;
+
+// av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %d\n", pkt->dts, avi->packet_count[stream_index], stream_index);
+ while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avi->packet_count[stream_index]){
+ AVPacket empty_packet;
+
+ av_init_packet(&empty_packet);
+ empty_packet.size= 0;
+ empty_packet.data= NULL;
+ empty_packet.stream_index= stream_index;
+ avi_write_packet(s, &empty_packet);
+// av_log(s, AV_LOG_DEBUG, "dup %"PRId64" %d\n", pkt->dts, avi->packet_count[stream_index]);
+ }
+ avi->packet_count[stream_index]++;
+
+ // Make sure to put an OpenDML chunk when the file size exceeds the limits
+ if (!url_is_streamed(pb) &&
+ (url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {
+
+ avi_write_ix(s);
+ end_tag(pb, avi->movi_list);
+
+ if (avi->riff_id == 1)
+ avi_write_idx1(s);
+
+ end_tag(pb, avi->riff_start);
+ avi->movi_list = avi_start_new_riff(avi, pb, "AVIX", "movi");
+ }
+
+ avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
+ if(pkt->flags&PKT_FLAG_KEY)
+ flags = 0x10;
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ avi->audio_strm_length[stream_index] += size;
+ }
+
+ if (!url_is_streamed(&s->pb)) {
+ AVIIndex* idx = &avi->indexes[stream_index];
+ int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
+ int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
+ if (idx->ents_allocated <= idx->entry) {
+ idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
+ if (!idx->cluster)
+ return -1;
+ idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
+ if (!idx->cluster[cl])
+ return -1;
+ idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
+ }
+
+ idx->cluster[cl][id].flags = flags;
+ idx->cluster[cl][id].pos = url_ftell(pb) - avi->movi_list;
+ idx->cluster[cl][id].len = size;
+ idx->entry++;
+ }
+
+ put_buffer(pb, tag, 4);
+ put_le32(pb, size);
+ put_buffer(pb, pkt->data, size);
+ if (size & 1)
+ put_byte(pb, 0);
+
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int avi_write_trailer(AVFormatContext *s)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int res = 0;
+ int i, j, n, nb_frames;
+ offset_t file_size;
+
+ if (!url_is_streamed(pb))
+ {
+ if (avi->riff_id == 1) {
+ end_tag(pb, avi->movi_list);
+ res = avi_write_idx1(s);
+ end_tag(pb, avi->riff_start);
+ } else {
+ avi_write_ix(s);
+ end_tag(pb, avi->movi_list);
+ end_tag(pb, avi->riff_start);
+
+ file_size = url_ftell(pb);
+ url_fseek(pb, avi->odml_list - 8, SEEK_SET);
+ put_tag(pb, "LIST"); /* Making this AVI OpenDML one */
+ url_fskip(pb, 16);
+
+ for (n=nb_frames=0;n<s->nb_streams;n++) {
+ AVCodecContext *stream = s->streams[n]->codec;
+ if (stream->codec_type == CODEC_TYPE_VIDEO) {
+ if (nb_frames < avi->packet_count[n])
+ nb_frames = avi->packet_count[n];
+ } else {
+ if (stream->codec_id == CODEC_ID_MP2 || stream->codec_id == CODEC_ID_MP3) {
+ nb_frames += avi->packet_count[n];
+ }
+ }
+ }
+ put_le32(pb, nb_frames);
+ url_fseek(pb, file_size, SEEK_SET);
+
+ avi_write_counters(s, avi->riff_id);
+ }
+ }
+ put_flush_packet(pb);
+
+ for (i=0; i<MAX_STREAMS; i++) {
+ for (j=0; j<avi->indexes[i].ents_allocated/AVI_INDEX_CLUSTER_SIZE; j++)
+ av_free(avi->indexes[i].cluster[j]);
+ av_free(avi->indexes[i].cluster);
+ avi->indexes[i].cluster = NULL;
+ avi->indexes[i].ents_allocated = avi->indexes[i].entry = 0;
+ }
+
+ return res;
+}
+
+AVOutputFormat avi_muxer = {
+ "avi",
+ "avi format",
+ "video/x-msvideo",
+ "avi",
+ sizeof(AVIContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG4,
+ avi_write_header,
+ avi_write_packet,
+ avi_write_trailer,
+};
+#endif //CONFIG_AVI_MUXER
diff --git a/contrib/ffmpeg/libavformat/avio.c b/contrib/ffmpeg/libavformat/avio.c
new file mode 100644
index 000000000..a2b8a8325
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avio.c
@@ -0,0 +1,192 @@
+/*
+ * Unbuffered io for ffmpeg system
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+static int default_interrupt_cb(void);
+
+URLProtocol *first_protocol = NULL;
+URLInterruptCB *url_interrupt_cb = default_interrupt_cb;
+
+int register_protocol(URLProtocol *protocol)
+{
+ URLProtocol **p;
+ p = &first_protocol;
+ while (*p != NULL) p = &(*p)->next;
+ *p = protocol;
+ protocol->next = NULL;
+ return 0;
+}
+
+int url_open(URLContext **puc, const char *filename, int flags)
+{
+ URLContext *uc;
+ URLProtocol *up;
+ const char *p;
+ char proto_str[128], *q;
+ int err;
+
+ p = filename;
+ q = proto_str;
+ while (*p != '\0' && *p != ':') {
+ /* protocols can only contain alphabetic chars */
+ if (!isalpha(*p))
+ goto file_proto;
+ if ((q - proto_str) < sizeof(proto_str) - 1)
+ *q++ = *p;
+ p++;
+ }
+    /* if the protocol has length 1, we consider it to be a DOS drive letter */
+ if (*p == '\0' || (q - proto_str) <= 1) {
+ file_proto:
+ strcpy(proto_str, "file");
+ } else {
+ *q = '\0';
+ }
+
+ up = first_protocol;
+ while (up != NULL) {
+ if (!strcmp(proto_str, up->name))
+ goto found;
+ up = up->next;
+ }
+ err = -ENOENT;
+ goto fail;
+ found:
+ uc = av_malloc(sizeof(URLContext) + strlen(filename));
+ if (!uc) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ strcpy(uc->filename, filename);
+ uc->prot = up;
+ uc->flags = flags;
+ uc->is_streamed = 0; /* default = not streamed */
+ uc->max_packet_size = 0; /* default: stream file */
+ err = up->url_open(uc, filename, flags);
+ if (err < 0) {
+ av_free(uc);
+ *puc = NULL;
+ return err;
+ }
+ *puc = uc;
+ return 0;
+ fail:
+ *puc = NULL;
+ return err;
+}
+
+int url_read(URLContext *h, unsigned char *buf, int size)
+{
+ int ret;
+ if (h->flags & URL_WRONLY)
+ return AVERROR_IO;
+ ret = h->prot->url_read(h, buf, size);
+ return ret;
+}
+
+#if defined(CONFIG_MUXERS) || defined(CONFIG_PROTOCOLS)
+int url_write(URLContext *h, unsigned char *buf, int size)
+{
+ int ret;
+ if (!(h->flags & (URL_WRONLY | URL_RDWR)))
+ return AVERROR_IO;
+ /* avoid sending too big packets */
+ if (h->max_packet_size && size > h->max_packet_size)
+ return AVERROR_IO;
+ ret = h->prot->url_write(h, buf, size);
+ return ret;
+}
+#endif //CONFIG_MUXERS || CONFIG_PROTOCOLS
+
+offset_t url_seek(URLContext *h, offset_t pos, int whence)
+{
+ offset_t ret;
+
+ if (!h->prot->url_seek)
+ return -EPIPE;
+ ret = h->prot->url_seek(h, pos, whence);
+ return ret;
+}
+
+int url_close(URLContext *h)
+{
+ int ret;
+
+ ret = h->prot->url_close(h);
+ av_free(h);
+ return ret;
+}
+
+int url_exist(const char *filename)
+{
+ URLContext *h;
+ if (url_open(&h, filename, URL_RDONLY) < 0)
+ return 0;
+ url_close(h);
+ return 1;
+}
+
+offset_t url_filesize(URLContext *h)
+{
+ offset_t pos, size;
+
+ pos = url_seek(h, 0, SEEK_CUR);
+ size = url_seek(h, -1, SEEK_END)+1;
+ url_seek(h, pos, SEEK_SET);
+ return size;
+}
+
+/*
+ * Return the maximum packet size associated with a packetized file
+ * handle. If the file is not packetized (e.g. a stream like HTTP or a file
+ * on disk), 0 is returned.
+ *
+ * @param h file handle
+ * @return maximum packet size in bytes
+ */
+int url_get_max_packet_size(URLContext *h)
+{
+ return h->max_packet_size;
+}
+
+void url_get_filename(URLContext *h, char *buf, int buf_size)
+{
+ pstrcpy(buf, buf_size, h->filename);
+}
+
+
+static int default_interrupt_cb(void)
+{
+ return 0;
+}
+
+/**
+ * The callback is called in blocking functions to test regularly whether
+ * asynchronous interruption is needed. -EINTR is returned in this
+ * case by the interrupted function. 'NULL' means no interrupt
+ * callback is given.
+ */
+void url_set_interrupt_cb(URLInterruptCB *interrupt_cb)
+{
+ if (!interrupt_cb)
+ interrupt_cb = default_interrupt_cb;
+ url_interrupt_cb = interrupt_cb;
+}
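+/* Hypothetical usage sketch (not part of the original source): an application
+ * could install its own callback so that blocking protocol calls can be
+ * aborted; 'app_aborted' and 'app_interrupt_cb' are assumed names.
+ *
+ *     static volatile int app_aborted = 0;
+ *     static int app_interrupt_cb(void) { return app_aborted; }
+ *     ...
+ *     url_set_interrupt_cb(app_interrupt_cb);
+ */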
diff --git a/contrib/ffmpeg/libavformat/avio.h b/contrib/ffmpeg/libavformat/avio.h
new file mode 100644
index 000000000..f0fd1a85c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avio.h
@@ -0,0 +1,201 @@
+/*
+ * unbuffered io for ffmpeg system
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVIO_H
+#define AVIO_H
+
+/* output byte stream handling */
+
+typedef int64_t offset_t;
+
+/* unbuffered I/O */
+
+struct URLContext {
+ struct URLProtocol *prot;
+ int flags;
+ int is_streamed; /* true if streamed (no seek possible), default = false */
+ int max_packet_size; /* if non zero, the stream is packetized with this max packet size */
+ void *priv_data;
+ char filename[1]; /* specified filename */
+};
+
+typedef struct URLContext URLContext;
+
+typedef struct URLPollEntry {
+ URLContext *handle;
+ int events;
+ int revents;
+} URLPollEntry;
+
+#define URL_RDONLY 0
+#define URL_WRONLY 1
+#define URL_RDWR 2
+
+typedef int URLInterruptCB(void);
+
+int url_open(URLContext **h, const char *filename, int flags);
+int url_read(URLContext *h, unsigned char *buf, int size);
+int url_write(URLContext *h, unsigned char *buf, int size);
+offset_t url_seek(URLContext *h, offset_t pos, int whence);
+int url_close(URLContext *h);
+int url_exist(const char *filename);
+offset_t url_filesize(URLContext *h);
+int url_get_max_packet_size(URLContext *h);
+void url_get_filename(URLContext *h, char *buf, int buf_size);
+
+/* the callback is called in blocking functions to test regularly whether
+ asynchronous interruption is needed. -EINTR is returned in this
+ case by the interrupted function. 'NULL' means no interrupt
+ callback is given. */
+void url_set_interrupt_cb(URLInterruptCB *interrupt_cb);
+
+/* not implemented */
+int url_poll(URLPollEntry *poll_table, int n, int timeout);
+
+typedef struct URLProtocol {
+ const char *name;
+ int (*url_open)(URLContext *h, const char *filename, int flags);
+ int (*url_read)(URLContext *h, unsigned char *buf, int size);
+ int (*url_write)(URLContext *h, unsigned char *buf, int size);
+ offset_t (*url_seek)(URLContext *h, offset_t pos, int whence);
+ int (*url_close)(URLContext *h);
+ struct URLProtocol *next;
+} URLProtocol;
+
+extern URLProtocol *first_protocol;
+extern URLInterruptCB *url_interrupt_cb;
+
+int register_protocol(URLProtocol *protocol);
+
+typedef struct {
+ unsigned char *buffer;
+ int buffer_size;
+ unsigned char *buf_ptr, *buf_end;
+ void *opaque;
+ int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);
+ int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
+ offset_t (*seek)(void *opaque, offset_t offset, int whence);
+ offset_t pos; /* position in the file of the current buffer */
+ int must_flush; /* true if the next seek should flush */
+ int eof_reached; /* true if eof reached */
+ int write_flag; /* true if open for writing */
+ int is_streamed;
+ int max_packet_size;
+ unsigned long checksum;
+ unsigned char *checksum_ptr;
+ unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);
+ int error; ///< contains the error code or 0 if no error happened
+} ByteIOContext;
+
+int init_put_byte(ByteIOContext *s,
+ unsigned char *buffer,
+ int buffer_size,
+ int write_flag,
+ void *opaque,
+ int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
+ int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
+ offset_t (*seek)(void *opaque, offset_t offset, int whence));
+
+void put_byte(ByteIOContext *s, int b);
+void put_buffer(ByteIOContext *s, const unsigned char *buf, int size);
+void put_le64(ByteIOContext *s, uint64_t val);
+void put_be64(ByteIOContext *s, uint64_t val);
+void put_le32(ByteIOContext *s, unsigned int val);
+void put_be32(ByteIOContext *s, unsigned int val);
+void put_le24(ByteIOContext *s, unsigned int val);
+void put_be24(ByteIOContext *s, unsigned int val);
+void put_le16(ByteIOContext *s, unsigned int val);
+void put_be16(ByteIOContext *s, unsigned int val);
+void put_tag(ByteIOContext *s, const char *tag);
+
+void put_strz(ByteIOContext *s, const char *buf);
+
+offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence);
+void url_fskip(ByteIOContext *s, offset_t offset);
+offset_t url_ftell(ByteIOContext *s);
+offset_t url_fsize(ByteIOContext *s);
+int url_feof(ByteIOContext *s);
+int url_ferror(ByteIOContext *s);
+
+#define URL_EOF (-1)
+int url_fgetc(ByteIOContext *s);
+#ifdef __GNUC__
+int url_fprintf(ByteIOContext *s, const char *fmt, ...) __attribute__ ((__format__ (__printf__, 2, 3)));
+#else
+int url_fprintf(ByteIOContext *s, const char *fmt, ...);
+#endif
+char *url_fgets(ByteIOContext *s, char *buf, int buf_size);
+
+void put_flush_packet(ByteIOContext *s);
+
+int get_buffer(ByteIOContext *s, unsigned char *buf, int size);
+int get_partial_buffer(ByteIOContext *s, unsigned char *buf, int size);
+int get_byte(ByteIOContext *s);
+unsigned int get_le24(ByteIOContext *s);
+unsigned int get_le32(ByteIOContext *s);
+uint64_t get_le64(ByteIOContext *s);
+unsigned int get_le16(ByteIOContext *s);
+
+char *get_strz(ByteIOContext *s, char *buf, int maxlen);
+unsigned int get_be16(ByteIOContext *s);
+unsigned int get_be24(ByteIOContext *s);
+unsigned int get_be32(ByteIOContext *s);
+uint64_t get_be64(ByteIOContext *s);
+
+static inline int url_is_streamed(ByteIOContext *s)
+{
+ return s->is_streamed;
+}
+
+int url_fdopen(ByteIOContext *s, URLContext *h);
+int url_setbufsize(ByteIOContext *s, int buf_size);
+int url_fopen(ByteIOContext *s, const char *filename, int flags);
+int url_fclose(ByteIOContext *s);
+URLContext *url_fileno(ByteIOContext *s);
+int url_fget_max_packet_size(ByteIOContext *s);
+
+int url_open_buf(ByteIOContext *s, uint8_t *buf, int buf_size, int flags);
+int url_close_buf(ByteIOContext *s);
+
+int url_open_dyn_buf(ByteIOContext *s);
+int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size);
+int url_close_dyn_buf(ByteIOContext *s, uint8_t **pbuffer);
+
+unsigned long get_checksum(ByteIOContext *s);
+void init_checksum(ByteIOContext *s, unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len), unsigned long checksum);
+
+/* file.c */
+extern URLProtocol file_protocol;
+extern URLProtocol pipe_protocol;
+
+/* udp.c */
+extern URLProtocol udp_protocol;
+int udp_set_remote_url(URLContext *h, const char *uri);
+int udp_get_local_port(URLContext *h);
+int udp_get_file_handle(URLContext *h);
+
+/* tcp.c */
+extern URLProtocol tcp_protocol;
+
+/* http.c */
+extern URLProtocol http_protocol;
+
+#endif
+
diff --git a/contrib/ffmpeg/libavformat/aviobuf.c b/contrib/ffmpeg/libavformat/aviobuf.c
new file mode 100644
index 000000000..866641ad0
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/aviobuf.c
@@ -0,0 +1,790 @@
+/*
+ * Buffered I/O for ffmpeg system
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "avio.h"
+#include <stdarg.h>
+
+#define IO_BUFFER_SIZE 32768
+
+static void fill_buffer(ByteIOContext *s);
+
+int init_put_byte(ByteIOContext *s,
+ unsigned char *buffer,
+ int buffer_size,
+ int write_flag,
+ void *opaque,
+ int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
+ int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
+ offset_t (*seek)(void *opaque, offset_t offset, int whence))
+{
+ s->buffer = buffer;
+ s->buffer_size = buffer_size;
+ s->buf_ptr = buffer;
+ s->write_flag = write_flag;
+ if (!s->write_flag)
+ s->buf_end = buffer;
+ else
+ s->buf_end = buffer + buffer_size;
+ s->opaque = opaque;
+ s->write_packet = write_packet;
+ s->read_packet = read_packet;
+ s->seek = seek;
+ s->pos = 0;
+ s->must_flush = 0;
+ s->eof_reached = 0;
+ s->error = 0;
+ s->is_streamed = 0;
+ s->max_packet_size = 0;
+ s->update_checksum= NULL;
+ if(!read_packet && !write_flag){
+ s->pos = buffer_size;
+ s->buf_end = s->buffer + buffer_size;
+ }
+ return 0;
+}
+
+static void flush_buffer(ByteIOContext *s)
+{
+ if (s->buf_ptr > s->buffer) {
+ if (s->write_packet && !s->error){
+ int ret= s->write_packet(s->opaque, s->buffer, s->buf_ptr - s->buffer);
+ if(ret < 0){
+ s->error = ret;
+ }
+ }
+ if(s->update_checksum){
+ s->checksum= s->update_checksum(s->checksum, s->checksum_ptr, s->buf_ptr - s->checksum_ptr);
+ s->checksum_ptr= s->buffer;
+ }
+ s->pos += s->buf_ptr - s->buffer;
+ }
+ s->buf_ptr = s->buffer;
+}
+
+void put_byte(ByteIOContext *s, int b)
+{
+ *(s->buf_ptr)++ = b;
+ if (s->buf_ptr >= s->buf_end)
+ flush_buffer(s);
+}
+
+void put_buffer(ByteIOContext *s, const unsigned char *buf, int size)
+{
+ int len;
+
+ while (size > 0) {
+ len = (s->buf_end - s->buf_ptr);
+ if (len > size)
+ len = size;
+ memcpy(s->buf_ptr, buf, len);
+ s->buf_ptr += len;
+
+ if (s->buf_ptr >= s->buf_end)
+ flush_buffer(s);
+
+ buf += len;
+ size -= len;
+ }
+}
+
+void put_flush_packet(ByteIOContext *s)
+{
+ flush_buffer(s);
+ s->must_flush = 0;
+}
+
+offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
+{
+ offset_t offset1;
+ offset_t pos= s->pos - (s->write_flag ? 0 : (s->buf_end - s->buffer));
+
+ if (whence != SEEK_CUR && whence != SEEK_SET)
+ return -EINVAL;
+
+ if (whence == SEEK_CUR) {
+ offset1 = pos + (s->buf_ptr - s->buffer);
+ if (offset == 0)
+ return offset1;
+ offset += offset1;
+ }
+ offset1 = offset - pos;
+ if (!s->must_flush &&
+ offset1 >= 0 && offset1 < (s->buf_end - s->buffer)) {
+ /* can do the seek inside the buffer */
+ s->buf_ptr = s->buffer + offset1;
+ } else if(s->is_streamed && !s->write_flag &&
+ offset1 >= 0 && offset1 < (s->buf_end - s->buffer) + (1<<16)){
+ while(s->pos < offset && !s->eof_reached)
+ fill_buffer(s);
+ s->buf_ptr = s->buf_end + offset - s->pos;
+ } else {
+#if defined(CONFIG_MUXERS) || defined(CONFIG_NETWORK)
+ if (s->write_flag) {
+ flush_buffer(s);
+ s->must_flush = 1;
+ } else
+#endif /* defined(CONFIG_MUXERS) || defined(CONFIG_NETWORK) */
+ {
+ s->buf_end = s->buffer;
+ }
+ s->buf_ptr = s->buffer;
+ if (!s->seek || s->seek(s->opaque, offset, SEEK_SET) == (offset_t)-EPIPE)
+ return -EPIPE;
+ s->pos = offset;
+ }
+ s->eof_reached = 0;
+ return offset;
+}
+
+void url_fskip(ByteIOContext *s, offset_t offset)
+{
+ url_fseek(s, offset, SEEK_CUR);
+}
+
+offset_t url_ftell(ByteIOContext *s)
+{
+ return url_fseek(s, 0, SEEK_CUR);
+}
+
+offset_t url_fsize(ByteIOContext *s)
+{
+ offset_t size;
+
+ if (!s->seek)
+ return -EPIPE;
+ size = s->seek(s->opaque, -1, SEEK_END) + 1;
+ s->seek(s->opaque, s->pos, SEEK_SET);
+ return size;
+}
+
+int url_feof(ByteIOContext *s)
+{
+ return s->eof_reached;
+}
+
+int url_ferror(ByteIOContext *s)
+{
+ return s->error;
+}
+
+#if defined(CONFIG_MUXERS) || defined(CONFIG_PROTOCOLS)
+void put_le32(ByteIOContext *s, unsigned int val)
+{
+ put_byte(s, val);
+ put_byte(s, val >> 8);
+ put_byte(s, val >> 16);
+ put_byte(s, val >> 24);
+}
+
+void put_be32(ByteIOContext *s, unsigned int val)
+{
+ put_byte(s, val >> 24);
+ put_byte(s, val >> 16);
+ put_byte(s, val >> 8);
+ put_byte(s, val);
+}
+
+void put_strz(ByteIOContext *s, const char *str)
+{
+ if (str)
+ put_buffer(s, (const unsigned char *) str, strlen(str) + 1);
+ else
+ put_byte(s, 0);
+}
+
+void put_le64(ByteIOContext *s, uint64_t val)
+{
+ put_le32(s, (uint32_t)(val & 0xffffffff));
+ put_le32(s, (uint32_t)(val >> 32));
+}
+
+void put_be64(ByteIOContext *s, uint64_t val)
+{
+ put_be32(s, (uint32_t)(val >> 32));
+ put_be32(s, (uint32_t)(val & 0xffffffff));
+}
+
+void put_le16(ByteIOContext *s, unsigned int val)
+{
+ put_byte(s, val);
+ put_byte(s, val >> 8);
+}
+
+void put_be16(ByteIOContext *s, unsigned int val)
+{
+ put_byte(s, val >> 8);
+ put_byte(s, val);
+}
+
+void put_le24(ByteIOContext *s, unsigned int val)
+{
+ put_le16(s, val & 0xffff);
+ put_byte(s, val >> 16);
+}
+
+void put_be24(ByteIOContext *s, unsigned int val)
+{
+ put_be16(s, val >> 8);
+ put_byte(s, val);
+}
+
+void put_tag(ByteIOContext *s, const char *tag)
+{
+ while (*tag) {
+ put_byte(s, *tag++);
+ }
+}
+#endif //CONFIG_MUXERS || CONFIG_PROTOCOLS
+
+/* Input stream */
+
+static void fill_buffer(ByteIOContext *s)
+{
+ int len;
+
+ /* no need to do anything if EOF already reached */
+ if (s->eof_reached)
+ return;
+
+ if(s->update_checksum){
+ if(s->buf_end > s->checksum_ptr)
+ s->checksum= s->update_checksum(s->checksum, s->checksum_ptr, s->buf_end - s->checksum_ptr);
+ s->checksum_ptr= s->buffer;
+ }
+
+ len = s->read_packet(s->opaque, s->buffer, s->buffer_size);
+ if (len <= 0) {
+ /* do not modify buffer if EOF reached so that a seek back can
+ be done without rereading data */
+ s->eof_reached = 1;
+ if(len<0)
+ s->error= len;
+ } else {
+ s->pos += len;
+ s->buf_ptr = s->buffer;
+ s->buf_end = s->buffer + len;
+ }
+}
+
+unsigned long get_checksum(ByteIOContext *s){
+ s->checksum= s->update_checksum(s->checksum, s->checksum_ptr, s->buf_ptr - s->checksum_ptr);
+ s->update_checksum= NULL;
+ return s->checksum;
+}
+
+void init_checksum(ByteIOContext *s, unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len), unsigned long checksum){
+ s->update_checksum= update_checksum;
+ if(s->update_checksum){
+ s->checksum= checksum;
+ s->checksum_ptr= s->buf_ptr;
+ }
+}
+
+/* NOTE: returns 0 on EOF, so it cannot be used where EOF handling is
+   necessary */
+/* XXX: put an inline version */
+int get_byte(ByteIOContext *s)
+{
+ if (s->buf_ptr < s->buf_end) {
+ return *s->buf_ptr++;
+ } else {
+ fill_buffer(s);
+ if (s->buf_ptr < s->buf_end)
+ return *s->buf_ptr++;
+ else
+ return 0;
+ }
+}
+
+/* NOTE: return URL_EOF (-1) if EOF */
+int url_fgetc(ByteIOContext *s)
+{
+ if (s->buf_ptr < s->buf_end) {
+ return *s->buf_ptr++;
+ } else {
+ fill_buffer(s);
+ if (s->buf_ptr < s->buf_end)
+ return *s->buf_ptr++;
+ else
+ return URL_EOF;
+ }
+}
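+/* Illustrative sketch (not part of the original source): url_fgetc() is the
+ * variant to use when EOF must be detected, since get_byte() returns 0 there:
+ *
+ *     int c;
+ *     while ((c = url_fgetc(s)) != URL_EOF)
+ *         handle_byte(c);          // handle_byte is an assumed consumer
+ */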
+
+int get_buffer(ByteIOContext *s, unsigned char *buf, int size)
+{
+ int len, size1;
+
+ size1 = size;
+ while (size > 0) {
+ len = s->buf_end - s->buf_ptr;
+ if (len > size)
+ len = size;
+ if (len == 0) {
+ if(size > s->buffer_size && !s->update_checksum){
+ len = s->read_packet(s->opaque, buf, size);
+ if (len <= 0) {
+ /* do not modify buffer if EOF reached so that a seek back can
+ be done without rereading data */
+ s->eof_reached = 1;
+ if(len<0)
+ s->error= len;
+ break;
+ } else {
+ s->pos += len;
+ size -= len;
+ buf += len;
+ s->buf_ptr = s->buffer;
+ s->buf_end = s->buffer/* + len*/;
+ }
+ }else{
+ fill_buffer(s);
+ len = s->buf_end - s->buf_ptr;
+ if (len == 0)
+ break;
+ }
+ } else {
+ memcpy(buf, s->buf_ptr, len);
+ buf += len;
+ s->buf_ptr += len;
+ size -= len;
+ }
+ }
+ return size1 - size;
+}
+
+int get_partial_buffer(ByteIOContext *s, unsigned char *buf, int size)
+{
+ int len;
+
+ if(size<0)
+ return -1;
+
+ len = s->buf_end - s->buf_ptr;
+ if (len == 0) {
+ fill_buffer(s);
+ len = s->buf_end - s->buf_ptr;
+ }
+ if (len > size)
+ len = size;
+ memcpy(buf, s->buf_ptr, len);
+ s->buf_ptr += len;
+ return len;
+}
+
+unsigned int get_le16(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_byte(s);
+ val |= get_byte(s) << 8;
+ return val;
+}
+
+unsigned int get_le24(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_le16(s);
+ val |= get_byte(s) << 16;
+ return val;
+}
+
+unsigned int get_le32(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_le16(s);
+ val |= get_le16(s) << 16;
+ return val;
+}
+
+uint64_t get_le64(ByteIOContext *s)
+{
+ uint64_t val;
+ val = (uint64_t)get_le32(s);
+ val |= (uint64_t)get_le32(s) << 32;
+ return val;
+}
+
+unsigned int get_be16(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_byte(s) << 8;
+ val |= get_byte(s);
+ return val;
+}
+
+unsigned int get_be24(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_be16(s) << 8;
+ val |= get_byte(s);
+ return val;
+}
+unsigned int get_be32(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_be16(s) << 16;
+ val |= get_be16(s);
+ return val;
+}
+
+char *get_strz(ByteIOContext *s, char *buf, int maxlen)
+{
+ int i = 0;
+ char c;
+
+ while ((c = get_byte(s))) {
+ if (i < maxlen-1)
+ buf[i++] = c;
+ }
+
+    buf[i] = 0; /* ensure null termination; the string may have been truncated */
+
+ return buf;
+}
+
+uint64_t get_be64(ByteIOContext *s)
+{
+ uint64_t val;
+ val = (uint64_t)get_be32(s) << 32;
+ val |= (uint64_t)get_be32(s);
+ return val;
+}
+
+/* link with avio functions */
+
+#ifdef CONFIG_MUXERS
+static int url_write_packet(void *opaque, uint8_t *buf, int buf_size)
+{
+ URLContext *h = opaque;
+ return url_write(h, buf, buf_size);
+}
+#else
+#define url_write_packet NULL
+#endif //CONFIG_MUXERS
+
+static int url_read_packet(void *opaque, uint8_t *buf, int buf_size)
+{
+ URLContext *h = opaque;
+ return url_read(h, buf, buf_size);
+}
+
+static offset_t url_seek_packet(void *opaque, offset_t offset, int whence)
+{
+ URLContext *h = opaque;
+ return url_seek(h, offset, whence);
+ //return 0;
+}
+
+int url_fdopen(ByteIOContext *s, URLContext *h)
+{
+ uint8_t *buffer;
+ int buffer_size, max_packet_size;
+
+
+ max_packet_size = url_get_max_packet_size(h);
+ if (max_packet_size) {
+        buffer_size = max_packet_size; /* no need to buffer more than one packet */
+ } else {
+ buffer_size = IO_BUFFER_SIZE;
+ }
+ buffer = av_malloc(buffer_size);
+ if (!buffer)
+ return -ENOMEM;
+
+ if (init_put_byte(s, buffer, buffer_size,
+ (h->flags & URL_WRONLY || h->flags & URL_RDWR), h,
+ url_read_packet, url_write_packet, url_seek_packet) < 0) {
+ av_free(buffer);
+ return AVERROR_IO;
+ }
+ s->is_streamed = h->is_streamed;
+ s->max_packet_size = max_packet_size;
+ return 0;
+}
+
+/* XXX: must be called before any I/O */
+int url_setbufsize(ByteIOContext *s, int buf_size)
+{
+ uint8_t *buffer;
+ buffer = av_malloc(buf_size);
+ if (!buffer)
+ return -ENOMEM;
+
+ av_free(s->buffer);
+ s->buffer = buffer;
+ s->buffer_size = buf_size;
+ s->buf_ptr = buffer;
+ if (!s->write_flag)
+ s->buf_end = buffer;
+ else
+ s->buf_end = buffer + buf_size;
+ return 0;
+}
+
+/* NOTE: when opened as read/write, the buffers are only used for
+ reading */
+int url_fopen(ByteIOContext *s, const char *filename, int flags)
+{
+ URLContext *h;
+ int err;
+
+ err = url_open(&h, filename, flags);
+ if (err < 0)
+ return err;
+ err = url_fdopen(s, h);
+ if (err < 0) {
+ url_close(h);
+ return err;
+ }
+ return 0;
+}
+
+int url_fclose(ByteIOContext *s)
+{
+ URLContext *h = s->opaque;
+
+ av_free(s->buffer);
+ memset(s, 0, sizeof(ByteIOContext));
+ return url_close(h);
+}
+
+URLContext *url_fileno(ByteIOContext *s)
+{
+ return s->opaque;
+}
+
+#ifdef CONFIG_MUXERS
+/* XXX: currently size is limited */
+int url_fprintf(ByteIOContext *s, const char *fmt, ...)
+{
+ va_list ap;
+ char buf[4096];
+ int ret;
+
+ va_start(ap, fmt);
+ ret = vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ put_buffer(s, buf, strlen(buf));
+ return ret;
+}
+#endif //CONFIG_MUXERS
+
+/* note: unlike fgets, the EOL character is not returned and a whole
+   line is parsed; returns NULL if the first character read was EOF */
+char *url_fgets(ByteIOContext *s, char *buf, int buf_size)
+{
+ int c;
+ char *q;
+
+ c = url_fgetc(s);
+    if (c == URL_EOF)
+ return NULL;
+ q = buf;
+ for(;;) {
+        if (c == URL_EOF || c == '\n')
+ break;
+ if ((q - buf) < buf_size - 1)
+ *q++ = c;
+ c = url_fgetc(s);
+ }
+ if (buf_size > 0)
+ *q = '\0';
+ return buf;
+}
+
+/*
+ * Return the maximum packet size associated with a packetized buffered file
+ * handle. If the file is not packetized (e.g. a stream like HTTP or a file
+ * on disk), 0 is returned.
+ *
+ * @param h buffered file handle
+ * @return maximum packet size in bytes
+ */
+int url_fget_max_packet_size(ByteIOContext *s)
+{
+ return s->max_packet_size;
+}
+
+/* url_open_dyn_buf and url_close_dyn_buf are used in rtp.c to send a response
+ * back to the server even if CONFIG_MUXERS is not set. */
+#if defined(CONFIG_MUXERS) || defined(CONFIG_NETWORK)
+/* buffer handling */
+int url_open_buf(ByteIOContext *s, uint8_t *buf, int buf_size, int flags)
+{
+ return init_put_byte(s, buf, buf_size,
+ (flags & URL_WRONLY || flags & URL_RDWR),
+ NULL, NULL, NULL, NULL);
+}
+
+/* return the written or read size */
+int url_close_buf(ByteIOContext *s)
+{
+ put_flush_packet(s);
+ return s->buf_ptr - s->buffer;
+}
+
+/* output in a dynamic buffer */
+
+typedef struct DynBuffer {
+ int pos, size, allocated_size;
+ uint8_t *buffer;
+ int io_buffer_size;
+ uint8_t io_buffer[1];
+} DynBuffer;
+
+static int dyn_buf_write(void *opaque, uint8_t *buf, int buf_size)
+{
+ DynBuffer *d = opaque;
+ int new_size, new_allocated_size;
+
+ /* reallocate buffer if needed */
+ new_size = d->pos + buf_size;
+ new_allocated_size = d->allocated_size;
+ if(new_size < d->pos || new_size > INT_MAX/2)
+ return -1;
+ while (new_size > new_allocated_size) {
+ if (!new_allocated_size)
+ new_allocated_size = new_size;
+ else
+ new_allocated_size += new_allocated_size / 2 + 1;
+ }
+
+ if (new_allocated_size > d->allocated_size) {
+ d->buffer = av_realloc(d->buffer, new_allocated_size);
+ if(d->buffer == NULL)
+ return -1234;
+ d->allocated_size = new_allocated_size;
+ }
+ memcpy(d->buffer + d->pos, buf, buf_size);
+ d->pos = new_size;
+ if (d->pos > d->size)
+ d->size = d->pos;
+ return buf_size;
+}
+
+static int dyn_packet_buf_write(void *opaque, uint8_t *buf, int buf_size)
+{
+ unsigned char buf1[4];
+ int ret;
+
+ /* packetized write: output the header */
+ buf1[0] = (buf_size >> 24);
+ buf1[1] = (buf_size >> 16);
+ buf1[2] = (buf_size >> 8);
+ buf1[3] = (buf_size);
+ ret= dyn_buf_write(opaque, buf1, 4);
+ if(ret < 0)
+ return ret;
+
+ /* then the data */
+ return dyn_buf_write(opaque, buf, buf_size);
+}
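+/* The packetized dynamic buffer therefore stores each packet as a 4-byte
+ * big-endian length followed by the payload; a reader can recover the packet
+ * size from the header bytes (p assumed to point at them) with, e.g.:
+ *
+ *     size = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+ */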
+
+static offset_t dyn_buf_seek(void *opaque, offset_t offset, int whence)
+{
+ DynBuffer *d = opaque;
+
+ if (whence == SEEK_CUR)
+ offset += d->pos;
+ else if (whence == SEEK_END)
+ offset += d->size;
+ if (offset < 0 || offset > 0x7fffffffLL)
+ return -1;
+ d->pos = offset;
+ return 0;
+}
+
+static int url_open_dyn_buf_internal(ByteIOContext *s, int max_packet_size)
+{
+ DynBuffer *d;
+ int io_buffer_size, ret;
+
+ if (max_packet_size)
+ io_buffer_size = max_packet_size;
+ else
+ io_buffer_size = 1024;
+
+ if(sizeof(DynBuffer) + io_buffer_size < io_buffer_size)
+ return -1;
+ d = av_malloc(sizeof(DynBuffer) + io_buffer_size);
+ if (!d)
+ return -1;
+ d->io_buffer_size = io_buffer_size;
+ d->buffer = NULL;
+ d->pos = 0;
+ d->size = 0;
+ d->allocated_size = 0;
+ ret = init_put_byte(s, d->io_buffer, io_buffer_size,
+ 1, d, NULL,
+ max_packet_size ? dyn_packet_buf_write : dyn_buf_write,
+ max_packet_size ? NULL : dyn_buf_seek);
+ if (ret == 0) {
+ s->max_packet_size = max_packet_size;
+ }
+ return ret;
+}
+
+/*
+ * Open a write only memory stream.
+ *
+ * @param s new IO context
+ * @return zero if no error.
+ */
+int url_open_dyn_buf(ByteIOContext *s)
+{
+ return url_open_dyn_buf_internal(s, 0);
+}
+
+/*
+ * Open a write only packetized memory stream with a maximum packet
+ * size of 'max_packet_size'. The stream is stored in a memory buffer
+ * with a big endian 4 byte header giving the packet size in bytes.
+ *
+ * @param s new IO context
+ * @param max_packet_size maximum packet size (must be > 0)
+ * @return zero if no error.
+ */
+int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size)
+{
+ if (max_packet_size <= 0)
+ return -1;
+ return url_open_dyn_buf_internal(s, max_packet_size);
+}
+
+/*
+ * Return the written size and a pointer to the buffer. The buffer
+ * must be freed with av_free().
+ * @param s IO context
+ * @param pbuffer pointer that receives the byte buffer
+ * @return the length of the byte buffer
+ */
+int url_close_dyn_buf(ByteIOContext *s, uint8_t **pbuffer)
+{
+ DynBuffer *d = s->opaque;
+ int size;
+
+ put_flush_packet(s);
+
+ *pbuffer = d->buffer;
+ size = d->size;
+ av_free(d);
+ return size;
+}
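+/* Hypothetical usage sketch (not part of the original source): build a blob
+ * in memory and retrieve it once finished; the buffer returned through
+ * url_close_dyn_buf() must be released with av_free().
+ *
+ *     ByteIOContext pb;
+ *     uint8_t *data;
+ *     int size;
+ *
+ *     if (url_open_dyn_buf(&pb) == 0) {
+ *         put_tag(&pb, "RIFF");
+ *         put_le32(&pb, 0);
+ *         size = url_close_dyn_buf(&pb, &data);
+ *         // ... use data[0..size-1] ...
+ *         av_free(data);
+ *     }
+ */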
+#endif /* CONFIG_MUXERS || CONFIG_NETWORK */
diff --git a/contrib/ffmpeg/libavformat/avisynth.c b/contrib/ffmpeg/libavformat/avisynth.c
new file mode 100644
index 000000000..1afcdea5e
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avisynth.c
@@ -0,0 +1,222 @@
+/*
+ * AVISynth support for ffmpeg system
+ * Copyright (c) 2006 DivX, Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "riff.h"
+
+#include <windows.h>
+#include <vfw.h>
+
+typedef struct {
+ PAVISTREAM handle;
+ AVISTREAMINFO info;
+ DWORD read;
+ LONG chunck_size;
+ LONG chunck_samples;
+} AVISynthStream;
+
+typedef struct {
+ PAVIFILE file;
+ AVISynthStream *streams;
+ int nb_streams;
+ int next_stream;
+} AVISynthContext;
+
+static int avisynth_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ AVISynthContext *avs = s->priv_data;
+ HRESULT res;
+ AVIFILEINFO info;
+ DWORD id;
+ AVStream *st;
+ AVISynthStream *stream;
+
+ AVIFileInit();
+
+ res = AVIFileOpen(&avs->file, s->filename, OF_READ|OF_SHARE_DENY_WRITE, NULL);
+ if (res != S_OK)
+ {
+ av_log(s, AV_LOG_ERROR, "AVIFileOpen failed with error %ld", res);
+ AVIFileExit();
+ return -1;
+ }
+
+ res = AVIFileInfo(avs->file, &info, sizeof(info));
+ if (res != S_OK)
+ {
+ av_log(s, AV_LOG_ERROR, "AVIFileInfo failed with error %ld", res);
+ AVIFileExit();
+ return -1;
+ }
+
+ avs->streams = av_mallocz(info.dwStreams * sizeof(AVISynthStream));
+
+ for (id=0; id<info.dwStreams; id++)
+ {
+ stream = &avs->streams[id];
+ stream->read = 0;
+ if (AVIFileGetStream(avs->file, &stream->handle, 0, id) == S_OK)
+ {
+ if (AVIStreamInfo(stream->handle, &stream->info, sizeof(stream->info)) == S_OK)
+ {
+ if (stream->info.fccType == streamtypeAUDIO)
+ {
+ WAVEFORMATEX wvfmt;
+ LONG struct_size = sizeof(WAVEFORMATEX);
+ if (AVIStreamReadFormat(stream->handle, 0, &wvfmt, &struct_size) != S_OK)
+ continue;
+
+ st = av_new_stream(s, id);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+
+ st->codec->block_align = wvfmt.nBlockAlign;
+ st->codec->channels = wvfmt.nChannels;
+ st->codec->sample_rate = wvfmt.nSamplesPerSec;
+ st->codec->bit_rate = wvfmt.nAvgBytesPerSec * 8;
+ st->codec->bits_per_sample = wvfmt.wBitsPerSample;
+
+ stream->chunck_samples = wvfmt.nSamplesPerSec * (uint64_t)info.dwScale / (uint64_t)info.dwRate;
+ stream->chunck_size = stream->chunck_samples * wvfmt.nChannels * wvfmt.wBitsPerSample / 8;
+
+ st->codec->codec_tag = wvfmt.wFormatTag;
+ st->codec->codec_id = wav_codec_get_id(wvfmt.wFormatTag, st->codec->bits_per_sample);
+ }
+ else if (stream->info.fccType == streamtypeVIDEO)
+ {
+ BITMAPINFO imgfmt;
+ LONG struct_size = sizeof(BITMAPINFO);
+
+ stream->chunck_size = stream->info.dwSampleSize;
+ stream->chunck_samples = 1;
+
+ if (AVIStreamReadFormat(stream->handle, 0, &imgfmt, &struct_size) != S_OK)
+ continue;
+
+ st = av_new_stream(s, id);
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->r_frame_rate.num = stream->info.dwRate;
+ st->r_frame_rate.den = stream->info.dwScale;
+
+ st->codec->width = imgfmt.bmiHeader.biWidth;
+ st->codec->height = imgfmt.bmiHeader.biHeight;
+
+ st->codec->bits_per_sample = imgfmt.bmiHeader.biBitCount;
+ st->codec->bit_rate = (uint64_t)stream->info.dwSampleSize * (uint64_t)stream->info.dwRate * 8 / (uint64_t)stream->info.dwScale;
+ st->codec->codec_tag = imgfmt.bmiHeader.biCompression;
+ st->codec->codec_id = codec_get_id(codec_bmp_tags, imgfmt.bmiHeader.biCompression);
+
+ st->duration = stream->info.dwLength;
+ }
+ else
+ {
+ AVIStreamRelease(stream->handle);
+ continue;
+ }
+
+ avs->nb_streams++;
+
+ st->codec->stream_codec_tag = stream->info.fccHandler;
+
+ av_set_pts_info(st, 64, info.dwScale, info.dwRate);
+ st->start_time = stream->info.dwStart;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVISynthContext *avs = s->priv_data;
+ HRESULT res;
+ AVISynthStream *stream;
+ int stream_id = avs->next_stream;
+ LONG read_size;
+
+ // handle interleaving manually...
+ stream = &avs->streams[stream_id];
+
+ if (stream->read >= stream->info.dwLength)
+ return AVERROR_IO;
+
+ if (av_new_packet(pkt, stream->chunck_size))
+ return AVERROR_IO;
+ pkt->stream_index = stream_id;
+ pkt->pts = avs->streams[stream_id].read / avs->streams[stream_id].chunck_samples;
+
+ res = AVIStreamRead(stream->handle, stream->read, stream->chunck_samples, pkt->data, stream->chunck_size, &read_size, NULL);
+
+ pkt->pts = stream->read;
+ pkt->size = read_size;
+
+ stream->read += stream->chunck_samples;
+
+ // prepare for the next stream to read
+ do {
+ avs->next_stream = (avs->next_stream+1) % avs->nb_streams;
+ } while (avs->next_stream != stream_id && s->streams[avs->next_stream]->discard >= AVDISCARD_ALL);
+
+ return (res == S_OK) ? pkt->size : -1;
+}
+
+static int avisynth_read_close(AVFormatContext *s)
+{
+ AVISynthContext *avs = s->priv_data;
+ int i;
+
+ for (i=0;i<avs->nb_streams;i++)
+ {
+ AVIStreamRelease(avs->streams[i].handle);
+ }
+
+ av_free(avs->streams);
+ AVIFileRelease(avs->file);
+ AVIFileExit();
+ return 0;
+}
+
+static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags)
+{
+ AVISynthContext *avs = s->priv_data;
+ int stream_id;
+
+ for (stream_id = 0; stream_id < avs->nb_streams; stream_id++)
+ {
+ avs->streams[stream_id].read = pts * avs->streams[stream_id].chunck_samples;
+ }
+
+ return 0;
+}
+
+AVInputFormat avisynth_demuxer = {
+ "avs",
+ "AVISynth",
+ sizeof(AVISynthContext),
+ NULL,
+ avisynth_read_header,
+ avisynth_read_packet,
+ avisynth_read_close,
+ avisynth_read_seek,
+ NULL,
+ 0,
+ "avs",
+};
diff --git a/contrib/ffmpeg/libavformat/avs.c b/contrib/ffmpeg/libavformat/avs.c
new file mode 100644
index 000000000..0fa77deff
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avs.c
@@ -0,0 +1,227 @@
+/*
+ * AVS demuxer.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "voc.h"
+
+
+typedef struct avs_format {
+ voc_dec_context_t voc;
+ AVStream *st_video;
+ AVStream *st_audio;
+ int width;
+ int height;
+ int bits_per_sample;
+ int fps;
+ int nb_frames;
+ int remaining_frame_size;
+ int remaining_audio_size;
+} avs_format_t;
+
+typedef enum avs_block_type {
+ AVS_VIDEO = 0x01,
+ AVS_AUDIO = 0x02,
+ AVS_PALETTE = 0x03,
+ AVS_GAME_DATA = 0x04,
+} avs_block_type_t;
+
+static int avs_probe(AVProbeData * p)
+{
+ const uint8_t *d;
+
+ if (p->buf_size < 4)
+ return 0;
+ d = p->buf;
+ if (d[0] == 'w' && d[1] == 'W' && d[2] == 0x10 && d[3] == 0)
+ return 50;
+
+ return 0;
+}
+
+static int avs_read_header(AVFormatContext * s, AVFormatParameters * ap)
+{
+ avs_format_t *avs = s->priv_data;
+
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+
+ url_fskip(&s->pb, 4);
+ avs->width = get_le16(&s->pb);
+ avs->height = get_le16(&s->pb);
+ avs->bits_per_sample = get_le16(&s->pb);
+ avs->fps = get_le16(&s->pb);
+ avs->nb_frames = get_le32(&s->pb);
+ avs->remaining_frame_size = 0;
+ avs->remaining_audio_size = 0;
+
+ avs->st_video = avs->st_audio = NULL;
+
+ if (avs->width != 318 || avs->height != 198)
+ av_log(s, AV_LOG_ERROR, "This avs pretend to be %dx%d "
+ "when the avs format is supposed to be 318x198 only.\n",
+ avs->width, avs->height);
+
+ return 0;
+}
+
+static int
+avs_read_video_packet(AVFormatContext * s, AVPacket * pkt,
+ avs_block_type_t type, int sub_type, int size,
+ uint8_t * palette, int palette_size)
+{
+ avs_format_t *avs = s->priv_data;
+ int ret;
+
+ ret = av_new_packet(pkt, size + palette_size);
+ if (ret < 0)
+ return ret;
+
+ if (palette_size) {
+ pkt->data[0] = 0x00;
+ pkt->data[1] = 0x03;
+ pkt->data[2] = palette_size & 0xFF;
+ pkt->data[3] = (palette_size >> 8) & 0xFF;
+ memcpy(pkt->data + 4, palette, palette_size - 4);
+ }
+
+ pkt->data[palette_size + 0] = sub_type;
+ pkt->data[palette_size + 1] = type;
+ pkt->data[palette_size + 2] = size & 0xFF;
+ pkt->data[palette_size + 3] = (size >> 8) & 0xFF;
+ ret = get_buffer(&s->pb, pkt->data + palette_size + 4, size - 4) + 4;
+ if (ret < size) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+
+ pkt->size = ret + palette_size;
+ pkt->stream_index = avs->st_video->index;
+ if (sub_type == 0)
+ pkt->flags |= PKT_FLAG_KEY;
+
+ return 0;
+}
+
+static int avs_read_audio_packet(AVFormatContext * s, AVPacket * pkt)
+{
+ avs_format_t *avs = s->priv_data;
+ int ret, size;
+
+ size = url_ftell(&s->pb);
+ ret = voc_get_packet(s, pkt, avs->st_audio, avs->remaining_audio_size);
+ size = url_ftell(&s->pb) - size;
+ avs->remaining_audio_size -= size;
+
+ if (ret == AVERROR_IO)
+ return 0; /* this indicates EOS */
+ if (ret < 0)
+ return ret;
+
+ pkt->stream_index = avs->st_audio->index;
+ pkt->flags |= PKT_FLAG_KEY;
+
+ return size;
+}
+
+static int avs_read_packet(AVFormatContext * s, AVPacket * pkt)
+{
+ avs_format_t *avs = s->priv_data;
+ int sub_type = 0, size = 0;
+ avs_block_type_t type = 0;
+ int palette_size = 0;
+ uint8_t palette[4 + 3 * 256];
+ int ret;
+
+ if (avs->remaining_audio_size > 0)
+ if (avs_read_audio_packet(s, pkt) > 0)
+ return 0;
+
+ while (1) {
+ if (avs->remaining_frame_size <= 0) {
+ if (!get_le16(&s->pb)) /* found EOF */
+ return AVERROR_IO;
+ avs->remaining_frame_size = get_le16(&s->pb) - 4;
+ }
+
+ while (avs->remaining_frame_size > 0) {
+ sub_type = get_byte(&s->pb);
+ type = get_byte(&s->pb);
+ size = get_le16(&s->pb);
+ avs->remaining_frame_size -= size;
+
+ switch (type) {
+ case AVS_PALETTE:
+ ret = get_buffer(&s->pb, palette, size - 4);
+ if (ret < size - 4)
+ return AVERROR_IO;
+ palette_size = size;
+ break;
+
+ case AVS_VIDEO:
+ if (!avs->st_video) {
+ avs->st_video = av_new_stream(s, AVS_VIDEO);
+ if (avs->st_video == NULL)
+ return AVERROR_NOMEM;
+ avs->st_video->codec->codec_type = CODEC_TYPE_VIDEO;
+ avs->st_video->codec->codec_id = CODEC_ID_AVS;
+ avs->st_video->codec->width = avs->width;
+ avs->st_video->codec->height = avs->height;
+ avs->st_video->codec->bits_per_sample=avs->bits_per_sample;
+ avs->st_video->nb_frames = avs->nb_frames;
+ avs->st_video->codec->time_base = (AVRational) {
+ 1, avs->fps};
+ }
+ return avs_read_video_packet(s, pkt, type, sub_type, size,
+ palette, palette_size);
+
+ case AVS_AUDIO:
+ if (!avs->st_audio) {
+ avs->st_audio = av_new_stream(s, AVS_AUDIO);
+ if (avs->st_audio == NULL)
+ return AVERROR_NOMEM;
+ avs->st_audio->codec->codec_type = CODEC_TYPE_AUDIO;
+ }
+ avs->remaining_audio_size = size - 4;
+ size = avs_read_audio_packet(s, pkt);
+ if (size != 0)
+ return size;
+ break;
+
+ default:
+ url_fskip(&s->pb, size - 4);
+ }
+ }
+ }
+}
+
+static int avs_read_close(AVFormatContext * s)
+{
+ return 0;
+}
+
+AVInputFormat avs_demuxer = {
+ "avs",
+ "avs format",
+ sizeof(avs_format_t),
+ avs_probe,
+ avs_read_header,
+ avs_read_packet,
+ avs_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/barpainet.h b/contrib/ffmpeg/libavformat/barpainet.h
new file mode 100644
index 000000000..b50bf82b6
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/barpainet.h
@@ -0,0 +1,45 @@
+/*
+ * copyright (c) 2002 Francois Revol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef BARPA_INET_H
+#define BARPA_INET_H
+
+#include "config.h"
+
+#ifdef CONFIG_BEOS_NETSERVER
+
+# include <socket.h>
+int inet_aton (const char * str, struct in_addr * add);
+# define PF_INET AF_INET
+# define SO_SNDBUF 0x40000001
+
+/* fake */
+struct ip_mreq {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
+
+#include <netdb.h>
+
+#else
+# include <arpa/inet.h>
+#endif
+
+#endif /* BARPA_INET_H */
diff --git a/contrib/ffmpeg/libavformat/base64.c b/contrib/ffmpeg/libavformat/base64.c
new file mode 100644
index 000000000..6279244d3
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/base64.c
@@ -0,0 +1,231 @@
+/*
+ * Base64.c
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file base64.c
+ * @brief Base64 Encode/Decode
+ * @author Ryan Martell <rdm4@martellventures.com> (with lots of Michael)
+ */
+
+#include "common.h"
+#include "base64.h"
+
+/* ---------------- private code */
+static uint8_t map2[] =
+{
+ 0x3e, 0xff, 0xff, 0xff, 0x3f, 0x34, 0x35, 0x36,
+ 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+ 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
+ 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1a, 0x1b,
+ 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33
+};
+
+int av_base64_decode(uint8_t * out, const char *in, int out_length)
+{
+ int i, v;
+ uint8_t *dst = out;
+
+ v = 0;
+ for (i = 0; in[i] && in[i] != '='; i++) {
+ unsigned int index= in[i]-43;
+ if (index>=(sizeof(map2)/sizeof(map2[0])) || map2[index] == 0xff)
+ return -1;
+ v = (v << 6) + map2[index];
+ if (i & 3) {
+ if (dst - out < out_length) {
+ *dst++ = v >> (6 - 2 * (i & 3));
+ }
+ }
+ }
+
+ return (dst - out);
+}
+
+/*****************************************************************************
+* b64_encode: stolen from VLC's http.c
+* simplified by michael
+* fixed edge cases and made it work from data (vs. strings) by ryan.
+*****************************************************************************/
+
+char *av_base64_encode(uint8_t * src, int len)
+{
+ static const char b64[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ char *ret, *dst;
+ unsigned i_bits = 0;
+ int i_shift = 0;
+ int bytes_remaining = len;
+
+ if (len < UINT_MAX / 4) {
+ ret = dst = av_malloc(len * 4 / 3 + 12);
+ } else
+ return NULL;
+
+ if (len) { // special edge case, what should we really do here?
+ while (bytes_remaining) {
+ i_bits = (i_bits << 8) + *src++;
+ bytes_remaining--;
+ i_shift += 8;
+
+ do {
+ *dst++ = b64[(i_bits << 6 >> i_shift) & 0x3f];
+ i_shift -= 6;
+ } while (i_shift > 6 || (bytes_remaining == 0 && i_shift > 0));
+ }
+ while ((dst - ret) & 3)
+ *dst++ = '=';
+ }
+ *dst = '\0';
+
+ return ret;
+}
+
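As a quick sanity check of the two helpers above, a minimal round-trip sketch follows; it is not part of the patch, the function name base64_roundtrip_demo is invented for illustration, and it assumes av_free() is reachable via common.h as in base64.c above. The expected strings match the "abc:def" test vector in the TEST_BASE64 section below.

#include <stdio.h>
#include "common.h"   /* av_free(), as pulled in by base64.c above */
#include "base64.h"

static void base64_roundtrip_demo(void)
{
    uint8_t decoded[16];
    /* "abc:def" encodes to "YWJjOmRlZg==" (see the test vectors below) */
    char *enc = av_base64_encode((uint8_t *)"abc:def", 7);
    int n = av_base64_decode(decoded, enc, sizeof(decoded));

    printf("%s -> %d bytes\n", enc, n);      /* prints: YWJjOmRlZg== -> 7 bytes */
    av_free(enc);
}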
+// #define TEST_BASE64
+
+#ifdef TEST_BASE64
+#include "avutil.h"
+
+int b64test()
+{
+ int numerr = 0;
+ int len;
+ int numtest = 1;
+ uint8_t decode[1000];
+ struct test {
+ void *data;
+ int len;
+ const char *result;
+ } *t, tests[] = {
+ {
+ "", 0, ""}, {
+ "1", 1, "MQ=="}, {
+ "22", 2, "MjI="}, {
+ "333", 3, "MzMz"}, {
+ "4444", 4, "NDQ0NA=="}, {
+ "55555", 5, "NTU1NTU="}, {
+ "abc:def", 7, "YWJjOmRlZg=="}, {
+ NULL}
+ };
+ for (t = tests; t->data; t++) {
+ char *str;
+
+ av_log(NULL, AV_LOG_ERROR, "Encoding %s...\n", (char *) t->data);
+ str = av_base64_encode(t->data, t->len);
+ if (str) {
+ av_log(NULL, AV_LOG_ERROR, "Encoded to %s...\n", str);
+ if (strcmp(str, t->result) != 0) {
+ av_log(NULL, AV_LOG_ERROR, "failed test %d: %s != %s\n",
+ numtest, str, t->result);
+ numerr++;
+ }
+ av_free(str);
+ }
+
+ av_log(NULL, AV_LOG_ERROR, "Done encoding, about to decode...\n");
+ len = av_base64_decode(decode, t->result, sizeof(decode));
+ if (len != t->len) {
+ av_log(NULL, AV_LOG_ERROR, "failed test %d: len %d != %d\n",
+ numtest, len, t->len);
+ numerr++;
+ } else if (memcmp(decode, t->data, t->len) != 0) {
+ av_log(NULL, AV_LOG_ERROR, "failed test %d: data\n", numtest);
+ numerr++;
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Decoded to %s\n",
+ (char *) t->data);
+ }
+ numtest++;
+ }
+
+#undef srand
+#undef rand
+
+ {
+ int test_count;
+ srand(123141); // time(NULL));
+ for (test_count = 0; test_count < 100; test_count++) {
+ int size = rand() % 1024;
+ int ii;
+ uint8_t *data;
+ char *encoded_result;
+
+ av_log(NULL, AV_LOG_ERROR, "Test %d: Size %d bytes...",
+ test_count, size);
+ data = (uint8_t *) av_malloc(size);
+ for (ii = 0; ii < size; ii++) {
+ data[ii] = rand() % 255;
+ }
+
+ encoded_result = av_base64_encode(data, size);
+ if (encoded_result) {
+ int decode_buffer_size = size + 10; // try without 10 as well
+ uint8_t *decode_buffer = av_malloc(decode_buffer_size);
+ if (decode_buffer) {
+ int decoded_size =
+ av_base64_decode(decode_buffer, encoded_result,
+ decode_buffer_size);
+
+ if (decoded_size != size) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Decoded/Encoded size mismatch (%d != %d)\n",
+ decoded_size, size);
+ } else {
+ if (memcmp(decode_buffer, data, decoded_size) == 0) {
+ av_log(NULL, AV_LOG_ERROR, "Passed!\n");
+ } else {
+ av_log(NULL, AV_LOG_ERROR,
+ "Failed (Data differs)!\n");
+ }
+ }
+ av_free(decode_buffer);
+ }
+
+ av_free(encoded_result);
+ }
+ }
+ }
+
+ // these are invalid strings that it currently decodes (which it probably shouldn't)
+ {
+ uint8_t str[32];
+ if (av_base64_decode(str, "M=M=", sizeof(str)) != -1) {
+ av_log(NULL, AV_LOG_ERROR,
+ "failed test %d: successful decode of `M=M='\n",
+ numtest++);
+ numerr++;
+ }
+ if (av_base64_decode(str, "MQ===", sizeof(str)) != -1) {
+ av_log(NULL, AV_LOG_ERROR,
+ "failed test %d: successful decode of `MQ==='\n",
+ numtest++);
+ numerr++;
+ }
+ }
+
+ return numerr;
+}
+#endif
+
diff --git a/contrib/ffmpeg/libavformat/base64.h b/contrib/ffmpeg/libavformat/base64.h
new file mode 100644
index 000000000..03d43afe4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/base64.h
@@ -0,0 +1,24 @@
+/*
+ * Base64.c
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+int av_base64_decode(uint8_t * out, const char *in, int out_length); // param order as strncpy()
+char *av_base64_encode(uint8_t * src, int len); // src is not a string, it's data.
+
diff --git a/contrib/ffmpeg/libavformat/beosaudio.cpp b/contrib/ffmpeg/libavformat/beosaudio.cpp
new file mode 100644
index 000000000..6ac45ebb2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/beosaudio.cpp
@@ -0,0 +1,465 @@
+/*
+ * BeOS audio play interface
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <Application.h>
+#include <SoundPlayer.h>
+
+extern "C" {
+#include "avformat.h"
+}
+
+#ifdef HAVE_BSOUNDRECORDER
+#include <SoundRecorder.h>
+using namespace BPrivate::Media::Experimental;
+#endif
+
+/* enable performance checks */
+//#define PERF_CHECK
+
+/* enable Media Kit latency checks */
+//#define LATENCY_CHECK
+
+#define AUDIO_BLOCK_SIZE 4096
+#define AUDIO_BLOCK_COUNT 8
+
+#define AUDIO_BUFFER_SIZE (AUDIO_BLOCK_SIZE*AUDIO_BLOCK_COUNT)
+
+typedef struct {
+ int fd; // UNUSED
+ int sample_rate;
+ int channels;
+ int frame_size; /* in bytes ! */
+ CodecID codec_id;
+ uint8_t buffer[AUDIO_BUFFER_SIZE];
+ int buffer_ptr;
+ /* ring buffer */
+ sem_id input_sem;
+ int input_index;
+ sem_id output_sem;
+ int output_index;
+ BSoundPlayer *player;
+#ifdef HAVE_BSOUNDRECORDER
+ BSoundRecorder *recorder;
+#endif
+ int has_quit; /* signal callbacks not to wait */
+ volatile bigtime_t starve_time;
+} AudioData;
+
+static thread_id main_thid;
+static thread_id bapp_thid;
+static int own_BApp_created = 0;
+static int refcount = 0;
+
+/* create the BApplication and Run() it */
+static int32 bapp_thread(void *arg)
+{
+ new BApplication("application/x-vnd.ffmpeg");
+ own_BApp_created = 1;
+ be_app->Run();
+ /* kill the process group */
+// kill(0, SIGINT);
+// kill(main_thid, SIGHUP);
+ return B_OK;
+}
+
+/* create the BApplication only if needed */
+static void create_bapp_if_needed(void)
+{
+ if (refcount++ == 0) {
+ /* needed by libmedia */
+ if (be_app == NULL) {
+ bapp_thid = spawn_thread(bapp_thread, "ffmpeg BApplication", B_NORMAL_PRIORITY, NULL);
+ resume_thread(bapp_thid);
+ while (!own_BApp_created)
+ snooze(50000);
+ }
+ }
+}
+
+static void destroy_bapp_if_needed(void)
+{
+ if (--refcount == 0 && own_BApp_created) {
+ be_app->Lock();
+ be_app->Quit();
+ be_app = NULL;
+ }
+}
+
+/* called back by BSoundPlayer */
+static void audioplay_callback(void *cookie, void *buffer, size_t bufferSize, const media_raw_audio_format &format)
+{
+ AudioData *s;
+ size_t len, amount;
+ unsigned char *buf = (unsigned char *)buffer;
+
+ s = (AudioData *)cookie;
+ if (s->has_quit)
+ return;
+ while (bufferSize > 0) {
+#ifdef PERF_CHECK
+ bigtime_t t;
+ t = system_time();
+#endif
+ len = MIN(AUDIO_BLOCK_SIZE, bufferSize);
+ if (acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK) {
+ s->has_quit = 1;
+ s->player->SetHasData(false);
+ return;
+ }
+ amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
+ memcpy(buf, &s->buffer[s->output_index], amount);
+ s->output_index += amount;
+ if (s->output_index >= AUDIO_BUFFER_SIZE) {
+ s->output_index %= AUDIO_BUFFER_SIZE;
+ memcpy(buf + amount, &s->buffer[s->output_index], len - amount);
+ s->output_index += len-amount;
+ s->output_index %= AUDIO_BUFFER_SIZE;
+ }
+ release_sem_etc(s->input_sem, len, 0);
+#ifdef PERF_CHECK
+ t = system_time() - t;
+ s->starve_time = MAX(s->starve_time, t);
+#endif
+ buf += len;
+ bufferSize -= len;
+ }
+}
+
+#ifdef HAVE_BSOUNDRECORDER
+/* called back by BSoundRecorder */
+static void audiorecord_callback(void *cookie, bigtime_t timestamp, void *buffer, size_t bufferSize, const media_multi_audio_format &format)
+{
+ AudioData *s;
+ size_t len, amount;
+ unsigned char *buf = (unsigned char *)buffer;
+
+ s = (AudioData *)cookie;
+ if (s->has_quit)
+ return;
+
+ while (bufferSize > 0) {
+ len = MIN(bufferSize, AUDIO_BLOCK_SIZE);
+ //printf("acquire_sem(input, %d)\n", len);
+ if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK) {
+ s->has_quit = 1;
+ return;
+ }
+ amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
+ memcpy(&s->buffer[s->input_index], buf, amount);
+ s->input_index += amount;
+ if (s->input_index >= AUDIO_BUFFER_SIZE) {
+ s->input_index %= AUDIO_BUFFER_SIZE;
+ memcpy(&s->buffer[s->input_index], buf + amount, len - amount);
+ s->input_index += len - amount;
+ }
+ release_sem_etc(s->output_sem, len, 0);
+ //printf("release_sem(output, %d)\n", len);
+ buf += len;
+ bufferSize -= len;
+ }
+}
+#endif
+
+static int audio_open(AudioData *s, int is_output, const char *audio_device)
+{
+ int p[2];
+ int ret;
+ media_raw_audio_format format;
+ media_multi_audio_format iformat;
+
+#ifndef HAVE_BSOUNDRECORDER
+ if (!is_output)
+ return -EIO; /* not for now */
+#endif
+ s->input_sem = create_sem(AUDIO_BUFFER_SIZE, "ffmpeg_ringbuffer_input");
+ if (s->input_sem < B_OK)
+ return -EIO;
+ s->output_sem = create_sem(0, "ffmpeg_ringbuffer_output");
+ if (s->output_sem < B_OK) {
+ delete_sem(s->input_sem);
+ return -EIO;
+ }
+ s->input_index = 0;
+ s->output_index = 0;
+ create_bapp_if_needed();
+ s->frame_size = AUDIO_BLOCK_SIZE;
+ /* bump up the priority (avoid realtime though) */
+ set_thread_priority(find_thread(NULL), B_DISPLAY_PRIORITY+1);
+#ifdef HAVE_BSOUNDRECORDER
+ if (!is_output) {
+ bool wait_for_input = false;
+ if (audio_device && !strcmp(audio_device, "wait:"))
+ wait_for_input = true;
+ s->recorder = new BSoundRecorder(&iformat, wait_for_input, "ffmpeg input", audiorecord_callback);
+ if (wait_for_input && (s->recorder->InitCheck() == B_OK)) {
+ s->recorder->WaitForIncomingConnection(&iformat);
+ }
+ if (s->recorder->InitCheck() != B_OK || iformat.format != media_raw_audio_format::B_AUDIO_SHORT) {
+ delete s->recorder;
+ s->recorder = NULL;
+ if (s->input_sem)
+ delete_sem(s->input_sem);
+ if (s->output_sem)
+ delete_sem(s->output_sem);
+ return -EIO;
+ }
+ s->codec_id = (iformat.byte_order == B_MEDIA_LITTLE_ENDIAN)?CODEC_ID_PCM_S16LE:CODEC_ID_PCM_S16BE;
+ s->channels = iformat.channel_count;
+ s->sample_rate = (int)iformat.frame_rate;
+ s->frame_size = iformat.buffer_size;
+ s->recorder->SetCookie(s);
+ s->recorder->SetVolume(1.0);
+ s->recorder->Start();
+ return 0;
+ }
+#endif
+ format = media_raw_audio_format::wildcard;
+ format.format = media_raw_audio_format::B_AUDIO_SHORT;
+ format.byte_order = B_HOST_IS_LENDIAN ? B_MEDIA_LITTLE_ENDIAN : B_MEDIA_BIG_ENDIAN;
+ format.channel_count = s->channels;
+ format.buffer_size = s->frame_size;
+ format.frame_rate = s->sample_rate;
+ s->player = new BSoundPlayer(&format, "ffmpeg output", audioplay_callback);
+ if (s->player->InitCheck() != B_OK) {
+ delete s->player;
+ s->player = NULL;
+ if (s->input_sem)
+ delete_sem(s->input_sem);
+ if (s->output_sem)
+ delete_sem(s->output_sem);
+ return -EIO;
+ }
+ s->player->SetCookie(s);
+ s->player->SetVolume(1.0);
+ s->player->Start();
+ s->player->SetHasData(true);
+ return 0;
+}
+
+static int audio_close(AudioData *s)
+{
+ if (s->input_sem)
+ delete_sem(s->input_sem);
+ if (s->output_sem)
+ delete_sem(s->output_sem);
+ s->has_quit = 1;
+ if (s->player) {
+ s->player->Stop();
+ }
+ if (s->player)
+ delete s->player;
+#ifdef HAVE_BSOUNDRECORDER
+ if (s->recorder)
+ delete s->recorder;
+#endif
+ destroy_bapp_if_needed();
+ return 0;
+}
+
+/* sound output support */
+static int audio_write_header(AVFormatContext *s1)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+ AVStream *st;
+ int ret;
+
+ st = s1->streams[0];
+ s->sample_rate = st->codec->sample_rate;
+ s->channels = st->codec->channels;
+ ret = audio_open(s, 1, NULL);
+ if (ret < 0)
+ return -EIO;
+ return 0;
+}
+
+static int audio_write_packet(AVFormatContext *s1, int stream_index,
+ const uint8_t *buf, int size, int64_t force_pts)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+ int len, ret;
+#ifdef LATENCY_CHECK
+bigtime_t lat1, lat2;
+lat1 = s->player->Latency();
+#endif
+#ifdef PERF_CHECK
+ bigtime_t t = s->starve_time;
+ s->starve_time = 0;
+ printf("starve_time: %lld \n", t);
+#endif
+ while (size > 0) {
+ int amount;
+ len = MIN(size, AUDIO_BLOCK_SIZE);
+ if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK)
+ return -EIO;
+ amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
+ memcpy(&s->buffer[s->input_index], buf, amount);
+ s->input_index += amount;
+ if (s->input_index >= AUDIO_BUFFER_SIZE) {
+ s->input_index %= AUDIO_BUFFER_SIZE;
+ memcpy(&s->buffer[s->input_index], buf + amount, len - amount);
+ s->input_index += len - amount;
+ }
+ release_sem_etc(s->output_sem, len, 0);
+ buf += len;
+ size -= len;
+ }
+#ifdef LATENCY_CHECK
+lat2 = s->player->Latency();
+printf("#### BSoundPlayer::Latency(): before= %lld, after= %lld\n", lat1, lat2);
+#endif
+ return 0;
+}
+
+static int audio_write_trailer(AVFormatContext *s1)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+
+ audio_close(s);
+ return 0;
+}
+
+/* grab support */
+
+static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+ AVStream *st;
+ int ret;
+
+ if (!ap || ap->sample_rate <= 0 || ap->channels <= 0)
+ return -1;
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+ s->sample_rate = ap->sample_rate;
+ s->channels = ap->channels;
+
+ ret = audio_open(s, 0, ap->device);
+ if (ret < 0) {
+ av_free(st);
+ return -EIO;
+ }
+ /* take real parameters */
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = s->codec_id;
+ st->codec->sample_rate = s->sample_rate;
+ st->codec->channels = s->channels;
+ return 0;
+ av_set_pts_info(s1, 48, 1, 1000000); /* 48 bits pts in us */
+}
+
+static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+ int size;
+ size_t len, amount;
+ unsigned char *buf;
+ status_t err;
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return -EIO;
+ buf = (unsigned char *)pkt->data;
+ size = pkt->size;
+ while (size > 0) {
+ len = MIN(AUDIO_BLOCK_SIZE, size);
+ //printf("acquire_sem(output, %d)\n", len);
+ while ((err=acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL)) == B_INTERRUPTED);
+ if (err < B_OK) {
+ av_free_packet(pkt);
+ return -EIO;
+ }
+ amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
+ memcpy(buf, &s->buffer[s->output_index], amount);
+ s->output_index += amount;
+ if (s->output_index >= AUDIO_BUFFER_SIZE) {
+ s->output_index %= AUDIO_BUFFER_SIZE;
+ memcpy(buf + amount, &s->buffer[s->output_index], len - amount);
+ s->output_index += len-amount;
+ s->output_index %= AUDIO_BUFFER_SIZE;
+ }
+ release_sem_etc(s->input_sem, len, 0);
+ //printf("release_sem(input, %d)\n", len);
+ buf += len;
+ size -= len;
+ }
+ //XXX: add pts info
+ return 0;
+}
+
+static int audio_read_close(AVFormatContext *s1)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+
+ audio_close(s);
+ return 0;
+}
+
+static AVInputFormat audio_demuxer = {
+ "audio_device",
+ "audio grab and output",
+ sizeof(AudioData),
+ NULL,
+ audio_read_header,
+ audio_read_packet,
+ audio_read_close,
+ NULL,
+ AVFMT_NOFILE,
+};
+
+AVOutputFormat audio_muxer = {
+ "audio_device",
+ "audio grab and output",
+ "",
+ "",
+ sizeof(AudioData),
+#ifdef WORDS_BIGENDIAN
+ CODEC_ID_PCM_S16BE,
+#else
+ CODEC_ID_PCM_S16LE,
+#endif
+ CODEC_ID_NONE,
+ audio_write_header,
+ audio_write_packet,
+ audio_write_trailer,
+ AVFMT_NOFILE,
+};
+
+extern "C" {
+
+int audio_init(void)
+{
+ main_thid = find_thread(NULL);
+ av_register_input_format(&audio_demuxer);
+ av_register_output_format(&audio_muxer);
+ return 0;
+}
+
+} // "C"
+
diff --git a/contrib/ffmpeg/libavformat/crc.c b/contrib/ffmpeg/libavformat/crc.c
new file mode 100644
index 000000000..bdbe8bcff
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/crc.c
@@ -0,0 +1,98 @@
+/*
+ * CRC decoder (for codec/format testing)
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "adler32.h"
+
+#ifdef CONFIG_CRC_MUXER
+typedef struct CRCState {
+ uint32_t crcval;
+} CRCState;
+
+static int crc_write_header(struct AVFormatContext *s)
+{
+ CRCState *crc = s->priv_data;
+
+ /* init CRC */
+ crc->crcval = 1;
+
+ return 0;
+}
+
+static int crc_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ CRCState *crc = s->priv_data;
+ crc->crcval = av_adler32_update(crc->crcval, pkt->data, pkt->size);
+ return 0;
+}
+
+static int crc_write_trailer(struct AVFormatContext *s)
+{
+ CRCState *crc = s->priv_data;
+ char buf[64];
+
+ snprintf(buf, sizeof(buf), "CRC=0x%08x\n", crc->crcval);
+ put_buffer(&s->pb, buf, strlen(buf));
+ put_flush_packet(&s->pb);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_FRAMECRC_MUXER
+static int framecrc_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ uint32_t crc = av_adler32_update(0, pkt->data, pkt->size);
+ char buf[256];
+
+ snprintf(buf, sizeof(buf), "%d, %"PRId64", %d, 0x%08x\n", pkt->stream_index, pkt->dts, pkt->size, crc);
+ put_buffer(&s->pb, buf, strlen(buf));
+ put_flush_packet(&s->pb);
+ return 0;
+}
+#endif
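For reference, each packet handled by framecrc_write_packet() above becomes one line of text output, with the checksum being the Adler-32 of the packet payload; the placeholders below only illustrate the format string used in the code:

<stream_index>, <dts>, <size>, 0x<adler32>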
+
+#ifdef CONFIG_CRC_MUXER
+AVOutputFormat crc_muxer = {
+ "crc",
+ "crc testing format",
+ NULL,
+ "",
+ sizeof(CRCState),
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_RAWVIDEO,
+ crc_write_header,
+ crc_write_packet,
+ crc_write_trailer,
+};
+#endif
+#ifdef CONFIG_FRAMECRC_MUXER
+AVOutputFormat framecrc_muxer = {
+ "framecrc",
+ "framecrc testing format",
+ NULL,
+ "",
+ 0,
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_RAWVIDEO,
+ NULL,
+ framecrc_write_packet,
+ NULL,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/cutils.c b/contrib/ffmpeg/libavformat/cutils.c
new file mode 100644
index 000000000..45959ec39
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/cutils.c
@@ -0,0 +1,275 @@
+/*
+ * Various simple utilities for ffmpeg system
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#if !defined(CONFIG_NOCUTILS)
+/**
+ * Return TRUE if val is a prefix of str. If it returns TRUE, ptr is
+ * set to the next character in 'str' after the prefix.
+ *
+ * @param str input string
+ * @param val prefix to test
+ * @param ptr updated to point after the prefix in str if there is a match
+ * @return TRUE if there is a match
+ */
+int strstart(const char *str, const char *val, const char **ptr)
+{
+ const char *p, *q;
+ p = str;
+ q = val;
+ while (*q != '\0') {
+ if (*p != *q)
+ return 0;
+ p++;
+ q++;
+ }
+ if (ptr)
+ *ptr = p;
+ return 1;
+}
+
+/**
+ * Return TRUE if val is a prefix of str (case independent). If it
+ * returns TRUE, ptr is set to the next character in 'str' after the
+ * prefix.
+ *
+ * @param str input string
+ * @param val prefix to test
+ * @param ptr updated to point after the prefix in str if there is a match
+ * @return TRUE if there is a match */
+int stristart(const char *str, const char *val, const char **ptr)
+{
+ const char *p, *q;
+ p = str;
+ q = val;
+ while (*q != '\0') {
+ if (toupper(*(const unsigned char *)p) != toupper(*(const unsigned char *)q))
+ return 0;
+ p++;
+ q++;
+ }
+ if (ptr)
+ *ptr = p;
+ return 1;
+}
+
+/**
+ * Copy the string str to buf. If str length is bigger than buf_size -
+ * 1 then it is clamped to buf_size - 1.
+ * NOTE: this function does what strncpy should have done to be
+ * useful. NEVER use strncpy.
+ *
+ * @param buf destination buffer
+ * @param buf_size size of destination buffer
+ * @param str source string
+ */
+void pstrcpy(char *buf, int buf_size, const char *str)
+{
+ int c;
+ char *q = buf;
+
+ if (buf_size <= 0)
+ return;
+
+ for(;;) {
+ c = *str++;
+ if (c == 0 || q >= buf + buf_size - 1)
+ break;
+ *q++ = c;
+ }
+ *q = '\0';
+}
+
+/* strcat and truncate. */
+char *pstrcat(char *buf, int buf_size, const char *s)
+{
+ int len;
+ len = strlen(buf);
+ if (len < buf_size)
+ pstrcpy(buf + len, buf_size - len, s);
+ return buf;
+}
+
+#endif
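A minimal illustration of the truncation behaviour of pstrcpy()/pstrcat() above; this is a sketch only, not part of the patch, and the helper pstr_demo plus the literal strings are invented for the example:

#include <stdio.h>

/* prototypes normally come from avformat.h in this tree */
void  pstrcpy(char *buf, int buf_size, const char *str);
char *pstrcat(char *buf, int buf_size, const char *s);

static void pstr_demo(void)
{
    char buf[8];

    pstrcpy(buf, sizeof(buf), "mpegvideo");   /* -> "mpegvid" (7 chars + NUL) */
    pstrcat(buf, sizeof(buf), ".mpg");        /* buffer already full -> unchanged */
    printf("%s\n", buf);                      /* prints "mpegvid" */
}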
+
+/* add one element to a dynamic array */
+void __dynarray_add(unsigned long **tab_ptr, int *nb_ptr, unsigned long elem)
+{
+ int nb, nb_alloc;
+ unsigned long *tab;
+
+ nb = *nb_ptr;
+ tab = *tab_ptr;
+ if ((nb & (nb - 1)) == 0) {
+ if (nb == 0)
+ nb_alloc = 1;
+ else
+ nb_alloc = nb * 2;
+ tab = av_realloc(tab, nb_alloc * sizeof(unsigned long));
+ *tab_ptr = tab;
+ }
+ tab[nb++] = elem;
+ *nb_ptr = nb;
+}
+
+time_t mktimegm(struct tm *tm)
+{
+ time_t t;
+
+ int y = tm->tm_year + 1900, m = tm->tm_mon + 1, d = tm->tm_mday;
+
+ if (m < 3) {
+ m += 12;
+ y--;
+ }
+
+ t = 86400 *
+ (d + (153 * m - 457) / 5 + 365 * y + y / 4 - y / 100 + y / 400 - 719469);
+
+ t += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;
+
+ return t;
+}
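As a sanity check of the day-count formula above: for the Unix epoch (1970-01-01 00:00:00 UTC) the month adjustment gives m = 13 and y = 1969, so (153*13 - 457)/5 + 365*1969 + 1969/4 - 1969/100 + 1969/400 = 306 + 718685 + 492 - 19 + 4 = 719468; with d = 1 the parenthesised term is 719469 - 719469 = 0 and mktimegm() returns 0, as expected.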
+
+#define ISLEAP(y) (((y) % 4 == 0) && (((y) % 100) != 0 || ((y) % 400) == 0))
+#define LEAPS_COUNT(y) ((y)/4 - (y)/100 + (y)/400)
+
+/* This is our own gmtime_r. It differs from its POSIX counterpart in a
+ couple of places, though. */
+struct tm *brktimegm(time_t secs, struct tm *tm)
+{
+ int days, y, ny, m;
+ int md[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
+
+ days = secs / 86400;
+ secs %= 86400;
+ tm->tm_hour = secs / 3600;
+ tm->tm_min = (secs % 3600) / 60;
+ tm->tm_sec = secs % 60;
+
+ /* oh well, maybe someone will someday invent a formula for this stuff */
+ y = 1970; /* start "guessing" */
+ while (days >= (ISLEAP(y)?366:365)) {
+ ny = (y + days/366);
+ days -= (ny - y) * 365 + LEAPS_COUNT(ny - 1) - LEAPS_COUNT(y - 1);
+ y = ny;
+ }
+ md[1] = ISLEAP(y)?29:28;
+ for (m=0; days >= md[m]; m++)
+ days -= md[m];
+
+ tm->tm_year = y; /* unlike gmtime_r we store complete year here */
+ tm->tm_mon = m+1; /* unlike gmtime_r tm_mon is from 1 to 12 */
+ tm->tm_mday = days+1;
+
+ return tm;
+}
+
+/* get a positive number between n_min and n_max, for a maximum length
+ of len_max. Return -1 if error. */
+static int date_get_num(const char **pp,
+ int n_min, int n_max, int len_max)
+{
+ int i, val, c;
+ const char *p;
+
+ p = *pp;
+ val = 0;
+ for(i = 0; i < len_max; i++) {
+ c = *p;
+ if (!isdigit(c))
+ break;
+ val = (val * 10) + c - '0';
+ p++;
+ }
+ /* no number read ? */
+ if (p == *pp)
+ return -1;
+ if (val < n_min || val > n_max)
+ return -1;
+ *pp = p;
+ return val;
+}
+
+/* small strptime for ffmpeg */
+const char *small_strptime(const char *p, const char *fmt,
+ struct tm *dt)
+{
+ int c, val;
+
+ for(;;) {
+ c = *fmt++;
+ if (c == '\0') {
+ return p;
+ } else if (c == '%') {
+ c = *fmt++;
+ switch(c) {
+ case 'H':
+ val = date_get_num(&p, 0, 23, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_hour = val;
+ break;
+ case 'M':
+ val = date_get_num(&p, 0, 59, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_min = val;
+ break;
+ case 'S':
+ val = date_get_num(&p, 0, 59, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_sec = val;
+ break;
+ case 'Y':
+ val = date_get_num(&p, 0, 9999, 4);
+ if (val == -1)
+ return NULL;
+ dt->tm_year = val - 1900;
+ break;
+ case 'm':
+ val = date_get_num(&p, 1, 12, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_mon = val - 1;
+ break;
+ case 'd':
+ val = date_get_num(&p, 1, 31, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_mday = val;
+ break;
+ case '%':
+ goto match;
+ default:
+ return NULL;
+ }
+ } else {
+ match:
+ if (c != *p)
+ return NULL;
+ p++;
+ }
+ }
+ return p;
+}
+
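A small usage sketch of small_strptime() above; it is illustrative only (the date string and the strptime_demo helper are invented), and the format specifiers are limited to the ones handled in the switch:

#include <stdio.h>
#include <string.h>
#include <time.h>

/* prototype normally comes from avformat.h in this tree */
const char *small_strptime(const char *p, const char *fmt, struct tm *dt);

static void strptime_demo(void)
{
    struct tm dt;
    memset(&dt, 0, sizeof(dt));

    if (small_strptime("2006-12-31 23:59:59", "%Y-%m-%d %H:%M:%S", &dt)) {
        /* tm_year is years since 1900, tm_mon is 0-11, as with strptime(3) */
        printf("%d-%02d-%02d\n", dt.tm_year + 1900, dt.tm_mon + 1, dt.tm_mday);
    }
}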
diff --git a/contrib/ffmpeg/libavformat/daud.c b/contrib/ffmpeg/libavformat/daud.c
new file mode 100644
index 000000000..ec81b7b1c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/daud.c
@@ -0,0 +1,58 @@
+/*
+ * D-Cinema audio demuxer
+ * Copyright (c) 2005 Reimar Döffinger.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+static int daud_header(AVFormatContext *s, AVFormatParameters *ap) {
+ AVStream *st = av_new_stream(s, 0);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S24DAUD;
+ st->codec->codec_tag = MKTAG('d', 'a', 'u', 'd');
+ st->codec->channels = 6;
+ st->codec->sample_rate = 96000;
+ st->codec->bit_rate = 3 * 6 * 96000 * 8;
+ st->codec->block_align = 3 * 6;
+ st->codec->bits_per_sample = 24;
+ return 0;
+}
+
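The fixed D-Cinema audio parameters above follow directly from the format: 6 channels of 24-bit (3-byte) PCM at 96 kHz, so block_align = 3 * 6 = 18 bytes per sample frame and bit_rate = 3 * 6 * 96000 * 8 = 13,824,000 bit/s.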
+static int daud_packet(AVFormatContext *s, AVPacket *pkt) {
+ ByteIOContext *pb = &s->pb;
+ int ret, size;
+ if (url_feof(pb))
+ return AVERROR_IO;
+ size = get_be16(pb);
+ get_be16(pb); // unknown
+ ret = av_get_packet(pb, pkt, size);
+ pkt->stream_index = 0;
+ return ret;
+}
+
+AVInputFormat daud_demuxer = {
+ "daud",
+ "D-Cinema audio format",
+ 0,
+ NULL,
+ daud_header,
+ daud_packet,
+ NULL,
+ NULL,
+ .extensions = "302",
+};
diff --git a/contrib/ffmpeg/libavformat/dc1394.c b/contrib/ffmpeg/libavformat/dc1394.c
new file mode 100644
index 000000000..5098c0fdf
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dc1394.c
@@ -0,0 +1,193 @@
+/*
+ * IIDC1394 grab interface (uses libdc1394 and libraw1394)
+ * Copyright (c) 2004 Roman Shaposhnik
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+
+#include <libraw1394/raw1394.h>
+#include <libdc1394/dc1394_control.h>
+
+#undef free
+
+typedef struct dc1394_data {
+ raw1394handle_t handle;
+ dc1394_cameracapture camera;
+ int current_frame;
+ int fps;
+
+ AVPacket packet;
+} dc1394_data;
+
+struct dc1394_frame_format {
+ int width;
+ int height;
+ enum PixelFormat pix_fmt;
+ int frame_size_id;
+} dc1394_frame_formats[] = {
+ { 320, 240, PIX_FMT_UYVY422, MODE_320x240_YUV422 },
+ { 640, 480, PIX_FMT_UYVY411, MODE_640x480_YUV411 },
+ { 640, 480, PIX_FMT_UYVY422, MODE_640x480_YUV422 },
+ { 0, 0, 0, MODE_320x240_YUV422 } /* default -- gotta be the last one */
+};
+
+struct dc1394_frame_rate {
+ int frame_rate;
+ int frame_rate_id;
+} dc1394_frame_rates[] = {
+ { 1875, FRAMERATE_1_875 },
+ { 3750, FRAMERATE_3_75 },
+ { 7500, FRAMERATE_7_5 },
+ { 15000, FRAMERATE_15 },
+ { 30000, FRAMERATE_30 },
+ { 60000, FRAMERATE_60 },
+ { 0, FRAMERATE_30 } /* default -- gotta be the last one */
+};
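The frame_rate field above is stored as frames per second multiplied by 1000 (so 1875 means 1.875 fps), which is why dc1394_read_header() below compares it against av_rescale(1000, time_base.den, time_base.num); for example, a requested time base of 1/30 yields 30000 and selects FRAMERATE_30, while any value with no exact match falls through to the terminating default entry.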
+
+static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
+{
+ dc1394_data* dc1394 = c->priv_data;
+ AVStream* vst;
+ nodeid_t* camera_nodes;
+ int res;
+ struct dc1394_frame_format *fmt;
+ struct dc1394_frame_rate *fps;
+
+ for (fmt = dc1394_frame_formats; fmt->width; fmt++)
+ if (fmt->pix_fmt == ap->pix_fmt && fmt->width == ap->width && fmt->height == ap->height)
+ break;
+
+ for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
+ if (fps->frame_rate == av_rescale(1000, ap->time_base.den, ap->time_base.num))
+ break;
+
+ /* create a video stream */
+ vst = av_new_stream(c, 0);
+ if (!vst)
+ return -1;
+ av_set_pts_info(vst, 64, 1, 1000);
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = CODEC_ID_RAWVIDEO;
+ vst->codec->time_base.den = fps->frame_rate;
+ vst->codec->time_base.num = 1000;
+ vst->codec->width = fmt->width;
+ vst->codec->height = fmt->height;
+ vst->codec->pix_fmt = fmt->pix_fmt;
+
+ /* packet init */
+ av_init_packet(&dc1394->packet);
+ dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
+ dc1394->packet.stream_index = vst->index;
+ dc1394->packet.flags |= PKT_FLAG_KEY;
+
+ dc1394->current_frame = 0;
+ dc1394->fps = fps->frame_rate;
+
+ vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
+
+ /* Now let's prep the hardware */
+ dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
+ if (!dc1394->handle) {
+ av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
+ goto out;
+ }
+ camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
+ if (!camera_nodes || camera_nodes[ap->channel] == DC1394_NO_CAMERA) {
+ av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", ap->channel);
+ goto out_handle;
+ }
+ res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[ap->channel],
+ 0,
+ FORMAT_VGA_NONCOMPRESSED,
+ fmt->frame_size_id,
+ SPEED_400,
+ fps->frame_rate_id, 8, 1,
+ ap->device,
+ &dc1394->camera);
+ dc1394_free_camera_nodes(camera_nodes);
+ if (res != DC1394_SUCCESS) {
+ av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
+ goto out_handle;
+ }
+
+ res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
+ if (res != DC1394_SUCCESS) {
+ av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
+ goto out_handle_dma;
+ }
+
+ return 0;
+
+out_handle_dma:
+ dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
+ dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
+out_handle:
+ dc1394_destroy_handle(dc1394->handle);
+out:
+ return -1;
+}
+
+static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
+{
+ struct dc1394_data *dc1394 = c->priv_data;
+ int res;
+
+ /* discard stale frame */
+ if (dc1394->current_frame++) {
+ if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
+ av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
+ }
+
+ res = dc1394_dma_single_capture(&dc1394->camera);
+
+ if (res == DC1394_SUCCESS) {
+ dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
+ dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->fps;
+ res = dc1394->packet.size;
+ } else {
+ av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
+ dc1394->packet.data = NULL;
+ res = -1;
+ }
+
+ *pkt = dc1394->packet;
+ return res;
+}
+
+static int dc1394_close(AVFormatContext * context)
+{
+ struct dc1394_data *dc1394 = context->priv_data;
+
+ dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
+ dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
+ dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
+ dc1394_destroy_handle(dc1394->handle);
+
+ return 0;
+}
+
+AVInputFormat dc1394_demuxer = {
+ .name = "dc1394",
+ .long_name = "dc1394 A/V grab",
+ .priv_data_size = sizeof(struct dc1394_data),
+ .read_header = dc1394_read_header,
+ .read_packet = dc1394_read_packet,
+ .read_close = dc1394_close,
+ .flags = AVFMT_NOFILE
+};
diff --git a/contrib/ffmpeg/libavformat/dsicin.c b/contrib/ffmpeg/libavformat/dsicin.c
new file mode 100644
index 000000000..f274eadf8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dsicin.c
@@ -0,0 +1,224 @@
+/*
+ * Delphine Software International CIN File Demuxer
+ * Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file dsicin.c
+ * Delphine Software International CIN file demuxer
+ */
+
+#include "avformat.h"
+
+
+typedef struct CinFileHeader {
+ int video_frame_size;
+ int video_frame_width;
+ int video_frame_height;
+ int audio_frequency;
+ int audio_bits;
+ int audio_stereo;
+ int audio_frame_size;
+} CinFileHeader;
+
+typedef struct CinFrameHeader {
+ int audio_frame_type;
+ int video_frame_type;
+ int pal_colors_count;
+ int audio_frame_size;
+ int video_frame_size;
+} CinFrameHeader;
+
+typedef struct CinDemuxContext {
+ int audio_stream_index;
+ int video_stream_index;
+ CinFileHeader file_header;
+ int64_t audio_stream_pts;
+ int64_t video_stream_pts;
+ CinFrameHeader frame_header;
+ int audio_buffer_size;
+} CinDemuxContext;
+
+
+static int cin_probe(AVProbeData *p)
+{
+ if (p->buf_size < 18)
+ return 0;
+
+ /* header starts with this special marker */
+ if (LE_32(&p->buf[0]) != 0x55AA0000)
+ return 0;
+
+ /* for accuracy, check some header field values */
+ if (LE_32(&p->buf[12]) != 22050 || p->buf[16] != 16 || p->buf[17] != 0)
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int cin_read_file_header(CinDemuxContext *cin, ByteIOContext *pb) {
+ CinFileHeader *hdr = &cin->file_header;
+
+ if (get_le32(pb) != 0x55AA0000)
+ return AVERROR_INVALIDDATA;
+
+ hdr->video_frame_size = get_le32(pb);
+ hdr->video_frame_width = get_le16(pb);
+ hdr->video_frame_height = get_le16(pb);
+ hdr->audio_frequency = get_le32(pb);
+ hdr->audio_bits = get_byte(pb);
+ hdr->audio_stereo = get_byte(pb);
+ hdr->audio_frame_size = get_le16(pb);
+
+ if (hdr->audio_frequency != 22050 || hdr->audio_bits != 16 || hdr->audio_stereo != 0)
+ return AVERROR_INVALIDDATA;
+
+ return 0;
+}
+
+static int cin_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ int rc;
+ CinDemuxContext *cin = (CinDemuxContext *)s->priv_data;
+ CinFileHeader *hdr = &cin->file_header;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+
+ rc = cin_read_file_header(cin, pb);
+ if (rc)
+ return rc;
+
+ cin->video_stream_pts = 0;
+ cin->audio_stream_pts = 0;
+ cin->audio_buffer_size = 0;
+
+ /* initialize the video decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 32, 1, 12);
+ cin->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_DSICINVIDEO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = hdr->video_frame_width;
+ st->codec->height = hdr->video_frame_height;
+
+ /* initialize the audio decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 32, 1, 22050);
+ cin->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_DSICINAUDIO;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = 1;
+ st->codec->sample_rate = 22050;
+ st->codec->bits_per_sample = 16;
+ st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_sample * st->codec->channels;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ return 0;
+}
+
+static int cin_read_frame_header(CinDemuxContext *cin, ByteIOContext *pb) {
+ CinFrameHeader *hdr = &cin->frame_header;
+
+ hdr->video_frame_type = get_byte(pb);
+ hdr->audio_frame_type = get_byte(pb);
+ hdr->pal_colors_count = get_le16(pb);
+ hdr->video_frame_size = get_le32(pb);
+ hdr->audio_frame_size = get_le32(pb);
+
+ if (url_feof(pb) || url_ferror(pb))
+ return AVERROR_IO;
+
+ if (get_le32(pb) != 0xAA55AA55)
+ return AVERROR_INVALIDDATA;
+
+ return 0;
+}
+
+static int cin_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ CinDemuxContext *cin = (CinDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ CinFrameHeader *hdr = &cin->frame_header;
+ int rc, palette_type, pkt_size;
+
+ if (cin->audio_buffer_size == 0) {
+ rc = cin_read_frame_header(cin, pb);
+ if (rc)
+ return rc;
+
+ if ((int16_t)hdr->pal_colors_count < 0) {
+ hdr->pal_colors_count = -(int16_t)hdr->pal_colors_count;
+ palette_type = 1;
+ } else {
+ palette_type = 0;
+ }
+
+ /* palette and video packet */
+ pkt_size = (palette_type + 3) * hdr->pal_colors_count + hdr->video_frame_size;
+
+ if (av_new_packet(pkt, 4 + pkt_size))
+ return AVERROR_NOMEM;
+
+ pkt->stream_index = cin->video_stream_index;
+ pkt->pts = cin->video_stream_pts++;
+
+ pkt->data[0] = palette_type;
+ pkt->data[1] = hdr->pal_colors_count & 0xFF;
+ pkt->data[2] = hdr->pal_colors_count >> 8;
+ pkt->data[3] = hdr->video_frame_type;
+
+ if (get_buffer(pb, &pkt->data[4], pkt_size) != pkt_size)
+ return AVERROR_IO;
+
+ /* sound buffer will be processed on next read_packet() call */
+ cin->audio_buffer_size = hdr->audio_frame_size;
+ return 0;
+ }
+
+ /* audio packet */
+ if (av_new_packet(pkt, cin->audio_buffer_size))
+ return AVERROR_NOMEM;
+
+ pkt->stream_index = cin->audio_stream_index;
+ pkt->pts = cin->audio_stream_pts;
+ cin->audio_stream_pts += cin->audio_buffer_size * 2 / cin->file_header.audio_frame_size;
+
+ if (get_buffer(pb, pkt->data, cin->audio_buffer_size) != cin->audio_buffer_size)
+ return AVERROR_IO;
+
+ cin->audio_buffer_size = 0;
+ return 0;
+}
+
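To make the packet layout in cin_read_packet() above concrete: the 4-byte {palette_type, count_lo, count_hi, video_frame_type} header is followed by the palette block and then the video data. The numbers below are hypothetical (a full 256-colour palette update with palette_type = 1) and only illustrate the formula used in the code:

/* Illustrative only -- not part of the patch.
 *
 *   pkt_size = (palette_type + 3) * pal_colors_count + video_frame_size
 *            = (1 + 3) * 256 + video_frame_size
 *            = 1024 + video_frame_size bytes of payload, on top of the
 *              4-byte packet header.
 */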
+AVInputFormat dsicin_demuxer = {
+ "dsicin",
+ "Delphine Software International CIN format",
+ sizeof(CinDemuxContext),
+ cin_probe,
+ cin_read_header,
+ cin_read_packet,
+};
diff --git a/contrib/ffmpeg/libavformat/dv.c b/contrib/ffmpeg/libavformat/dv.c
new file mode 100644
index 000000000..3ff8a3fe2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dv.c
@@ -0,0 +1,451 @@
+/*
+ * General DV muxer/demuxer
+ * Copyright (c) 2003 Roman Shaposhnik
+ *
+ * Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
+ * of DV technical info.
+ *
+ * Raw DV format
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * 50 Mbps (DVCPRO50) support
+ * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <time.h>
+#include "avformat.h"
+#include "dvdata.h"
+#include "dv.h"
+
+struct DVDemuxContext {
+ const DVprofile* sys; /* Current DV profile. E.g.: 525/60, 625/50 */
+ AVFormatContext* fctx;
+ AVStream* vst;
+ AVStream* ast[2];
+ AVPacket audio_pkt[2];
+ uint8_t audio_buf[2][8192];
+ int ach;
+ int frames;
+ uint64_t abytes;
+};
+
+static inline uint16_t dv_audio_12to16(uint16_t sample)
+{
+ uint16_t shift, result;
+
+ sample = (sample < 0x800) ? sample : sample | 0xf000;
+ shift = (sample & 0xf00) >> 8;
+
+ if (shift < 0x2 || shift > 0xd) {
+ result = sample;
+ } else if (shift < 0x8) {
+ shift--;
+ result = (sample - (256 * shift)) << shift;
+ } else {
+ shift = 0xe - shift;
+ result = ((sample + ((256 * shift) + 1)) << shift) - 1;
+ }
+
+ return result;
+}
+
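Tracing dv_audio_12to16() below on a couple of inputs (values chosen purely to illustrate the piecewise expansion): a small sample such as 0x0FF falls in the shift < 2 branch and passes through unchanged, while 0x300 has shift = 3, so the middle branch computes (0x300 - 0x200) << 2 = 0x400.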
+/*
+ * This is the dumbest implementation of all -- it simply looks at
+ * a fixed offset and fails if the pack isn't there. We might want
+ * to add a fallback mechanism for a complete search of missing packs.
+ */
+static const uint8_t* dv_extract_pack(uint8_t* frame, enum dv_pack_type t)
+{
+ int offs;
+
+ switch (t) {
+ case dv_audio_source:
+ offs = (80*6 + 80*16*3 + 3);
+ break;
+ case dv_audio_control:
+ offs = (80*6 + 80*16*4 + 3);
+ break;
+ case dv_video_control:
+ offs = (80*5 + 48 + 5);
+ break;
+ default:
+ return NULL;
+ }
+
+ return (frame[offs] == t ? &frame[offs] : NULL);
+}
+
+/*
+ * There are a couple of assumptions being made here:
+ * 1. By default we silence erroneous (0x8000/16bit 0x800/12bit) audio samples.
+ * We can pass them upwards when ffmpeg is ready to deal with them.
+ * 2. We don't do software emphasis.
+ * 3. Audio is always returned as 16bit linear samples: 12bit nonlinear samples
+ * are converted into 16bit linear ones.
+ */
+static int dv_extract_audio(uint8_t* frame, uint8_t* pcm, uint8_t* pcm2,
+ const DVprofile *sys)
+{
+ int size, chan, i, j, d, of, smpls, freq, quant, half_ch;
+ uint16_t lc, rc;
+ const uint8_t* as_pack;
+
+ as_pack = dv_extract_pack(frame, dv_audio_source);
+ if (!as_pack) /* No audio ? */
+ return 0;
+
+ smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */
+ freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48 kHz, 1 - 44.1 kHz, 2 - 32 kHz */
+ quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
+
+ if (quant > 1)
+ return -1; /* Unsupported quantization */
+
+ size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
+ half_ch = sys->difseg_size/2;
+
+ /* for each DIF channel */
+ for (chan = 0; chan < sys->n_difchan; chan++) {
+ /* for each DIF segment */
+ for (i = 0; i < sys->difseg_size; i++) {
+ frame += 6 * 80; /* skip DIF segment header */
+ if (quant == 1 && i == half_ch) {
+ /* next stereo channel (12bit mode only) */
+ if (!pcm2)
+ break;
+ else
+ pcm = pcm2;
+ }
+
+ /* for each AV sequence */
+ for (j = 0; j < 9; j++) {
+ for (d = 8; d < 80; d += 2) {
+ if (quant == 0) { /* 16bit quantization */
+ of = sys->audio_shuffle[i][j] + (d - 8)/2 * sys->audio_stride;
+ if (of*2 >= size)
+ continue;
+
+ pcm[of*2] = frame[d+1]; // FIXME: maybe we have to admit
+ pcm[of*2+1] = frame[d]; // that DV is big-endian PCM
+ if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00)
+ pcm[of*2+1] = 0;
+ } else { /* 12bit quantization */
+ lc = ((uint16_t)frame[d] << 4) |
+ ((uint16_t)frame[d+2] >> 4);
+ rc = ((uint16_t)frame[d+1] << 4) |
+ ((uint16_t)frame[d+2] & 0x0f);
+ lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc));
+ rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc));
+
+ of = sys->audio_shuffle[i%half_ch][j] + (d - 8)/3 * sys->audio_stride;
+ if (of*2 >= size)
+ continue;
+
+ pcm[of*2] = lc & 0xff; // FIXME: maybe we have to admit
+ pcm[of*2+1] = lc >> 8; // that DV is big-endian PCM
+ of = sys->audio_shuffle[i%half_ch+half_ch][j] +
+ (d - 8)/3 * sys->audio_stride;
+ pcm[of*2] = rc & 0xff; // FIXME: maybe we have to admit
+ pcm[of*2+1] = rc >> 8; // that DV is big-endian PCM
+ ++d;
+ }
+ }
+
+ frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
+ }
+ }
+
+ /* next stereo channel (50Mbps only) */
+ if(!pcm2)
+ break;
+ pcm = pcm2;
+ }
+
+ return size;
+}
+
+static int dv_extract_audio_info(DVDemuxContext* c, uint8_t* frame)
+{
+ const uint8_t* as_pack;
+ int freq, stype, smpls, quant, i, ach;
+
+ as_pack = dv_extract_pack(frame, dv_audio_source);
+ if (!as_pack || !c->sys) { /* No audio ? */
+ c->ach = 0;
+ return 0;
+ }
+
+ smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */
+ freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48 kHz, 1 - 44.1 kHz, 2 - 32 kHz */
+ stype = (as_pack[3] & 0x1f); /* 0 - 2CH, 2 - 4CH */
+ quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
+
+ /* note: ach counts PAIRS of channels (i.e. stereo channels) */
+ ach = (stype == 2 || (quant && (freq == 2))) ? 2 : 1;
+
+ /* Dynamic handling of the audio streams in DV */
+ for (i=0; i<ach; i++) {
+ if (!c->ast[i]) {
+ c->ast[i] = av_new_stream(c->fctx, 0);
+ if (!c->ast[i])
+ break;
+ av_set_pts_info(c->ast[i], 64, 1, 30000);
+ c->ast[i]->codec->codec_type = CODEC_TYPE_AUDIO;
+ c->ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE;
+
+ av_init_packet(&c->audio_pkt[i]);
+ c->audio_pkt[i].size = 0;
+ c->audio_pkt[i].data = c->audio_buf[i];
+ c->audio_pkt[i].stream_index = c->ast[i]->index;
+ c->audio_pkt[i].flags |= PKT_FLAG_KEY;
+ }
+ c->ast[i]->codec->sample_rate = dv_audio_frequency[freq];
+ c->ast[i]->codec->channels = 2;
+ c->ast[i]->codec->bit_rate = 2 * dv_audio_frequency[freq] * 16;
+ c->ast[i]->start_time = 0;
+ }
+ c->ach = i;
+
+ return (c->sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
+}
+
+static int dv_extract_video_info(DVDemuxContext *c, uint8_t* frame)
+{
+ const uint8_t* vsc_pack;
+ AVCodecContext* avctx;
+ int apt, is16_9;
+ int size = 0;
+
+ if (c->sys) {
+ avctx = c->vst->codec;
+
+ av_set_pts_info(c->vst, 64, c->sys->frame_rate_base, c->sys->frame_rate);
+ avctx->time_base= (AVRational){c->sys->frame_rate_base, c->sys->frame_rate};
+ if(!avctx->width){
+ avctx->width = c->sys->width;
+ avctx->height = c->sys->height;
+ }
+ avctx->pix_fmt = c->sys->pix_fmt;
+
+ /* finding out SAR is a little bit messy */
+ vsc_pack = dv_extract_pack(frame, dv_video_control);
+ apt = frame[4] & 0x07;
+ is16_9 = (vsc_pack && ((vsc_pack[2] & 0x07) == 0x02 ||
+ (!apt && (vsc_pack[2] & 0x07) == 0x07)));
+ avctx->sample_aspect_ratio = c->sys->sar[is16_9];
+ avctx->bit_rate = av_rescale(c->sys->frame_size * 8,
+ c->sys->frame_rate,
+ c->sys->frame_rate_base);
+ size = c->sys->frame_size;
+ }
+ return size;
+}
+
+/*
+ * The following 3 functions constitute our interface to the world
+ */
+
+DVDemuxContext* dv_init_demux(AVFormatContext *s)
+{
+ DVDemuxContext *c;
+
+ c = av_mallocz(sizeof(DVDemuxContext));
+ if (!c)
+ return NULL;
+
+ c->vst = av_new_stream(s, 0);
+ if (!c->vst) {
+ av_free(c);
+ return NULL;
+ }
+
+ c->sys = NULL;
+ c->fctx = s;
+ c->ast[0] = c->ast[1] = NULL;
+ c->ach = 0;
+ c->frames = 0;
+ c->abytes = 0;
+
+ c->vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ c->vst->codec->codec_id = CODEC_ID_DVVIDEO;
+ c->vst->codec->bit_rate = 25000000;
+ c->vst->start_time = 0;
+
+ return c;
+}
+
+int dv_get_packet(DVDemuxContext *c, AVPacket *pkt)
+{
+ int size = -1;
+ int i;
+
+ for (i=0; i<c->ach; i++) {
+ if (c->ast[i] && c->audio_pkt[i].size) {
+ *pkt = c->audio_pkt[i];
+ c->audio_pkt[i].size = 0;
+ size = pkt->size;
+ break;
+ }
+ }
+
+ return size;
+}
+
+int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
+ uint8_t* buf, int buf_size)
+{
+ int size, i;
+
+ if (buf_size < DV_PROFILE_BYTES ||
+ !(c->sys = dv_frame_profile(buf)) ||
+ buf_size < c->sys->frame_size) {
+ return -1; /* Broken frame, or not enough data */
+ }
+
+ /* Queueing audio packet */
+ /* FIXME: in case of no audio/bad audio we have to do something */
+ size = dv_extract_audio_info(c, buf);
+ for (i=0; i<c->ach; i++) {
+ c->audio_pkt[i].size = size;
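+ /* abytes * 8 / bit_rate = seconds of audio delivered so far; the audio
+ streams use a 1/30000 time base (see dv_extract_audio_info), hence *30000 */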
+ c->audio_pkt[i].pts = c->abytes * 30000*8 / c->ast[i]->codec->bit_rate;
+ }
+ dv_extract_audio(buf, c->audio_buf[0], c->audio_buf[1], c->sys);
+ c->abytes += size;
+
+ /* Now it's time to return video packet */
+ size = dv_extract_video_info(c, buf);
+ av_init_packet(pkt);
+ pkt->data = buf;
+ pkt->size = size;
+ pkt->flags |= PKT_FLAG_KEY;
+ pkt->stream_index = c->vst->id;
+ pkt->pts = c->frames;
+
+ c->frames++;
+
+ return size;
+}
+
+static int64_t dv_frame_offset(AVFormatContext *s, DVDemuxContext *c,
+ int64_t timestamp, int flags)
+{
+ // FIXME: sys may be wrong if last dv_read_packet() failed (buffer is junk)
+ const DVprofile* sys = dv_codec_profile(c->vst->codec);
+ int64_t offset;
+ int64_t size = url_fsize(&s->pb);
+ int64_t max_offset = ((size-1) / sys->frame_size) * sys->frame_size;
+
+ offset = sys->frame_size * timestamp;
+
+ if (offset > max_offset) offset = max_offset;
+ else if (offset < 0) offset = 0;
+
+ return offset;
+}
+
+void dv_flush_audio_packets(DVDemuxContext *c)
+{
+ c->audio_pkt[0].size = c->audio_pkt[1].size = 0;
+}
+
+/************************************************************
+ * Implementation of the easiest DV storage of all -- raw DV.
+ ************************************************************/
+
+typedef struct RawDVContext {
+ DVDemuxContext* dv_demux;
+ uint8_t buf[DV_MAX_FRAME_SIZE];
+} RawDVContext;
+
+static int dv_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ RawDVContext *c = s->priv_data;
+
+ c->dv_demux = dv_init_demux(s);
+ if (!c->dv_demux)
+ return -1;
+
+ if (get_buffer(&s->pb, c->buf, DV_PROFILE_BYTES) <= 0 ||
+ url_fseek(&s->pb, -DV_PROFILE_BYTES, SEEK_CUR) < 0)
+ return AVERROR_IO;
+
+ c->dv_demux->sys = dv_frame_profile(c->buf);
+ s->bit_rate = av_rescale(c->dv_demux->sys->frame_size * 8,
+ c->dv_demux->sys->frame_rate,
+ c->dv_demux->sys->frame_rate_base);
+
+ return 0;
+}
+
+
+static int dv_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int size;
+ RawDVContext *c = s->priv_data;
+
+ size = dv_get_packet(c->dv_demux, pkt);
+
+ if (size < 0) {
+ size = c->dv_demux->sys->frame_size;
+ if (get_buffer(&s->pb, c->buf, size) <= 0)
+ return AVERROR_IO;
+
+ size = dv_produce_packet(c->dv_demux, pkt, c->buf, size);
+ }
+
+ return size;
+}
+
+static int dv_read_seek(AVFormatContext *s, int stream_index,
+ int64_t timestamp, int flags)
+{
+ RawDVContext *r = s->priv_data;
+ DVDemuxContext *c = r->dv_demux;
+ int64_t offset= dv_frame_offset(s, c, timestamp, flags);
+
+ c->frames= offset / c->sys->frame_size;
+ if (c->ach)
+ c->abytes= av_rescale(c->frames,
+ c->ast[0]->codec->bit_rate * (int64_t)c->sys->frame_rate_base,
+ 8*c->sys->frame_rate);
+
+ dv_flush_audio_packets(c);
+ return url_fseek(&s->pb, offset, SEEK_SET);
+}
+
+static int dv_read_close(AVFormatContext *s)
+{
+ RawDVContext *c = s->priv_data;
+ av_free(c->dv_demux);
+ return 0;
+}
+
+#ifdef CONFIG_DV_DEMUXER
+AVInputFormat dv_demuxer = {
+ "dv",
+ "DV video format",
+ sizeof(RawDVContext),
+ NULL,
+ dv_read_header,
+ dv_read_packet,
+ dv_read_close,
+ dv_read_seek,
+ .extensions = "dv,dif",
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/dv.h b/contrib/ffmpeg/libavformat/dv.h
new file mode 100644
index 000000000..f39d22c9f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dv.h
@@ -0,0 +1,37 @@
+/*
+ * General DV muxer/demuxer
+ * Copyright (c) 2003 Roman Shaposhnik
+ *
+ * Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth
+ * of DV technical info.
+ *
+ * Raw DV format
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+typedef struct DVDemuxContext DVDemuxContext;
+DVDemuxContext* dv_init_demux(AVFormatContext* s);
+int dv_get_packet(DVDemuxContext*, AVPacket *);
+int dv_produce_packet(DVDemuxContext*, AVPacket*, uint8_t*, int);
+void dv_flush_audio_packets(DVDemuxContext*);
+
+typedef struct DVMuxContext DVMuxContext;
+DVMuxContext* dv_init_mux(AVFormatContext* s);
+int dv_assemble_frame(DVMuxContext *c, AVStream*, const uint8_t*, int, uint8_t**);
+void dv_delete_mux(DVMuxContext*);
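+
+/*
+ * Typical demuxing usage -- an illustrative sketch only; the raw DV and
+ * dv1394 demuxers in this patch follow this pattern. Error handling is
+ * omitted, and s, pkt, buf and buf_size are assumed to come from the caller:
+ *
+ *   DVDemuxContext *c = dv_init_demux(s);   (creates the video/audio streams)
+ *   ...
+ *   size = dv_get_packet(c, pkt);           (drain any queued audio first)
+ *   if (size < 0)
+ *       size = dv_produce_packet(c, pkt, buf, buf_size);
+ *                                           (split one raw DV frame into a
+ *                                            video packet and queue its audio
+ *                                            for later dv_get_packet() calls)
+ */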
diff --git a/contrib/ffmpeg/libavformat/dv1394.c b/contrib/ffmpeg/libavformat/dv1394.c
new file mode 100644
index 000000000..f00d47435
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dv1394.c
@@ -0,0 +1,240 @@
+/*
+ * Linux DV1394 interface
+ * Copyright (c) 2003 Max Krasnyansky <maxk@qualcomm.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/poll.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include "avformat.h"
+
+#undef DV1394_DEBUG
+
+#include "dv1394.h"
+#include "dv.h"
+
+struct dv1394_data {
+ int fd;
+ int channel;
+ int format;
+
+ void *ring; /* Ring buffer */
+ int index; /* Current frame index */
+ int avail; /* Number of frames available for reading */
+ int done; /* Number of completed frames */
+
+ DVDemuxContext* dv_demux; /* Generic DV muxing/demuxing context */
+};
+
+/*
+ * The trick here is to kludge around a well-known problem with the kernel
+ * Oopsing when you try to capture PAL on a device node configured for NTSC.
+ * That's why we have to configure the device node for PAL, and then read
+ * only an NTSC amount of data.
+ */
+static int dv1394_reset(struct dv1394_data *dv)
+{
+ struct dv1394_init init;
+
+ init.channel = dv->channel;
+ init.api_version = DV1394_API_VERSION;
+ init.n_frames = DV1394_RING_FRAMES;
+ init.format = DV1394_PAL;
+
+ if (ioctl(dv->fd, DV1394_INIT, &init) < 0)
+ return -1;
+
+ dv->avail = dv->done = 0;
+ return 0;
+}
+
+static int dv1394_start(struct dv1394_data *dv)
+{
+ /* Tell DV1394 driver to enable receiver */
+ if (ioctl(dv->fd, DV1394_START_RECEIVE, 0) < 0) {
+ perror("Failed to start receiver");
+ return -1;
+ }
+ return 0;
+}
+
+static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap)
+{
+ struct dv1394_data *dv = context->priv_data;
+ const char *video_device;
+
+ dv->dv_demux = dv_init_demux(context);
+ if (!dv->dv_demux)
+ goto failed;
+
+ if (ap->standard && !strcasecmp(ap->standard, "pal"))
+ dv->format = DV1394_PAL;
+ else
+ dv->format = DV1394_NTSC;
+
+ if (ap->channel)
+ dv->channel = ap->channel;
+ else
+ dv->channel = DV1394_DEFAULT_CHANNEL;
+
+ /* Open and initialize DV1394 device */
+ video_device = ap->device;
+ if (!video_device)
+ video_device = "/dev/dv1394/0";
+ dv->fd = open(video_device, O_RDONLY);
+ if (dv->fd < 0) {
+ perror("Failed to open DV interface");
+ goto failed;
+ }
+
+ if (dv1394_reset(dv) < 0) {
+ perror("Failed to initialize DV interface");
+ goto failed;
+ }
+
+ dv->ring = mmap(NULL, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES,
+ PROT_READ, MAP_PRIVATE, dv->fd, 0);
+ if (dv->ring == MAP_FAILED) {
+ perror("Failed to mmap DV ring buffer");
+ goto failed;
+ }
+
+ if (dv1394_start(dv) < 0)
+ goto failed;
+
+ return 0;
+
+failed:
+ close(dv->fd);
+ return AVERROR_IO;
+}
+
+static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt)
+{
+ struct dv1394_data *dv = context->priv_data;
+ int size;
+
+ size = dv_get_packet(dv->dv_demux, pkt);
+ if (size > 0)
+ return size;
+
+ if (!dv->avail) {
+ struct dv1394_status s;
+ struct pollfd p;
+
+ if (dv->done) {
+ /* Request more frames */
+ if (ioctl(dv->fd, DV1394_RECEIVE_FRAMES, dv->done) < 0) {
+ /* This usually means that the ring buffer overflowed.
+ * We have to reset :(.
+ */
+
+ av_log(context, AV_LOG_ERROR, "DV1394: Ring buffer overflow. Resetting ..\n");
+
+ dv1394_reset(dv);
+ dv1394_start(dv);
+ }
+ dv->done = 0;
+ }
+
+ /* Wait until more frames are available */
+restart_poll:
+ p.fd = dv->fd;
+ p.events = POLLIN | POLLERR | POLLHUP;
+ if (poll(&p, 1, -1) < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ goto restart_poll;
+ perror("Poll failed");
+ return AVERROR_IO;
+ }
+
+ if (ioctl(dv->fd, DV1394_GET_STATUS, &s) < 0) {
+ perror("Failed to get status");
+ return AVERROR_IO;
+ }
+#ifdef DV1394_DEBUG
+ av_log(context, AV_LOG_DEBUG, "DV1394: status\n"
+ "\tactive_frame\t%d\n"
+ "\tfirst_clear_frame\t%d\n"
+ "\tn_clear_frames\t%d\n"
+ "\tdropped_frames\t%d\n",
+ s.active_frame, s.first_clear_frame,
+ s.n_clear_frames, s.dropped_frames);
+#endif
+
+ dv->avail = s.n_clear_frames;
+ dv->index = s.first_clear_frame;
+ dv->done = 0;
+
+ if (s.dropped_frames) {
+ av_log(context, AV_LOG_ERROR, "DV1394: Frame drop detected (%d). Resetting ..\n",
+ s.dropped_frames);
+
+ dv1394_reset(dv);
+ dv1394_start(dv);
+ }
+ }
+
+#ifdef DV1394_DEBUG
+ av_log(context, AV_LOG_DEBUG, "index %d, avail %d, done %d\n", dv->index, dv->avail,
+ dv->done);
+#endif
+
+ size = dv_produce_packet(dv->dv_demux, pkt,
+ dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
+ DV1394_PAL_FRAME_SIZE);
+ dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
+ dv->done++; dv->avail--;
+
+ return size;
+}
+
+static int dv1394_close(AVFormatContext * context)
+{
+ struct dv1394_data *dv = context->priv_data;
+
+ /* Shutdown DV1394 receiver */
+ if (ioctl(dv->fd, DV1394_SHUTDOWN, 0) < 0)
+ perror("Failed to shutdown DV1394");
+
+ /* Unmap ring buffer */
+ if (munmap(dv->ring, DV1394_NTSC_FRAME_SIZE * DV1394_RING_FRAMES) < 0)
+ perror("Failed to munmap DV1394 ring buffer");
+
+ close(dv->fd);
+ av_free(dv->dv_demux);
+
+ return 0;
+}
+
+AVInputFormat dv1394_demuxer = {
+ .name = "dv1394",
+ .long_name = "dv1394 A/V grab",
+ .priv_data_size = sizeof(struct dv1394_data),
+ .read_header = dv1394_read_header,
+ .read_packet = dv1394_read_packet,
+ .read_close = dv1394_close,
+ .flags = AVFMT_NOFILE
+};
diff --git a/contrib/ffmpeg/libavformat/dv1394.h b/contrib/ffmpeg/libavformat/dv1394.h
new file mode 100644
index 000000000..f7db40108
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dv1394.h
@@ -0,0 +1,357 @@
+/*
+ * dv1394.h - DV input/output over IEEE 1394 on OHCI chips
+ * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
+ * receive, proc_fs by Dan Dennedy <dan@dennedy.org>
+ *
+ * based on:
+ * video1394.h - driver for OHCI 1394 boards
+ * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef _DV_1394_H
+#define _DV_1394_H
+
+#define DV1394_DEFAULT_CHANNEL 63
+#define DV1394_DEFAULT_CARD 0
+#define DV1394_RING_FRAMES 20
+
+#define DV1394_WIDTH 720
+#define DV1394_NTSC_HEIGHT 480
+#define DV1394_PAL_HEIGHT 576
+
+/* This is the public user-space interface. Try not to break it. */
+
+#define DV1394_API_VERSION 0x20011127
+
+/* ********************
+ ** **
+ ** DV1394 API **
+ ** **
+ ********************
+
+ There are two methods of operating the DV1394 DV output device.
+
+ 1)
+
+ The simplest is an interface based on write(): simply write
+ full DV frames of data to the device, and they will be transmitted
+ as quickly as possible. The FD may be set for non-blocking I/O,
+ in which case you can use select() or poll() to wait for output
+ buffer space.
+
+ To set the DV output parameters (e.g. whether you want NTSC or PAL
+ video), use the DV1394_INIT ioctl, passing in the parameters you
+ want in a struct dv1394_init.
+
+ Example 1:
+ To play a raw .DV file: cat foo.DV > /dev/dv1394
+ (cat will use write() internally)
+
+ Example 2:
+ static struct dv1394_init init = {
+ 0x63, (broadcast channel)
+ 4, (four-frame ringbuffer)
+ DV1394_NTSC, (send NTSC video)
+ 0, 0 (default empty packet rate)
+ }
+
+ ioctl(fd, DV1394_INIT, &init);
+
+ while(1) {
+ read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
+ write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
+ }
+
+ 2)
+
+ For more control over buffering, and to avoid unnecessary copies
+ of the DV data, you can use the more sophisticated mmap() interface.
+ First, call the DV1394_INIT ioctl to specify your parameters,
+ including the number of frames in the ringbuffer. Then, calling mmap()
+ on the dv1394 device will give you direct access to the ringbuffer
+ from which the DV card reads your frame data.
+
+ The ringbuffer is simply one large, contiguous region of memory
+ containing two or more frames of packed DV data. Each frame of DV data
+ is 120000 bytes (NTSC) or 144000 bytes (PAL).
+
+ Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
+ ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
+ or select()/poll() to wait until the frames are transmitted. Next, you'll
+ need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
+ frames are clear (ready to be filled with new DV data). Finally, use
+ DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
+
+
+ Example: here is what a four-frame ringbuffer might look like
+ during DV transmission:
+
+
+ frame 0 frame 1 frame 2 frame 3
+
+ *--------------------------------------*
+ | CLEAR | DV data | DV data | CLEAR |
+ *--------------------------------------*
+ <ACTIVE>
+
+ transmission goes in this direction --->>>
+
+
+ The DV hardware is currently transmitting the data in frame 1.
+ Once frame 1 is finished, it will automatically transmit frame 2.
+ (if frame 2 finishes before frame 3 is submitted, the device
+ will continue to transmit frame 2, and will increase the dropped_frames
+ counter each time it repeats the transmission).
+
+
+ If you called DV1394_GET_STATUS at this instant, you would
+ receive the following values:
+
+ n_frames = 4
+ active_frame = 1
+ first_clear_frame = 3
+ n_clear_frames = 2
+
+ At this point, you should write new DV data into frame 3 and optionally
+ frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
+ it may transmit the new frames.
+
+ ERROR HANDLING
+
+ An error (buffer underflow/overflow or a break in the DV stream due
+ to a 1394 bus reset) can be detected by checking the dropped_frames
+ field of struct dv1394_status (obtained through the
+ DV1394_GET_STATUS ioctl).
+
+ The best way to recover from such an error is to re-initialize
+ dv1394, either by using the DV1394_INIT ioctl call, or closing the
+ file descriptor and opening it again. (note that you must unmap all
+ ringbuffer mappings when closing the file descriptor, or else
+ dv1394 will still be considered 'in use').
+
+ MAIN LOOP
+
+ For maximum efficiency and robustness against bus errors, you are
+ advised to model the main loop of your application after the
+ following pseudo-code example:
+
+ (checks of system call return values omitted for brevity; always
+ check return values in your code!)
+
+ while( frames left ) {
+
+ struct pollfd *pfd = ...;
+
+ pfd->fd = dv1394_fd;
+ pfd->revents = 0;
+ pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
+
+ (add other sources of I/O here)
+
+ poll(pfd, 1, -1); (or select(); add a timeout if you want)
+
+ if(pfd->revents) {
+ struct dv1394_status status;
+
+ ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
+
+ if(status.dropped_frames > 0) {
+ reset_dv1394();
+ } else {
+ for(int i = 0; i < status.n_clear_frames; i++) {
+ copy_DV_frame();
+ }
+ }
+ }
+ }
+
+ where copy_DV_frame() reads or writes on the dv1394 file descriptor
+ (read/write mode) or copies data to/from the mmap ringbuffer and
+ then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
+ frames are available (mmap mode).
+
+ reset_dv1394() is called in the event of a buffer
+ underflow/overflow or a halt in the DV stream (e.g. due to a 1394
+ bus reset). To guarantee recovery from the error, this function
+ should close the dv1394 file descriptor (and munmap() all
+ ringbuffer mappings, if you are using them), then re-open the
+ dv1394 device (and re-map the ringbuffer).
+
+*/
+
+
+/* maximum number of frames in the ringbuffer */
+#define DV1394_MAX_FRAMES 32
+
+/* number of *full* isochronous packets per DV frame */
+#define DV1394_NTSC_PACKETS_PER_FRAME 250
+#define DV1394_PAL_PACKETS_PER_FRAME 300
+
+/* size of one frame's worth of DV data, in bytes */
+#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
+#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
+
+
+/* ioctl() commands */
+
+enum {
+ /* I don't like using 0 as a valid ioctl() */
+ DV1394_INVALID = 0,
+
+
+ /* get the driver ready to transmit video.
+ pass a struct dv1394_init* as the parameter (see below),
+ or NULL to get default parameters */
+ DV1394_INIT,
+
+
+ /* stop transmitting video and free the ringbuffer */
+ DV1394_SHUTDOWN,
+
+
+ /* submit N new frames to be transmitted, where
+ the index of the first new frame is first_clear_buffer,
+ and the index of the last new frame is
+ (first_clear_buffer + N) % n_frames */
+ DV1394_SUBMIT_FRAMES,
+
+
+ /* block until N buffers are clear (pass N as the parameter)
+ Because we re-transmit the last frame on underrun, there
+ will be at most n_frames - 1 clear frames at any time */
+ DV1394_WAIT_FRAMES,
+
+ /* capture new frames that have been received, where
+ the index of the first new frame is first_clear_buffer,
+ and the index of the last new frame is
+ (first_clear_buffer + N) % n_frames */
+ DV1394_RECEIVE_FRAMES,
+
+
+ DV1394_START_RECEIVE,
+
+
+ /* pass a struct dv1394_status* as the parameter (see below) */
+ DV1394_GET_STATUS,
+};
+
+
+
+enum pal_or_ntsc {
+ DV1394_NTSC = 0,
+ DV1394_PAL
+};
+
+
+
+
+/* this is the argument to DV1394_INIT */
+struct dv1394_init {
+ /* DV1394_API_VERSION */
+ unsigned int api_version;
+
+ /* isochronous transmission channel to use */
+ unsigned int channel;
+
+ /* number of frames in the ringbuffer. Must be at least 2
+ and at most DV1394_MAX_FRAMES. */
+ unsigned int n_frames;
+
+ /* send/receive PAL or NTSC video format */
+ enum pal_or_ntsc format;
+
+ /* the following are used only for transmission */
+
+ /* set these to zero unless you want a
+ non-default empty packet rate (see below) */
+ unsigned long cip_n;
+ unsigned long cip_d;
+
+ /* set this to zero unless you want a
+ non-default SYT cycle offset (default = 3 cycles) */
+ unsigned int syt_offset;
+};
+
+/* NOTE: you may only allocate the DV frame ringbuffer once each time
+ you open the dv1394 device. DV1394_INIT will fail if you call it a
+ second time with different 'n_frames' or 'format' arguments (which
+ would imply a different size for the ringbuffer). If you need a
+ different buffer size, simply close and re-open the device, then
+ initialize it with your new settings. */
+
+/* Q: What are cip_n and cip_d? */
+
+/*
+ A: DV video streams do not utilize 100% of the potential bandwidth offered
+ by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
+ DV devices must periodically insert empty packets into the 1394 data stream.
+ Typically there is one empty packet per 14-16 data-carrying packets.
+
+ Some DV devices will accept a wide range of empty packet rates, while others
+ require a precise rate. If the dv1394 driver produces empty packets at
+ a rate that your device does not accept, you may see ugly patterns on the
+ DV output, or even no output at all.
+
+ The default empty packet insertion rate seems to work for many people; if
+ your DV output is stable, you can simply ignore this discussion. However,
+ we have exposed the empty packet rate as a parameter to support devices that
+ do not work with the default rate.
+
+ The decision to insert an empty packet is made with a numerator/denominator
+ algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
+ You can alter the empty packet rate by passing non-zero values for cip_n
+ and cip_d to the INIT ioctl.
+
+ */
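+
+/* Illustrative only -- a hypothetical init that overrides the default empty
+   packet rate, in the neighbourhood of the "one empty packet per 14-16 data
+   packets" mentioned above. All field values are examples, not
+   recommendations, and fd is assumed to be an open dv1394 file descriptor:
+
+       struct dv1394_init init;
+       memset(&init, 0, sizeof(init));
+       init.api_version = DV1394_API_VERSION;
+       init.channel     = DV1394_DEFAULT_CHANNEL;
+       init.n_frames    = 4;
+       init.format      = DV1394_NTSC;
+       init.cip_n       = 1;     (empty packets at an average rate of ...)
+       init.cip_d       = 15;    (... CIP_N / CIP_D, here one per 15)
+       ioctl(fd, DV1394_INIT, &init);
+*/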
+
+
+
+struct dv1394_status {
+ /* this embedded init struct returns the current dv1394
+ parameters in use */
+ struct dv1394_init init;
+
+ /* the ringbuffer frame that is currently being
+ displayed. (-1 if the device is not transmitting anything) */
+ int active_frame;
+
+ /* index of the first buffer (ahead of active_frame) that
+ is ready to be filled with data */
+ unsigned int first_clear_frame;
+
+ /* how many buffers, including first_clear_buffer, are
+ ready to be filled with data */
+ unsigned int n_clear_frames;
+
+ /* how many times the DV stream has underflowed, overflowed,
+ or otherwise encountered an error, since the previous call
+ to DV1394_GET_STATUS */
+ unsigned int dropped_frames;
+
+ /* N.B. The dropped_frames counter is only a lower bound on the actual
+ number of dropped frames, with the special case that if dropped_frames
+ is zero, then it is guaranteed that NO frames have been dropped
+ since the last call to DV1394_GET_STATUS.
+ */
+};
+
+
+#endif /* _DV_1394_H */
diff --git a/contrib/ffmpeg/libavformat/dvenc.c b/contrib/ffmpeg/libavformat/dvenc.c
new file mode 100644
index 000000000..79cee7af6
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dvenc.c
@@ -0,0 +1,407 @@
+/*
+ * General DV muxer/demuxer
+ * Copyright (c) 2003 Roman Shaposhnik
+ *
+ * Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth
+ * of DV technical info.
+ *
+ * Raw DV format
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * 50 Mbps (DVCPRO50) support
+ * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <time.h>
+#include "avformat.h"
+#include "dvdata.h"
+#include "dv.h"
+#include "fifo.h"
+
+struct DVMuxContext {
+ const DVprofile* sys; /* Current DV profile. E.g.: 525/60, 625/50 */
+ int n_ast; /* Number of stereo audio streams (up to 2) */
+ AVStream *ast[2]; /* Stereo audio streams */
+ AVFifoBuffer audio_data[2]; /* Fifo for storing excessive amounts of PCM */
+ int frames; /* Number of a current frame */
+ time_t start_time; /* Start time of recording */
+ int has_audio; /* frame under construction has audio */
+ int has_video; /* frame under construction has video */
+ uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under construction */
+};
+
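+/* Which AAUX pack id is written into each of the 9 audio DIF blocks of a DIF
+ * segment; 0xff makes dv_write_pack() emit a "no information" filler pack.
+ * Even and odd segments alternate between the two layouts. */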
+static const int dv_aaux_packs_dist[12][9] = {
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+};
+
+static int dv_audio_frame_size(const DVprofile* sys, int frame)
+{
+ return sys->audio_samples_dist[frame % (sizeof(sys->audio_samples_dist)/
+ sizeof(sys->audio_samples_dist[0]))];
+}
+
+static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* buf)
+{
+ struct tm tc;
+ time_t ct;
+ int ltc_frame;
+
+ buf[0] = (uint8_t)pack_id;
+ switch (pack_id) {
+ case dv_timecode:
+ ct = (time_t)(c->frames / ((float)c->sys->frame_rate /
+ (float)c->sys->frame_rate_base));
+ brktimegm(ct, &tc);
+ /*
+ * The LTC drop-frame counter skips two frame numbers (0 and 1) at the
+ * start of every minute, except for minutes exactly divisible by 10.
+ */
+ ltc_frame = (c->frames + 2*ct/60 - 2*ct/600) % c->sys->ltc_divisor;
+ buf[1] = (0 << 7) | /* Color frame: 0 - unsync; 1 - sync mode */
+ (1 << 6) | /* Drop frame timecode: 0 - nondrop; 1 - drop */
+ ((ltc_frame / 10) << 4) | /* Tens of frames */
+ (ltc_frame % 10); /* Units of frames */
+ buf[2] = (1 << 7) | /* Biphase mark polarity correction: 0 - even; 1 - odd */
+ ((tc.tm_sec / 10) << 4) | /* Tens of seconds */
+ (tc.tm_sec % 10); /* Units of seconds */
+ buf[3] = (1 << 7) | /* Binary group flag BGF0 */
+ ((tc.tm_min / 10) << 4) | /* Tens of minutes */
+ (tc.tm_min % 10); /* Units of minutes */
+ buf[4] = (1 << 7) | /* Binary group flag BGF2 */
+ (1 << 6) | /* Binary group flag BGF1 */
+ ((tc.tm_hour / 10) << 4) | /* Tens of hours */
+ (tc.tm_hour % 10); /* Units of hours */
+ break;
+ case dv_audio_source: /* AAUX source pack */
+ buf[1] = (0 << 7) | /* locked mode */
+ (1 << 6) | /* reserved -- always 1 */
+ (dv_audio_frame_size(c->sys, c->frames) -
+ c->sys->audio_min_samples[0]);
+ /* # of samples */
+ buf[2] = (0 << 7) | /* multi-stereo */
+ (0 << 5) | /* #of audio channels per block: 0 -- 1 channel */
+ (0 << 4) | /* pair bit: 0 -- one pair of channels */
+ 0; /* audio mode */
+ buf[3] = (1 << 7) | /* res */
+ (1 << 6) | /* multi-language flag */
+ (c->sys->dsf << 5) | /* system: 60fields/50fields */
+ (c->sys->n_difchan & 2); /* definition: 0 -- 25Mbps, 2 -- 50Mbps */
+ buf[4] = (1 << 7) | /* emphasis: 1 -- off */
+ (0 << 6) | /* emphasis time constant: 0 -- reserved */
+ (0 << 3) | /* frequency: 0 -- 48 kHz, 1 -- 44.1 kHz, 2 -- 32 kHz */
+ 0; /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */
+ break;
+ case dv_audio_control:
+ buf[1] = (0 << 6) | /* copy protection: 0 -- unrestricted */
+ (1 << 4) | /* input source: 1 -- digital input */
+ (3 << 2) | /* compression: 3 -- no information */
+ 0; /* misc. info/SMPTE emphasis off */
+ buf[2] = (1 << 7) | /* recording start point: 1 -- no */
+ (1 << 6) | /* recording end point: 1 -- no */
+ (1 << 3) | /* recording mode: 1 -- original */
+ 7;
+ buf[3] = (1 << 7) | /* direction: 1 -- forward */
+ (c->sys->pix_fmt == PIX_FMT_YUV420P ? 0x20 : /* speed */
+ c->sys->ltc_divisor*4);
+ buf[4] = (1 << 7) | /* reserved -- always 1 */
+ 0x7f; /* genre category */
+ break;
+ case dv_audio_recdate:
+ case dv_video_recdate: /* VAUX recording date */
+ ct = c->start_time + (time_t)(c->frames /
+ ((float)c->sys->frame_rate / (float)c->sys->frame_rate_base));
+ brktimegm(ct, &tc);
+ buf[1] = 0xff; /* ds, tm, tens of time zone, units of time zone */
+ /* 0xff is very likely to be "unknown" */
+ buf[2] = (3 << 6) | /* reserved -- always 1 */
+ ((tc.tm_mday / 10) << 4) | /* Tens of day */
+ (tc.tm_mday % 10); /* Units of day */
+ buf[3] = /* we set high 4 bits to 0, shouldn't we set them to week? */
+ ((tc.tm_mon / 10) << 4) | /* Tens of month */
+ (tc.tm_mon % 10); /* Units of month */
+ buf[4] = (((tc.tm_year % 100) / 10) << 4) | /* Tens of year */
+ (tc.tm_year % 10); /* Units of year */
+ break;
+ case dv_audio_rectime: /* AAUX recording time */
+ case dv_video_rectime: /* VAUX recording time */
+ ct = c->start_time + (time_t)(c->frames /
+ ((float)c->sys->frame_rate / (float)c->sys->frame_rate_base));
+ brktimegm(ct, &tc);
+ buf[1] = (3 << 6) | /* reserved -- always 1 */
+ 0x3f; /* tens of frame, units of frame: 0x3f - "unknown" ? */
+ buf[2] = (1 << 7) | /* reserved -- always 1 */
+ ((tc.tm_sec / 10) << 4) | /* Tens of seconds */
+ (tc.tm_sec % 10); /* Units of seconds */
+ buf[3] = (1 << 7) | /* reserved -- always 1 */
+ ((tc.tm_min / 10) << 4) | /* Tens of minutes */
+ (tc.tm_min % 10); /* Units of minutes */
+ buf[4] = (3 << 6) | /* reserved -- always 1 */
+ ((tc.tm_hour / 10) << 4) | /* Tens of hours */
+ (tc.tm_hour % 10); /* Units of hours */
+ break;
+ default:
+ buf[1] = buf[2] = buf[3] = buf[4] = 0xff;
+ }
+ return 5;
+}
+
+static void dv_inject_audio(DVMuxContext *c, int channel, uint8_t* frame_ptr)
+{
+ int i, j, d, of, size;
+ size = 4 * dv_audio_frame_size(c->sys, c->frames);
+ frame_ptr += channel * c->sys->difseg_size * 150 * 80;
+ for (i = 0; i < c->sys->difseg_size; i++) {
+ frame_ptr += 6 * 80; /* skip DIF segment header */
+ for (j = 0; j < 9; j++) {
+ dv_write_pack(dv_aaux_packs_dist[i][j], c, &frame_ptr[3]);
+ for (d = 8; d < 80; d+=2) {
+ of = c->sys->audio_shuffle[i][j] + (d - 8)/2 * c->sys->audio_stride;
+ if (of*2 >= size)
+ continue;
+
+ frame_ptr[d] = av_fifo_peek(&c->audio_data[channel], of*2+1); // FIXME: maybe we have to admit
+ frame_ptr[d+1] = av_fifo_peek(&c->audio_data[channel], of*2); // that DV is big-endian PCM
+ }
+ frame_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
+ }
+ }
+}
+
+static void dv_inject_metadata(DVMuxContext *c, uint8_t* frame)
+{
+ int j, k;
+ uint8_t* buf;
+
+ for (buf = frame; buf < frame + c->sys->frame_size; buf += 150 * 80) {
+ /* DV subcode: 2nd and 3rd DIF blocks */
+ for (j = 80; j < 80 * 3; j += 80) {
+ for (k = 6; k < 6 * 8; k += 8)
+ dv_write_pack(dv_timecode, c, &buf[j+k]);
+
+ if (((long)(buf-frame)/(c->sys->frame_size/(c->sys->difseg_size*c->sys->n_difchan))%c->sys->difseg_size) > 5) { /* FIXME: is this really needed ? */
+ dv_write_pack(dv_video_recdate, c, &buf[j+14]);
+ dv_write_pack(dv_video_rectime, c, &buf[j+22]);
+ dv_write_pack(dv_video_recdate, c, &buf[j+38]);
+ dv_write_pack(dv_video_rectime, c, &buf[j+46]);
+ }
+ }
+
+ /* DV VAUX: 4th, 5th and 6th DIF blocks */
+ for (j = 80*3 + 3; j < 80*6; j += 80) {
+ dv_write_pack(dv_video_recdate, c, &buf[j+5*2]);
+ dv_write_pack(dv_video_rectime, c, &buf[j+5*3]);
+ dv_write_pack(dv_video_recdate, c, &buf[j+5*11]);
+ dv_write_pack(dv_video_rectime, c, &buf[j+5*12]);
+ }
+ }
+}
+
+/*
+ * The following 3 functions constitute our interface to the world
+ */
+
+int dv_assemble_frame(DVMuxContext *c, AVStream* st,
+ const uint8_t* data, int data_size, uint8_t** frame)
+{
+ int i, reqasize;
+
+ *frame = &c->frame_buf[0];
+ reqasize = 4 * dv_audio_frame_size(c->sys, c->frames);
+
+ switch (st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ /* FIXME: we need a more sensible approach than this one */
+ if (c->has_video)
+ av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient audio data or severe sync problem.\n", c->frames);
+
+ memcpy(*frame, data, c->sys->frame_size);
+ c->has_video = 1;
+ break;
+ case CODEC_TYPE_AUDIO:
+ for (i = 0; i < c->n_ast && st != c->ast[i]; i++);
+
+ /* FIXME: we need a more sensible approach than this one */
+ if (av_fifo_size(&c->audio_data[i]) + data_size >= 100*AVCODEC_MAX_AUDIO_FRAME_SIZE)
+ av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames);
+ av_fifo_write(&c->audio_data[i], data, data_size);
+
+ /* Let's see if we've got enough audio for one DV frame */
+ c->has_audio |= ((reqasize <= av_fifo_size(&c->audio_data[i])) << i);
+
+ break;
+ default:
+ break;
+ }
+
+ /* Let's see if we have enough data to construct one DV frame */
+ if (c->has_video == 1 && c->has_audio + 1 == 1<<c->n_ast) {
+ dv_inject_metadata(c, *frame);
+ for (i=0; i<c->n_ast; i++) {
+ dv_inject_audio(c, i, *frame);
+ av_fifo_drain(&c->audio_data[i], reqasize);
+ }
+
+ c->has_video = 0;
+ c->has_audio = 0;
+ c->frames++;
+
+ return c->sys->frame_size;
+ }
+
+ return 0;
+}
+
+DVMuxContext* dv_init_mux(AVFormatContext* s)
+{
+ DVMuxContext *c = (DVMuxContext *)s->priv_data;
+ AVStream *vst = NULL;
+ int i;
+
+ /* we support at most 1 video and 2 audio streams */
+ if (s->nb_streams > 3)
+ return NULL;
+
+ c->n_ast = 0;
+ c->ast[0] = c->ast[1] = NULL;
+
+ /* We have to sort out which stream is audio and which is video */
+ for (i=0; i<s->nb_streams; i++) {
+ switch (s->streams[i]->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ vst = s->streams[i];
+ break;
+ case CODEC_TYPE_AUDIO:
+ c->ast[c->n_ast++] = s->streams[i];
+ break;
+ default:
+ goto bail_out;
+ }
+ }
+
+ /* Some checks -- DV format is very picky about its incoming streams */
+ if (!vst || vst->codec->codec_id != CODEC_ID_DVVIDEO)
+ goto bail_out;
+ for (i=0; i<c->n_ast; i++) {
+ if (c->ast[i] && (c->ast[i]->codec->codec_id != CODEC_ID_PCM_S16LE ||
+ c->ast[i]->codec->sample_rate != 48000 ||
+ c->ast[i]->codec->channels != 2))
+ goto bail_out;
+ }
+ c->sys = dv_codec_profile(vst->codec);
+ if (!c->sys)
+ goto bail_out;
+
+ if((c->n_ast > 1) && (c->sys->n_difchan < 2)) {
+ /* only 1 stereo pair is allowed in 25Mbps mode */
+ goto bail_out;
+ }
+
+ /* Ok, everything seems to be in working order */
+ c->frames = 0;
+ c->has_audio = 0;
+ c->has_video = 0;
+ c->start_time = (time_t)s->timestamp;
+
+ for (i=0; i<c->n_ast; i++) {
+ if (c->ast[i] && av_fifo_init(&c->audio_data[i], 100*AVCODEC_MAX_AUDIO_FRAME_SIZE) < 0) {
+ while (i>0) {
+ i--;
+ av_fifo_free(&c->audio_data[i]);
+ }
+ goto bail_out;
+ }
+ }
+
+ return c;
+
+bail_out:
+ return NULL;
+}
+
+void dv_delete_mux(DVMuxContext *c)
+{
+ int i;
+ for (i=0; i < c->n_ast; i++)
+ av_fifo_free(&c->audio_data[i]);
+}
+
+#ifdef CONFIG_MUXERS
+static int dv_write_header(AVFormatContext *s)
+{
+ if (!dv_init_mux(s)) {
+ av_log(s, AV_LOG_ERROR, "Can't initialize DV format!\n"
+ "Make sure that you supply exactly two streams:\n"
+ " video: 25fps or 29.97fps, audio: 2ch/48Khz/PCM\n"
+ " (50Mbps allows an optional second audio stream)\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int dv_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ uint8_t* frame;
+ int fsize;
+
+ fsize = dv_assemble_frame((DVMuxContext *)s->priv_data, s->streams[pkt->stream_index],
+ pkt->data, pkt->size, &frame);
+ if (fsize > 0) {
+ put_buffer(&s->pb, frame, fsize);
+ put_flush_packet(&s->pb);
+ }
+ return 0;
+}
+
+/*
+ * We might end up with some extra A/V data without a matching counterpart,
+ * e.g. video data without enough audio to write the complete frame.
+ * Currently we simply drop the last frame. I don't know whether this
+ * is the best strategy of all.
+ */
+static int dv_write_trailer(struct AVFormatContext *s)
+{
+ dv_delete_mux((DVMuxContext *)s->priv_data);
+ return 0;
+}
+#endif /* CONFIG_MUXERS */
+
+#ifdef CONFIG_DV_MUXER
+AVOutputFormat dv_muxer = {
+ "dv",
+ "DV video format",
+ NULL,
+ "dv",
+ sizeof(DVMuxContext),
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_DVVIDEO,
+ dv_write_header,
+ dv_write_packet,
+ dv_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/electronicarts.c b/contrib/ffmpeg/libavformat/electronicarts.c
new file mode 100644
index 000000000..943f75b42
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/electronicarts.c
@@ -0,0 +1,291 @@
+/* Electronic Arts Multimedia File Demuxer
+ * Copyright (c) 2004 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file electronicarts.c
+ * Electronic Arts Multimedia file demuxer (WVE/UV2/etc.)
+ * by Robin Kay (komadori at gekkou.co.uk)
+ */
+
+#include "avformat.h"
+
+#define SCHl_TAG MKTAG('S', 'C', 'H', 'l')
+#define PT00_TAG MKTAG('P', 'T', 0x0, 0x0)
+#define SCDl_TAG MKTAG('S', 'C', 'D', 'l')
+#define pIQT_TAG MKTAG('p', 'I', 'Q', 'T')
+#define SCEl_TAG MKTAG('S', 'C', 'E', 'l')
+#define _TAG MKTAG('', '', '', '')
+
+#define EA_SAMPLE_RATE 22050
+#define EA_BITS_PER_SAMPLE 16
+#define EA_PREAMBLE_SIZE 8
+
+typedef struct EaDemuxContext {
+ int width;
+ int height;
+ int video_stream_index;
+ int track_count;
+
+ int audio_stream_index;
+ int audio_frame_counter;
+
+ int64_t audio_pts;
+ int64_t video_pts;
+ int video_pts_inc;
+ float fps;
+
+ int num_channels;
+ int num_samples;
+ int compression_type;
+} EaDemuxContext;
+
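+/* Read a variable-length big-endian integer: the first byte gives the number
+ * of bytes that follow, which are accumulated most significant byte first. */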
+static uint32_t read_arbitary(ByteIOContext *pb) {
+ uint8_t size, byte;
+ int i;
+ uint32_t word;
+
+ size = get_byte(pb);
+
+ word = 0;
+ for (i = 0; i < size; i++) {
+ byte = get_byte(pb);
+ word <<= 8;
+ word |= byte;
+ }
+
+ return word;
+}
+
+/*
+ * Process WVE file header
+ * Returns 1 if the WVE file is valid and successfully opened, 0 otherwise
+ */
+static int process_ea_header(AVFormatContext *s) {
+ int inHeader;
+ uint32_t blockid, size;
+ EaDemuxContext *ea = (EaDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+
+ if (get_buffer(pb, (void*)&blockid, 4) != 4) {
+ return 0;
+ }
+ if (le2me_32(blockid) != SCHl_TAG) {
+ return 0;
+ }
+
+ if (get_buffer(pb, (void*)&size, 4) != 4) {
+ return 0;
+ }
+ size = le2me_32(size);
+
+ if (get_buffer(pb, (void*)&blockid, 4) != 4) {
+ return 0;
+ }
+ if (le2me_32(blockid) != PT00_TAG) {
+ av_log (s, AV_LOG_ERROR, "PT header missing\n");
+ return 0;
+ }
+
+ inHeader = 1;
+ while (inHeader) {
+ int inSubheader;
+ uint8_t byte;
+ byte = get_byte(pb) & 0xFF;
+
+ switch (byte) {
+ case 0xFD:
+ av_log (s, AV_LOG_INFO, "entered audio subheader\n");
+ inSubheader = 1;
+ while (inSubheader) {
+ uint8_t subbyte;
+ subbyte = get_byte(pb) & 0xFF;
+
+ switch (subbyte) {
+ case 0x82:
+ ea->num_channels = read_arbitary(pb);
+ av_log (s, AV_LOG_INFO, "num_channels (element 0x82) set to 0x%08x\n", ea->num_channels);
+ break;
+ case 0x83:
+ ea->compression_type = read_arbitary(pb);
+ av_log (s, AV_LOG_INFO, "compression_type (element 0x83) set to 0x%08x\n", ea->compression_type);
+ break;
+ case 0x85:
+ ea->num_samples = read_arbitary(pb);
+ av_log (s, AV_LOG_INFO, "num_samples (element 0x85) set to 0x%08x\n", ea->num_samples);
+ break;
+ case 0x8A:
+ av_log (s, AV_LOG_INFO, "element 0x%02x set to 0x%08x\n", subbyte, read_arbitary(pb));
+ av_log (s, AV_LOG_INFO, "exited audio subheader\n");
+ inSubheader = 0;
+ break;
+ default:
+ av_log (s, AV_LOG_INFO, "element 0x%02x set to 0x%08x\n", subbyte, read_arbitary(pb));
+ break;
+ }
+ }
+ break;
+ case 0xFF:
+ av_log (s, AV_LOG_INFO, "end of header block reached\n");
+ inHeader = 0;
+ break;
+ default:
+ av_log (s, AV_LOG_INFO, "header element 0x%02x set to 0x%08x\n", byte, read_arbitary(pb));
+ break;
+ }
+ }
+
+ if ((ea->num_channels != 2) || (ea->compression_type != 7)) {
+ av_log (s, AV_LOG_ERROR, "unsupported stream type\n");
+ return 0;
+ }
+
+ /* skip to the start of the data */
+ url_fseek(pb, size, SEEK_SET);
+
+ return 1;
+}
+
+
+static int ea_probe(AVProbeData *p)
+{
+ if (p->buf_size < 4)
+ return 0;
+
+ if (LE_32(&p->buf[0]) != SCHl_TAG)
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int ea_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ EaDemuxContext *ea = (EaDemuxContext *)s->priv_data;
+ AVStream *st;
+
+ if (!process_ea_header(s))
+ return AVERROR_IO;
+
+#if 0
+ /* initialize the video decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ ea->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_EA_MJPEG;
+ st->codec->codec_tag = 0; /* no fourcc */
+#endif
+
+ /* initialize the audio decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, EA_SAMPLE_RATE);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_ADPCM_EA;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = ea->num_channels;
+ st->codec->sample_rate = EA_SAMPLE_RATE;
+ st->codec->bits_per_sample = EA_BITS_PER_SAMPLE;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample / 4;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ ea->audio_stream_index = st->index;
+ ea->audio_frame_counter = 0;
+
+ return 1;
+}
+
+static int ea_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ EaDemuxContext *ea = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret = 0;
+ int packet_read = 0;
+ unsigned char preamble[EA_PREAMBLE_SIZE];
+ unsigned int chunk_type, chunk_size;
+
+ while (!packet_read) {
+
+ if (get_buffer(pb, preamble, EA_PREAMBLE_SIZE) != EA_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ chunk_type = LE_32(&preamble[0]);
+ chunk_size = LE_32(&preamble[4]) - EA_PREAMBLE_SIZE;
+
+ switch (chunk_type) {
+ /* audio data */
+ case SCDl_TAG:
+ ret = av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ ret = AVERROR_IO;
+ else {
+ pkt->stream_index = ea->audio_stream_index;
+ pkt->pts = 90000;
+ pkt->pts *= ea->audio_frame_counter;
+ pkt->pts /= EA_SAMPLE_RATE;
+
+ /* 2 samples/byte, 1 or 2 samples per frame depending
+ * on stereo; chunk also has 12-byte header */
+ ea->audio_frame_counter += ((chunk_size - 12) * 2) /
+ ea->num_channels;
+ }
+
+ packet_read = 1;
+ break;
+
+ /* ending tag */
+ case SCEl_TAG:
+ ret = AVERROR_IO;
+ packet_read = 1;
+ break;
+
+ default:
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ break;
+ }
+
+ /* ending packet */
+ if (chunk_type == SCEl_TAG) {
+ }
+ }
+
+ return ret;
+}
+
+static int ea_read_close(AVFormatContext *s)
+{
+// EaDemuxContext *ea = (EaDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+AVInputFormat ea_demuxer = {
+ "ea",
+ "Electronic Arts Multimedia Format",
+ sizeof(EaDemuxContext),
+ ea_probe,
+ ea_read_header,
+ ea_read_packet,
+ ea_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/ffm.c b/contrib/ffmpeg/libavformat/ffm.c
new file mode 100644
index 000000000..539b45d5f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ffm.c
@@ -0,0 +1,792 @@
+/*
+ * FFM (ffserver live feed) muxer and demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+
+/* The FFM file is made of blocks of fixed size */
+#define FFM_HEADER_SIZE 14
+#define PACKET_ID 0x666d
+
+/* each packet contains frames (which can span several packets) */
+#define FRAME_HEADER_SIZE 8
+#define FLAG_KEY_FRAME 0x01
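+
+/* On-disk layout, as written by flush_packet() and ffm_write_packet() below:
+ *
+ * packet header (FFM_HEADER_SIZE = 14 bytes, one per fixed-size block):
+ *   PACKET_ID (be16), number of unused padding bytes at the end of the
+ *   block (be16), pts of the first frame starting in the block (be64),
+ *   offset of the first frame header within the block, with 0x8000 set
+ *   on the first/resync packet (be16)
+ *
+ * frame header (FRAME_HEADER_SIZE = 8 bytes):
+ *   stream index (byte), flags (byte, FLAG_KEY_FRAME),
+ *   frame size (be24), frame duration in microseconds (be24)
+ */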
+
+typedef struct FFMStream {
+ int64_t pts;
+} FFMStream;
+
+enum {
+ READ_HEADER,
+ READ_DATA,
+};
+
+typedef struct FFMContext {
+ /* only reading mode */
+ offset_t write_index, file_size;
+ int read_state;
+ uint8_t header[FRAME_HEADER_SIZE];
+
+ /* read and write */
+ int first_packet; /* true if first packet, needed to set the discontinuity tag */
+ int first_frame_in_packet; /* true if first frame in packet, needed to know if PTS information is valid */
+ int packet_size;
+ int frame_offset;
+ int64_t pts;
+ uint8_t *packet_ptr, *packet_end;
+ uint8_t packet[FFM_PACKET_SIZE];
+} FFMContext;
+
+static int64_t get_pts(AVFormatContext *s, offset_t pos);
+
+/* disable pts hack for testing */
+int ffm_nopts = 0;
+
+#ifdef CONFIG_MUXERS
+static void flush_packet(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ int fill_size, h;
+ ByteIOContext *pb = &s->pb;
+
+ fill_size = ffm->packet_end - ffm->packet_ptr;
+ memset(ffm->packet_ptr, 0, fill_size);
+
+ if (url_ftell(pb) % ffm->packet_size)
+ av_abort();
+
+ /* put header */
+ put_be16(pb, PACKET_ID);
+ put_be16(pb, fill_size);
+ put_be64(pb, ffm->pts);
+ h = ffm->frame_offset;
+ if (ffm->first_packet)
+ h |= 0x8000;
+ put_be16(pb, h);
+ put_buffer(pb, ffm->packet, ffm->packet_end - ffm->packet);
+ put_flush_packet(pb);
+
+ /* prepare next packet */
+ ffm->frame_offset = 0; /* no key frame */
+ ffm->pts = 0; /* no pts */
+ ffm->packet_ptr = ffm->packet;
+ ffm->first_packet = 0;
+}
+
+/* 'first' is true if first data of a frame */
+static void ffm_write_data(AVFormatContext *s,
+ const uint8_t *buf, int size,
+ int64_t pts, int first)
+{
+ FFMContext *ffm = s->priv_data;
+ int len;
+
+ if (first && ffm->frame_offset == 0)
+ ffm->frame_offset = ffm->packet_ptr - ffm->packet + FFM_HEADER_SIZE;
+ if (first && ffm->pts == 0)
+ ffm->pts = pts;
+
+ /* write as many packets as needed */
+ while (size > 0) {
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (len > size)
+ len = size;
+ memcpy(ffm->packet_ptr, buf, len);
+
+ ffm->packet_ptr += len;
+ buf += len;
+ size -= len;
+ if (ffm->packet_ptr >= ffm->packet_end) {
+ /* special case: no pts in packet: we leave the current one */
+ if (ffm->pts == 0)
+ ffm->pts = pts;
+
+ flush_packet(s);
+ }
+ }
+}
+
+static int ffm_write_header(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ AVStream *st;
+ FFMStream *fst;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *codec;
+ int bit_rate, i;
+
+ ffm->packet_size = FFM_PACKET_SIZE;
+
+ /* header */
+ put_le32(pb, MKTAG('F', 'F', 'M', '1'));
+ put_be32(pb, ffm->packet_size);
+ /* XXX: store write position in other file ? */
+ put_be64(pb, ffm->packet_size); /* current write position */
+
+ put_be32(pb, s->nb_streams);
+ bit_rate = 0;
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ bit_rate += st->codec->bit_rate;
+ }
+ put_be32(pb, bit_rate);
+
+ /* list of streams */
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ fst = av_mallocz(sizeof(FFMStream));
+ if (!fst)
+ goto fail;
+ av_set_pts_info(st, 64, 1, 1000000);
+ st->priv_data = fst;
+
+ codec = st->codec;
+ /* generic info */
+ put_be32(pb, codec->codec_id);
+ put_byte(pb, codec->codec_type);
+ put_be32(pb, codec->bit_rate);
+ put_be32(pb, st->quality);
+ put_be32(pb, codec->flags);
+ put_be32(pb, codec->flags2);
+ put_be32(pb, codec->debug);
+ /* specific info */
+ switch(codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ put_be32(pb, codec->time_base.num);
+ put_be32(pb, codec->time_base.den);
+ put_be16(pb, codec->width);
+ put_be16(pb, codec->height);
+ put_be16(pb, codec->gop_size);
+ put_be32(pb, codec->pix_fmt);
+ put_byte(pb, codec->qmin);
+ put_byte(pb, codec->qmax);
+ put_byte(pb, codec->max_qdiff);
+ put_be16(pb, (int) (codec->qcompress * 10000.0));
+ put_be16(pb, (int) (codec->qblur * 10000.0));
+ put_be32(pb, codec->bit_rate_tolerance);
+ put_strz(pb, codec->rc_eq);
+ put_be32(pb, codec->rc_max_rate);
+ put_be32(pb, codec->rc_min_rate);
+ put_be32(pb, codec->rc_buffer_size);
+ put_be64(pb, av_dbl2int(codec->i_quant_factor));
+ put_be64(pb, av_dbl2int(codec->b_quant_factor));
+ put_be64(pb, av_dbl2int(codec->i_quant_offset));
+ put_be64(pb, av_dbl2int(codec->b_quant_offset));
+ put_be32(pb, codec->dct_algo);
+ put_be32(pb, codec->strict_std_compliance);
+ put_be32(pb, codec->max_b_frames);
+ put_be32(pb, codec->luma_elim_threshold);
+ put_be32(pb, codec->chroma_elim_threshold);
+ put_be32(pb, codec->mpeg_quant);
+ put_be32(pb, codec->intra_dc_precision);
+ put_be32(pb, codec->me_method);
+ put_be32(pb, codec->mb_decision);
+ put_be32(pb, codec->nsse_weight);
+ put_be32(pb, codec->frame_skip_cmp);
+ put_be64(pb, av_dbl2int(codec->rc_buffer_aggressivity));
+ break;
+ case CODEC_TYPE_AUDIO:
+ put_be32(pb, codec->sample_rate);
+ put_le16(pb, codec->channels);
+ put_le16(pb, codec->frame_size);
+ break;
+ default:
+ return -1;
+ }
+ /* hack to have real time */
+ if (ffm_nopts)
+ fst->pts = 0;
+ else
+ fst->pts = av_gettime();
+ }
+
+ /* flush until end of block reached */
+ while ((url_ftell(pb) % ffm->packet_size) != 0)
+ put_byte(pb, 0);
+
+ put_flush_packet(pb);
+
+ /* init packet mux */
+ ffm->packet_ptr = ffm->packet;
+ ffm->packet_end = ffm->packet + ffm->packet_size - FFM_HEADER_SIZE;
+ assert(ffm->packet_end >= ffm->packet);
+ ffm->frame_offset = 0;
+ ffm->pts = 0;
+ ffm->first_packet = 1;
+
+ return 0;
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ av_freep(&st->priv_data);
+ }
+ return -1;
+}
+
+static int ffm_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVStream *st = s->streams[pkt->stream_index];
+ FFMStream *fst = st->priv_data;
+ int64_t pts;
+ uint8_t header[FRAME_HEADER_SIZE];
+ int duration;
+ int size= pkt->size;
+
+ //XXX/FIXME use duration from pkt
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ duration = ((float)st->codec->frame_size / st->codec->sample_rate * 1000000.0);
+ } else {
+ duration = (1000000.0 * st->codec->time_base.num / (float)st->codec->time_base.den);
+ }
+
+ pts = fst->pts;
+ /* packet size & key_frame */
+ header[0] = pkt->stream_index;
+ header[1] = 0;
+ if (pkt->flags & PKT_FLAG_KEY)
+ header[1] |= FLAG_KEY_FRAME;
+ header[2] = (size >> 16) & 0xff;
+ header[3] = (size >> 8) & 0xff;
+ header[4] = size & 0xff;
+ header[5] = (duration >> 16) & 0xff;
+ header[6] = (duration >> 8) & 0xff;
+ header[7] = duration & 0xff;
+ ffm_write_data(s, header, FRAME_HEADER_SIZE, pts, 1);
+ ffm_write_data(s, pkt->data, size, pts, 0);
+
+ fst->pts += duration;
+ return 0;
+}
+
+static int ffm_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ FFMContext *ffm = s->priv_data;
+
+ /* flush packets */
+ if (ffm->packet_ptr > ffm->packet)
+ flush_packet(s);
+
+ put_flush_packet(pb);
+
+ if (!url_is_streamed(pb)) {
+ int64_t size;
+ /* update the write offset */
+ size = url_ftell(pb);
+ url_fseek(pb, 8, SEEK_SET);
+ put_be64(pb, size);
+ put_flush_packet(pb);
+ }
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/* ffm demux */
+
+static int ffm_is_avail_data(AVFormatContext *s, int size)
+{
+ FFMContext *ffm = s->priv_data;
+ offset_t pos, avail_size;
+ int len;
+
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (!ffm_nopts) {
+ /* XXX: I don't understand this test, so I disabled it for testing */
+ if (size <= len)
+ return 1;
+ }
+ pos = url_ftell(&s->pb);
+ if (pos == ffm->write_index) {
+ /* exactly at the end of stream */
+ return 0;
+ } else if (pos < ffm->write_index) {
+ avail_size = ffm->write_index - pos;
+ } else {
+ avail_size = (ffm->file_size - pos) + (ffm->write_index - FFM_PACKET_SIZE);
+ }
+ avail_size = (avail_size / ffm->packet_size) * (ffm->packet_size - FFM_HEADER_SIZE) + len;
+ if (size <= avail_size)
+ return 1;
+ else
+ return 0;
+}
+
+/* first is true if we read the frame header */
+static int ffm_read_data(AVFormatContext *s,
+ uint8_t *buf, int size, int first)
+{
+ FFMContext *ffm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int len, fill_size, size1, frame_offset;
+
+ size1 = size;
+ while (size > 0) {
+ redo:
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (len > size)
+ len = size;
+ if (len == 0) {
+ if (url_ftell(pb) == ffm->file_size)
+ url_fseek(pb, ffm->packet_size, SEEK_SET);
+ retry_read:
+ get_be16(pb); /* PACKET_ID */
+ fill_size = get_be16(pb);
+ ffm->pts = get_be64(pb);
+ ffm->first_frame_in_packet = 1;
+ frame_offset = get_be16(pb);
+ get_buffer(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
+ ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
+ if (ffm->packet_end < ffm->packet)
+ return -1;
+            /* if this is the first packet or a resynchronization packet,
+               it must be handled specially */
+ if (ffm->first_packet || (frame_offset & 0x8000)) {
+ if (!frame_offset) {
+ /* This packet has no frame headers in it */
+ if (url_ftell(pb) >= ffm->packet_size * 3) {
+ url_fseek(pb, -ffm->packet_size * 2, SEEK_CUR);
+ goto retry_read;
+ }
+ /* This is bad, we cannot find a valid frame header */
+ return 0;
+ }
+ ffm->first_packet = 0;
+                if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE)
+ return -1;
+ ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE;
+ if (!first)
+ break;
+ } else {
+ ffm->packet_ptr = ffm->packet;
+ }
+ goto redo;
+ }
+ memcpy(buf, ffm->packet_ptr, len);
+ buf += len;
+ ffm->packet_ptr += len;
+ size -= len;
+ first = 0;
+ }
+ return size1 - size;
+}
+
+
+static void adjust_write_index(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int64_t pts;
+ //offset_t orig_write_index = ffm->write_index;
+ offset_t pos_min, pos_max;
+ int64_t pts_start;
+ offset_t ptr = url_ftell(pb);
+
+
+ pos_min = 0;
+ pos_max = ffm->file_size - 2 * FFM_PACKET_SIZE;
+
+ pts_start = get_pts(s, pos_min);
+
+ pts = get_pts(s, pos_max);
+
+ if (pts - 100000 > pts_start)
+ goto end;
+
+ ffm->write_index = FFM_PACKET_SIZE;
+
+ pts_start = get_pts(s, pos_min);
+
+ pts = get_pts(s, pos_max);
+
+ if (pts - 100000 <= pts_start) {
+ while (1) {
+ offset_t newpos;
+ int64_t newpts;
+
+ newpos = ((pos_max + pos_min) / (2 * FFM_PACKET_SIZE)) * FFM_PACKET_SIZE;
+
+ if (newpos == pos_min)
+ break;
+
+ newpts = get_pts(s, newpos);
+
+ if (newpts - 100000 <= pts) {
+ pos_max = newpos;
+ pts = newpts;
+ } else {
+ pos_min = newpos;
+ }
+ }
+ ffm->write_index += pos_max;
+ }
+
+ //printf("Adjusted write index from %"PRId64" to %"PRId64": pts=%0.6f\n", orig_write_index, ffm->write_index, pts / 1000000.);
+ //printf("pts range %0.6f - %0.6f\n", get_pts(s, 0) / 1000000. , get_pts(s, ffm->file_size - 2 * FFM_PACKET_SIZE) / 1000000. );
+
+ end:
+ url_fseek(pb, ptr, SEEK_SET);
+}
+
+
+static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ FFMContext *ffm = s->priv_data;
+ AVStream *st;
+ FFMStream *fst;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *codec;
+ int i, nb_streams;
+ uint32_t tag;
+
+ /* header */
+ tag = get_le32(pb);
+ if (tag != MKTAG('F', 'F', 'M', '1'))
+ goto fail;
+ ffm->packet_size = get_be32(pb);
+ if (ffm->packet_size != FFM_PACKET_SIZE)
+ goto fail;
+ ffm->write_index = get_be64(pb);
+ /* get also filesize */
+ if (!url_is_streamed(pb)) {
+ ffm->file_size = url_fsize(pb);
+ adjust_write_index(s);
+ } else {
+ ffm->file_size = (uint64_t_C(1) << 63) - 1;
+ }
+
+ nb_streams = get_be32(pb);
+ get_be32(pb); /* total bitrate */
+ /* read each stream */
+ for(i=0;i<nb_streams;i++) {
+ char rc_eq_buf[128];
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto fail;
+ fst = av_mallocz(sizeof(FFMStream));
+ if (!fst)
+ goto fail;
+ s->streams[i] = st;
+
+ av_set_pts_info(st, 64, 1, 1000000);
+
+ st->priv_data = fst;
+
+ codec = st->codec;
+ /* generic info */
+ codec->codec_id = get_be32(pb);
+ codec->codec_type = get_byte(pb); /* codec_type */
+ codec->bit_rate = get_be32(pb);
+ st->quality = get_be32(pb);
+ codec->flags = get_be32(pb);
+ codec->flags2 = get_be32(pb);
+ codec->debug = get_be32(pb);
+ /* specific info */
+ switch(codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ codec->time_base.num = get_be32(pb);
+ codec->time_base.den = get_be32(pb);
+ codec->width = get_be16(pb);
+ codec->height = get_be16(pb);
+ codec->gop_size = get_be16(pb);
+ codec->pix_fmt = get_be32(pb);
+ codec->qmin = get_byte(pb);
+ codec->qmax = get_byte(pb);
+ codec->max_qdiff = get_byte(pb);
+ codec->qcompress = get_be16(pb) / 10000.0;
+ codec->qblur = get_be16(pb) / 10000.0;
+ codec->bit_rate_tolerance = get_be32(pb);
+ codec->rc_eq = av_strdup(get_strz(pb, rc_eq_buf, sizeof(rc_eq_buf)));
+ codec->rc_max_rate = get_be32(pb);
+ codec->rc_min_rate = get_be32(pb);
+ codec->rc_buffer_size = get_be32(pb);
+ codec->i_quant_factor = av_int2dbl(get_be64(pb));
+ codec->b_quant_factor = av_int2dbl(get_be64(pb));
+ codec->i_quant_offset = av_int2dbl(get_be64(pb));
+ codec->b_quant_offset = av_int2dbl(get_be64(pb));
+ codec->dct_algo = get_be32(pb);
+ codec->strict_std_compliance = get_be32(pb);
+ codec->max_b_frames = get_be32(pb);
+ codec->luma_elim_threshold = get_be32(pb);
+ codec->chroma_elim_threshold = get_be32(pb);
+ codec->mpeg_quant = get_be32(pb);
+ codec->intra_dc_precision = get_be32(pb);
+ codec->me_method = get_be32(pb);
+ codec->mb_decision = get_be32(pb);
+ codec->nsse_weight = get_be32(pb);
+ codec->frame_skip_cmp = get_be32(pb);
+ codec->rc_buffer_aggressivity = av_int2dbl(get_be64(pb));
+ break;
+ case CODEC_TYPE_AUDIO:
+ codec->sample_rate = get_be32(pb);
+ codec->channels = get_le16(pb);
+ codec->frame_size = get_le16(pb);
+ break;
+ default:
+ goto fail;
+ }
+
+ }
+
+    /* skip until end of block reached */
+ while ((url_ftell(pb) % ffm->packet_size) != 0)
+ get_byte(pb);
+
+ /* init packet demux */
+ ffm->packet_ptr = ffm->packet;
+ ffm->packet_end = ffm->packet;
+ ffm->frame_offset = 0;
+ ffm->pts = 0;
+ ffm->read_state = READ_HEADER;
+ ffm->first_packet = 1;
+ return 0;
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ if (st) {
+ av_freep(&st->priv_data);
+ av_free(st);
+ }
+ }
+ return -1;
+}
+
+/* return < 0 if eof */
+static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int size;
+ FFMContext *ffm = s->priv_data;
+ int duration;
+
+ switch(ffm->read_state) {
+ case READ_HEADER:
+ if (!ffm_is_avail_data(s, FRAME_HEADER_SIZE)) {
+ return -EAGAIN;
+ }
+#if 0
+ printf("pos=%08"PRIx64" spos=%"PRIx64", write_index=%"PRIx64" size=%"PRIx64"\n",
+ url_ftell(&s->pb), s->pb.pos, ffm->write_index, ffm->file_size);
+#endif
+ if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) !=
+ FRAME_HEADER_SIZE)
+ return -EAGAIN;
+#if 0
+ {
+ int i;
+ for(i=0;i<FRAME_HEADER_SIZE;i++)
+ printf("%02x ", ffm->header[i]);
+ printf("\n");
+ }
+#endif
+ ffm->read_state = READ_DATA;
+ /* fall thru */
+ case READ_DATA:
+ size = (ffm->header[2] << 16) | (ffm->header[3] << 8) | ffm->header[4];
+ if (!ffm_is_avail_data(s, size)) {
+ return -EAGAIN;
+ }
+
+ duration = (ffm->header[5] << 16) | (ffm->header[6] << 8) | ffm->header[7];
+
+ av_new_packet(pkt, size);
+ pkt->stream_index = ffm->header[0];
+ pkt->pos = url_ftell(&s->pb);
+ if (ffm->header[1] & FLAG_KEY_FRAME)
+ pkt->flags |= PKT_FLAG_KEY;
+
+ ffm->read_state = READ_HEADER;
+ if (ffm_read_data(s, pkt->data, size, 0) != size) {
+ /* bad case: desynchronized packet. we cancel all the packet loading */
+ av_free_packet(pkt);
+ return -EAGAIN;
+ }
+ if (ffm->first_frame_in_packet)
+ {
+ pkt->pts = ffm->pts;
+ ffm->first_frame_in_packet = 0;
+ }
+ pkt->duration = duration;
+ break;
+ }
+ return 0;
+}
+
+//#define DEBUG_SEEK
+
+/* pos is between 0 and file_size - FFM_PACKET_SIZE. It is offset
+   by the write position inside this function */
+static void ffm_seek1(AVFormatContext *s, offset_t pos1)
+{
+ FFMContext *ffm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t pos;
+
+ pos = pos1 + ffm->write_index;
+ if (pos >= ffm->file_size)
+ pos -= (ffm->file_size - FFM_PACKET_SIZE);
+#ifdef DEBUG_SEEK
+ printf("seek to %"PRIx64" -> %"PRIx64"\n", pos1, pos);
+#endif
+ url_fseek(pb, pos, SEEK_SET);
+}
+
+static int64_t get_pts(AVFormatContext *s, offset_t pos)
+{
+ ByteIOContext *pb = &s->pb;
+ int64_t pts;
+
+ ffm_seek1(s, pos);
+ url_fskip(pb, 4);
+ pts = get_be64(pb);
+#ifdef DEBUG_SEEK
+ printf("pts=%0.6f\n", pts / 1000000.0);
+#endif
+ return pts;
+}
+
+/* seek to a given time in the file. The file read pointer is
+   positioned at or before pts. XXX: the following code is quite
+   approximate */
+static int ffm_seek(AVFormatContext *s, int stream_index, int64_t wanted_pts, int flags)
+{
+ FFMContext *ffm = s->priv_data;
+ offset_t pos_min, pos_max, pos;
+ int64_t pts_min, pts_max, pts;
+ double pos1;
+
+#ifdef DEBUG_SEEK
+ printf("wanted_pts=%0.6f\n", wanted_pts / 1000000.0);
+#endif
+ /* find the position using linear interpolation (better than
+ dichotomy in typical cases) */
+ pos_min = 0;
+ pos_max = ffm->file_size - 2 * FFM_PACKET_SIZE;
+ while (pos_min <= pos_max) {
+ pts_min = get_pts(s, pos_min);
+ pts_max = get_pts(s, pos_max);
+ /* linear interpolation */
+ pos1 = (double)(pos_max - pos_min) * (double)(wanted_pts - pts_min) /
+ (double)(pts_max - pts_min);
+ pos = (((int64_t)pos1) / FFM_PACKET_SIZE) * FFM_PACKET_SIZE;
+ if (pos <= pos_min)
+ pos = pos_min;
+ else if (pos >= pos_max)
+ pos = pos_max;
+ pts = get_pts(s, pos);
+ /* check if we are lucky */
+ if (pts == wanted_pts) {
+ goto found;
+ } else if (pts > wanted_pts) {
+ pos_max = pos - FFM_PACKET_SIZE;
+ } else {
+ pos_min = pos + FFM_PACKET_SIZE;
+ }
+ }
+ pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
+ if (pos > 0)
+ pos -= FFM_PACKET_SIZE;
+ found:
+ ffm_seek1(s, pos);
+ return 0;
+}
+
+#ifdef CONFIG_FFSERVER
+offset_t ffm_read_write_index(int fd)
+{
+ uint8_t buf[8];
+ offset_t pos;
+ int i;
+
+ lseek(fd, 8, SEEK_SET);
+ read(fd, buf, 8);
+ pos = 0;
+ for(i=0;i<8;i++)
+ pos |= (int64_t)buf[i] << (56 - i * 8);
+ return pos;
+}
+
+void ffm_write_write_index(int fd, offset_t pos)
+{
+ uint8_t buf[8];
+ int i;
+
+ for(i=0;i<8;i++)
+ buf[i] = (pos >> (56 - i * 8)) & 0xff;
+ lseek(fd, 8, SEEK_SET);
+ write(fd, buf, 8);
+}
+
+void ffm_set_write_index(AVFormatContext *s, offset_t pos, offset_t file_size)
+{
+ FFMContext *ffm = s->priv_data;
+ ffm->write_index = pos;
+ ffm->file_size = file_size;
+}
+#endif // CONFIG_FFSERVER
+
+static int ffm_read_close(AVFormatContext *s)
+{
+ AVStream *st;
+ int i;
+
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ av_freep(&st->priv_data);
+ }
+ return 0;
+}
+
+static int ffm_probe(AVProbeData *p)
+{
+ if (p->buf_size >= 4 &&
+ p->buf[0] == 'F' && p->buf[1] == 'F' && p->buf[2] == 'M' &&
+ p->buf[3] == '1')
+ return AVPROBE_SCORE_MAX + 1;
+ return 0;
+}
+
+#ifdef CONFIG_FFM_DEMUXER
+AVInputFormat ffm_demuxer = {
+ "ffm",
+ "ffm format",
+ sizeof(FFMContext),
+ ffm_probe,
+ ffm_read_header,
+ ffm_read_packet,
+ ffm_read_close,
+ ffm_seek,
+};
+#endif
+#ifdef CONFIG_FFM_MUXER
+AVOutputFormat ffm_muxer = {
+ "ffm",
+ "ffm format",
+ "",
+ "ffm",
+ sizeof(FFMContext),
+ /* not really used */
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG1VIDEO,
+ ffm_write_header,
+ ffm_write_packet,
+ ffm_write_trailer,
+};
+#endif //CONFIG_FFM_MUXER
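
The 8-byte frame header assembled in ffm_write_packet() and parsed back in ffm_read_packet() above stores the payload size and the duration as big-endian 24-bit fields. The following is a minimal, purely illustrative sketch of that packing and unpacking; it is not part of the patch and the helper names are invented here:

    #include <stdint.h>

    /* mirrors the layout used by ffm_write_packet():
     * byte 0: stream index, byte 1: flags (FLAG_KEY_FRAME),
     * bytes 2-4: payload size, bytes 5-7: duration in microseconds */
    static void ffm_pack_frame_header(uint8_t h[8], int stream_index,
                                      int flags, int size, int duration)
    {
        h[0] = stream_index;
        h[1] = flags;
        h[2] = (size >> 16) & 0xff;
        h[3] = (size >> 8) & 0xff;
        h[4] = size & 0xff;
        h[5] = (duration >> 16) & 0xff;
        h[6] = (duration >> 8) & 0xff;
        h[7] = duration & 0xff;
    }

    static void ffm_unpack_frame_header(const uint8_t h[8],
                                        int *size, int *duration)
    {
        *size     = (h[2] << 16) | (h[3] << 8) | h[4];
        *duration = (h[5] << 16) | (h[6] << 8) | h[7];
    }

For example, a 70000-byte frame lasting 40000 microseconds is stored with size bytes 0x01 0x11 0x70 and duration bytes 0x00 0x9c 0x40.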
diff --git a/contrib/ffmpeg/libavformat/file.c b/contrib/ffmpeg/libavformat/file.c
new file mode 100644
index 000000000..db671698f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/file.c
@@ -0,0 +1,140 @@
+/*
+ * Buffered file io for ffmpeg system
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <fcntl.h>
+#ifndef __MINGW32__
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#else
+#include <io.h>
+#define open(fname,oflag,pmode) _open(fname,oflag,pmode)
+#endif /* __MINGW32__ */
+
+
+/* standard file protocol */
+
+static int file_open(URLContext *h, const char *filename, int flags)
+{
+ int access;
+ int fd;
+
+ strstart(filename, "file:", &filename);
+
+ if (flags & URL_RDWR) {
+ access = O_CREAT | O_TRUNC | O_RDWR;
+ } else if (flags & URL_WRONLY) {
+ access = O_CREAT | O_TRUNC | O_WRONLY;
+ } else {
+ access = O_RDONLY;
+ }
+#if defined(__MINGW32__) || defined(CONFIG_OS2) || defined(__CYGWIN__)
+ access |= O_BINARY;
+#endif
+ fd = open(filename, access, 0666);
+ if (fd < 0)
+ return -ENOENT;
+ h->priv_data = (void *)(size_t)fd;
+ return 0;
+}
+
+static int file_read(URLContext *h, unsigned char *buf, int size)
+{
+ int fd = (size_t)h->priv_data;
+ return read(fd, buf, size);
+}
+
+static int file_write(URLContext *h, unsigned char *buf, int size)
+{
+ int fd = (size_t)h->priv_data;
+ return write(fd, buf, size);
+}
+
+/* XXX: use llseek */
+static offset_t file_seek(URLContext *h, offset_t pos, int whence)
+{
+ int fd = (size_t)h->priv_data;
+#if defined(__MINGW32__)
+ return _lseeki64(fd, pos, whence);
+#else
+ return lseek(fd, pos, whence);
+#endif
+}
+
+static int file_close(URLContext *h)
+{
+ int fd = (size_t)h->priv_data;
+ return close(fd);
+}
+
+URLProtocol file_protocol = {
+ "file",
+ file_open,
+ file_read,
+ file_write,
+ file_seek,
+ file_close,
+};
+
+/* pipe protocol */
+
+static int pipe_open(URLContext *h, const char *filename, int flags)
+{
+ int fd;
+
+ if (flags & URL_WRONLY) {
+ fd = 1;
+ } else {
+ fd = 0;
+ }
+#if defined(__MINGW32__) || defined(CONFIG_OS2) || defined(__CYGWIN__)
+ setmode(fd, O_BINARY);
+#endif
+ h->priv_data = (void *)(size_t)fd;
+ h->is_streamed = 1;
+ return 0;
+}
+
+static int pipe_read(URLContext *h, unsigned char *buf, int size)
+{
+ int fd = (size_t)h->priv_data;
+ return read(fd, buf, size);
+}
+
+static int pipe_write(URLContext *h, unsigned char *buf, int size)
+{
+ int fd = (size_t)h->priv_data;
+ return write(fd, buf, size);
+}
+
+static int pipe_close(URLContext *h)
+{
+ return 0;
+}
+
+URLProtocol pipe_protocol = {
+ "pipe",
+ pipe_open,
+ pipe_read,
+ pipe_write,
+ NULL,
+ pipe_close,
+};
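
file_protocol and pipe_protocol above both follow the same pattern: stash a file descriptor in h->priv_data on open and forward read/write/seek/close to it. The sketch below shows a hypothetical protocol that simply discards writes, only to illustrate the shape of the URLProtocol table; the "null" name and its functions are invented for illustration and are not part of the patch:

    #include "avformat.h"

    /* hypothetical sink protocol, same structure as file_protocol above */
    static int null_open(URLContext *h, const char *filename, int flags)
    {
        h->priv_data = NULL;
        h->is_streamed = 1;         /* not seekable */
        return 0;
    }

    static int null_write(URLContext *h, unsigned char *buf, int size)
    {
        return size;                /* pretend everything was written */
    }

    static int null_close(URLContext *h)
    {
        return 0;
    }

    URLProtocol null_protocol = {
        "null",
        null_open,
        NULL,                       /* no read */
        null_write,
        NULL,                       /* no seek */
        null_close,
    };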
diff --git a/contrib/ffmpeg/libavformat/flic.c b/contrib/ffmpeg/libavformat/flic.c
new file mode 100644
index 000000000..ac32e7392
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/flic.c
@@ -0,0 +1,221 @@
+/*
+ * FLI/FLC Animation File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file flic.c
+ * FLI/FLC file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the .fli/.flc file format and all of its many
+ * variations, visit:
+ * http://www.compuphase.com/flic.htm
+ *
+ * This demuxer handles standard 0xAF11- and 0xAF12-type FLIs. It also
+ * handles special FLIs from the PC game "Magic Carpet".
+ */
+
+#include "avformat.h"
+
+#define FLIC_FILE_MAGIC_1 0xAF11
+#define FLIC_FILE_MAGIC_2 0xAF12
+#define FLIC_FILE_MAGIC_3 0xAF44 /* Flic Type for Extended FLX Format which
+ originated in Dave's Targa Animator (DTA) */
+#define FLIC_CHUNK_MAGIC_1 0xF1FA
+#define FLIC_CHUNK_MAGIC_2 0xF5FA
+#define FLIC_MC_PTS_INC 6000 /* pts increment for Magic Carpet game FLIs */
+#define FLIC_DEFAULT_PTS_INC 6000 /* for FLIs that have 0 speed */
+
+#define FLIC_HEADER_SIZE 128
+#define FLIC_PREAMBLE_SIZE 6
+
+typedef struct FlicDemuxContext {
+ int frame_pts_inc;
+ int64_t pts;
+ int video_stream_index;
+} FlicDemuxContext;
+
+static int flic_probe(AVProbeData *p)
+{
+ int magic_number;
+
+ if (p->buf_size < 6)
+ return 0;
+
+ magic_number = LE_16(&p->buf[4]);
+ if ((magic_number != FLIC_FILE_MAGIC_1) &&
+ (magic_number != FLIC_FILE_MAGIC_2) &&
+ (magic_number != FLIC_FILE_MAGIC_3))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int flic_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ FlicDemuxContext *flic = (FlicDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned char header[FLIC_HEADER_SIZE];
+ AVStream *st;
+ int speed;
+ int magic_number;
+
+ flic->pts = 0;
+
+ /* load the whole header and pull out the width and height */
+ if (get_buffer(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
+ return AVERROR_IO;
+
+ magic_number = LE_16(&header[4]);
+ speed = LE_32(&header[0x10]);
+
+ /* initialize the decoder streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ flic->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_FLIC;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = LE_16(&header[0x08]);
+ st->codec->height = LE_16(&header[0x0A]);
+
+ if (!st->codec->width || !st->codec->height)
+ return AVERROR_INVALIDDATA;
+
+ /* send over the whole 128-byte FLIC header */
+ st->codec->extradata_size = FLIC_HEADER_SIZE;
+ st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
+ memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);
+
+ av_set_pts_info(st, 33, 1, 90000);
+
+ /* Time to figure out the framerate: If there is a FLIC chunk magic
+ * number at offset 0x10, assume this is from the Bullfrog game,
+ * Magic Carpet. */
+ if (LE_16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
+
+ flic->frame_pts_inc = FLIC_MC_PTS_INC;
+
+ /* rewind the stream since the first chunk is at offset 12 */
+ url_fseek(pb, 12, SEEK_SET);
+
+ /* send over abbreviated FLIC header chunk */
+ av_free(st->codec->extradata);
+ st->codec->extradata_size = 12;
+ st->codec->extradata = av_malloc(12);
+ memcpy(st->codec->extradata, header, 12);
+
+ } else if (magic_number == FLIC_FILE_MAGIC_1) {
+ /*
+         * in this case, the speed (n) is the number of 1/70 s ticks between frames:
+ *
+ * pts n * frame #
+ * -------- = ----------- => pts = n * (90000/70) * frame #
+ * 90000 70
+ *
+ * therefore, the frame pts increment = n * 1285.7
+ */
+ flic->frame_pts_inc = speed * 1285.7;
+ } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
+ (magic_number == FLIC_FILE_MAGIC_3)) {
+ /*
+         * in this case, the speed (n) is the number of milliseconds between frames:
+ *
+ * pts n * frame #
+ * -------- = ----------- => pts = n * 90 * frame #
+ * 90000 1000
+ *
+ * therefore, the frame pts increment = n * 90
+ */
+ flic->frame_pts_inc = speed * 90;
+ } else
+ return AVERROR_INVALIDDATA;
+
+ if (flic->frame_pts_inc == 0)
+ flic->frame_pts_inc = FLIC_DEFAULT_PTS_INC;
+
+ return 0;
+}
+
+static int flic_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ FlicDemuxContext *flic = (FlicDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int packet_read = 0;
+ unsigned int size;
+ int magic;
+ int ret = 0;
+ unsigned char preamble[FLIC_PREAMBLE_SIZE];
+
+ while (!packet_read) {
+
+ if ((ret = get_buffer(pb, preamble, FLIC_PREAMBLE_SIZE)) !=
+ FLIC_PREAMBLE_SIZE) {
+ ret = AVERROR_IO;
+ break;
+ }
+
+ size = LE_32(&preamble[0]);
+ magic = LE_16(&preamble[4]);
+
+ if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
+ if (av_new_packet(pkt, size)) {
+ ret = AVERROR_IO;
+ break;
+ }
+ pkt->stream_index = flic->video_stream_index;
+ pkt->pts = flic->pts;
+ pkt->pos = url_ftell(pb);
+ memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
+ ret = get_buffer(pb, pkt->data + FLIC_PREAMBLE_SIZE,
+ size - FLIC_PREAMBLE_SIZE);
+ if (ret != size - FLIC_PREAMBLE_SIZE) {
+ av_free_packet(pkt);
+ ret = AVERROR_IO;
+ }
+ flic->pts += flic->frame_pts_inc;
+ packet_read = 1;
+ } else {
+ /* not interested in this chunk */
+ url_fseek(pb, size - 6, SEEK_CUR);
+ }
+ }
+
+ return ret;
+}
+
+static int flic_read_close(AVFormatContext *s)
+{
+// FlicDemuxContext *flic = (FlicDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+AVInputFormat flic_demuxer = {
+ "flic",
+ "FLI/FLC/FLX animation format",
+ sizeof(FlicDemuxContext),
+ flic_probe,
+ flic_read_header,
+ flic_read_packet,
+ flic_read_close,
+};
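
To make the two pts formulas in flic_read_header() concrete: with the 90 kHz time base set by av_set_pts_info(), a 0xAF11 file whose speed field is 5 (five 1/70 s ticks, roughly 71 ms per frame) gets frame_pts_inc = 5 * 1285.7, about 6428, while a 0xAF12 file with speed 40 (40 ms per frame) gets frame_pts_inc = 40 * 90 = 3600. A speed of 0 falls back to FLIC_DEFAULT_PTS_INC.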
diff --git a/contrib/ffmpeg/libavformat/flvdec.c b/contrib/ffmpeg/libavformat/flvdec.c
new file mode 100644
index 000000000..a1c2aa4eb
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/flvdec.c
@@ -0,0 +1,259 @@
+/*
+ * FLV demuxer
+ * Copyright (c) 2003 The FFmpeg Project.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ * This demuxer will generate 1 byte of extradata for VP6F content.
+ * It is composed of:
+ *  - upper 4 bits: difference between encoded width and visible width
+ *  - lower 4 bits: difference between encoded height and visible height
+ */
+#include "avformat.h"
+
+static int flv_probe(AVProbeData *p)
+{
+ const uint8_t *d;
+
+ if (p->buf_size < 6)
+ return 0;
+ d = p->buf;
+ if (d[0] == 'F' && d[1] == 'L' && d[2] == 'V') {
+ return 50;
+ }
+ return 0;
+}
+
+static int flv_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int offset, flags, size;
+
+    s->ctx_flags |= AVFMTCTX_NOHEADER; // ok, we have a header, but there is no fps, codec type, sample_rate, ...
+
+ url_fskip(&s->pb, 4);
+ flags = get_byte(&s->pb);
+
+ offset = get_be32(&s->pb);
+
+ if(!url_is_streamed(&s->pb)){
+ const int fsize= url_fsize(&s->pb);
+ url_fseek(&s->pb, fsize-4, SEEK_SET);
+ size= get_be32(&s->pb);
+ url_fseek(&s->pb, fsize-3-size, SEEK_SET);
+ if(size == get_be24(&s->pb) + 11){
+ s->duration= get_be24(&s->pb) * (int64_t)AV_TIME_BASE / 1000;
+ }
+ }
+
+ url_fseek(&s->pb, offset, SEEK_SET);
+
+ s->start_time = 0;
+
+ return 0;
+}
+
+static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, i, type, size, pts, flags, is_audio, next, pos;
+ AVStream *st = NULL;
+
+ for(;;){
+ pos = url_ftell(&s->pb);
+ url_fskip(&s->pb, 4); /* size of previous packet */
+ type = get_byte(&s->pb);
+ size = get_be24(&s->pb);
+ pts = get_be24(&s->pb);
+// av_log(s, AV_LOG_DEBUG, "type:%d, size:%d, pts:%d\n", type, size, pts);
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+ url_fskip(&s->pb, 4); /* reserved */
+ flags = 0;
+
+ if(size == 0)
+ continue;
+
+ next= size + url_ftell(&s->pb);
+
+ if (type == 8) {
+ is_audio=1;
+ flags = get_byte(&s->pb);
+ } else if (type == 9) {
+ is_audio=0;
+ flags = get_byte(&s->pb);
+ } else if (type == 18 && size > 13+1+4) {
+ url_fskip(&s->pb, 13); //onMetaData blah
+ if(get_byte(&s->pb) == 8){
+ url_fskip(&s->pb, 4);
+ }
+ while(url_ftell(&s->pb) + 5 < next){
+ char tmp[128];
+ int type, len;
+ double d= 0;
+
+ len= get_be16(&s->pb);
+ if(len >= sizeof(tmp) || !len)
+ break;
+ get_buffer(&s->pb, tmp, len);
+ tmp[len]=0;
+
+ type= get_byte(&s->pb);
+ if(type==0){
+ d= av_int2dbl(get_be64(&s->pb));
+ }else if(type==2){
+ len= get_be16(&s->pb);
+ if(len >= sizeof(tmp))
+ break;
+ url_fskip(&s->pb, len);
+ }else if(type==8){
+ //array
+ break;
+ }else if(type==11){
+ d= av_int2dbl(get_be64(&s->pb));
+ get_be16(&s->pb);
+ }
+
+ if(!strcmp(tmp, "duration")){
+ s->duration = d*AV_TIME_BASE;
+ }else if(!strcmp(tmp, "videodatarate")){
+ }else if(!strcmp(tmp, "audiodatarate")){
+ }
+ }
+ url_fseek(&s->pb, next, SEEK_SET);
+ continue;
+ } else {
+ /* skip packet */
+ av_log(s, AV_LOG_ERROR, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
+ url_fseek(&s->pb, next, SEEK_SET);
+ continue;
+ }
+
+ /* now find stream */
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ if (st->id == is_audio)
+ break;
+ }
+ if(i == s->nb_streams){
+ st = av_new_stream(s, is_audio);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 24, 1, 1000); /* 24 bit pts in ms */
+ st->codec->time_base= (AVRational){1,1000};
+ }
+// av_log(NULL, AV_LOG_DEBUG, "%d %X %d \n", is_audio, flags, st->discard);
+ if( (st->discard >= AVDISCARD_NONKEY && !((flags >> 4)==1 || is_audio))
+ ||(st->discard >= AVDISCARD_BIDIR && ((flags >> 4)==3 && !is_audio))
+ || st->discard >= AVDISCARD_ALL
+ ){
+ url_fseek(&s->pb, next, SEEK_SET);
+ continue;
+ }
+ if ((flags >> 4)==1)
+ av_add_index_entry(st, pos, pts, size, 0, AVINDEX_KEYFRAME);
+ break;
+ }
+
+ if(is_audio){
+ if(st->codec->sample_rate == 0){
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->channels = (flags&1)+1;
+ if((flags >> 4) == 5)
+ st->codec->sample_rate= 8000;
+ else
+ st->codec->sample_rate = (44100<<((flags>>2)&3))>>3;
+ switch(flags >> 4){/* 0: uncompressed 1: ADPCM 2: mp3 5: Nellymoser 8kHz mono 6: Nellymoser*/
+ case 0: if (flags&2) st->codec->codec_id = CODEC_ID_PCM_S16BE;
+ else st->codec->codec_id = CODEC_ID_PCM_S8; break;
+ case 1: st->codec->codec_id = CODEC_ID_ADPCM_SWF; break;
+ case 2: st->codec->codec_id = CODEC_ID_MP3; st->need_parsing = 1; break;
+            // this is not listed in the FLV spec, only in SWF; strange...
+ case 3: if (flags&2) st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ else st->codec->codec_id = CODEC_ID_PCM_S8; break;
+ default:
+ av_log(s, AV_LOG_INFO, "Unsupported audio codec (%x)\n", flags >> 4);
+ st->codec->codec_tag= (flags >> 4);
+ }
+ st->codec->bits_per_sample = (flags & 2) ? 16 : 8;
+ }
+ }else{
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ switch(flags & 0xF){
+ case 2: st->codec->codec_id = CODEC_ID_FLV1; break;
+ case 3: st->codec->codec_id = CODEC_ID_FLASHSV; break;
+ case 4:
+ st->codec->codec_id = CODEC_ID_VP6F;
+ if (st->codec->extradata_size != 1) {
+ st->codec->extradata_size = 1;
+ st->codec->extradata = av_malloc(1);
+ }
+ /* width and height adjustment */
+ st->codec->extradata[0] = get_byte(&s->pb);
+ size--;
+ break;
+ default:
+ av_log(s, AV_LOG_INFO, "Unsupported video codec (%x)\n", flags & 0xf);
+ st->codec->codec_tag= flags & 0xF;
+ }
+ }
+
+ ret= av_get_packet(&s->pb, pkt, size - 1);
+ if (ret <= 0) {
+ return AVERROR_IO;
+ }
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ pkt->pts = pts;
+ pkt->stream_index = st->index;
+
+ if (is_audio || ((flags >> 4)==1))
+ pkt->flags |= PKT_FLAG_KEY;
+
+ return ret;
+}
+
+static int flv_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int flv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+ AVStream *st = s->streams[stream_index];
+ int index = av_index_search_timestamp(st, timestamp, flags);
+ if (index < 0)
+ return -1;
+ url_fseek(&s->pb, st->index_entries[index].pos, SEEK_SET);
+
+ return 0;
+}
+
+AVInputFormat flv_demuxer = {
+ "flv",
+ "flv format",
+ 0,
+ flv_probe,
+ flv_read_header,
+ flv_read_packet,
+ flv_read_close,
+ flv_read_seek,
+ .extensions = "flv",
+ .value = CODEC_ID_FLV1,
+};
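
Per the comment at the top of flvdec.c, the single extradata byte produced for VP6F splits into two nibbles. A consumer of the stream could recover the adjustments as in this illustrative helper, which is not part of the patch:

    #include <stdint.h>

    /* hypothetical helper: split the 1-byte VP6F extradata written above */
    static void vp6f_size_adjust(uint8_t extradata0,
                                 int *width_adjust, int *height_adjust)
    {
        *width_adjust  = extradata0 >> 4;    /* encoded width  minus visible width  */
        *height_adjust = extradata0 & 0x0f;  /* encoded height minus visible height */
    }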
diff --git a/contrib/ffmpeg/libavformat/flvenc.c b/contrib/ffmpeg/libavformat/flvenc.c
new file mode 100644
index 000000000..0b09d9830
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/flvenc.c
@@ -0,0 +1,284 @@
+/*
+ * FLV muxer
+ * Copyright (c) 2003 The FFmpeg Project.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+typedef struct FLVContext {
+ int hasAudio;
+ int hasVideo;
+ int reserved;
+ offset_t duration_offset;
+ offset_t filesize_offset;
+ int64_t duration;
+} FLVContext;
+
+static int get_audio_flags(AVCodecContext *enc){
+ int flags = (enc->bits_per_sample == 16) ? 0x2 : 0x0;
+
+ switch (enc->sample_rate) {
+ case 44100:
+ flags |= 0x0C;
+ break;
+ case 22050:
+ flags |= 0x08;
+ break;
+ case 11025:
+ flags |= 0x04;
+ break;
+ case 8000: //nellymoser only
+ case 5512: //not mp3
+ flags |= 0x00;
+ break;
+ default:
+        av_log(enc, AV_LOG_ERROR, "FLV does not support that sample rate, choose from (44100, 22050, 11025)\n");
+ return -1;
+ }
+
+ if (enc->channels > 1) {
+ flags |= 0x01;
+ }
+
+ switch(enc->codec_id){
+ case CODEC_ID_MP3:
+ flags |= 0x20 | 0x2;
+ break;
+ case CODEC_ID_PCM_S8:
+ break;
+ case CODEC_ID_PCM_S16BE:
+ flags |= 0x2;
+ break;
+ case CODEC_ID_PCM_S16LE:
+ flags |= 0x30 | 0x2;
+ break;
+ case CODEC_ID_ADPCM_SWF:
+ flags |= 0x10;
+ break;
+ case 0:
+ flags |= enc->codec_tag<<4;
+ break;
+ default:
+ av_log(enc, AV_LOG_ERROR, "codec not compatible with flv\n");
+ return -1;
+ }
+
+ return flags;
+}
+
+#define AMF_DOUBLE 0
+#define AMF_BOOLEAN 1
+#define AMF_STRING 2
+#define AMF_OBJECT 3
+#define AMF_MIXED_ARRAY 8
+#define AMF_ARRAY 10
+#define AMF_DATE 11
+
+static void put_amf_string(ByteIOContext *pb, const char *str)
+{
+ size_t len = strlen(str);
+ put_be16(pb, len);
+ put_buffer(pb, str, len);
+}
+
+static void put_amf_double(ByteIOContext *pb, double d)
+{
+ put_byte(pb, AMF_DOUBLE);
+ put_be64(pb, av_dbl2int(d));
+}
+
+static int flv_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ FLVContext *flv = s->priv_data;
+ int i, width, height, samplerate;
+ double framerate = 0.0;
+ int metadata_size_pos, data_size;
+
+ flv->hasAudio = 0;
+ flv->hasVideo = 0;
+
+ put_tag(pb,"FLV");
+ put_byte(pb,1);
+ put_byte(pb,0); // delayed write
+ put_be32(pb,9);
+ put_be32(pb,0);
+
+ for(i=0; i<s->nb_streams; i++){
+ AVCodecContext *enc = s->streams[i]->codec;
+ if (enc->codec_type == CODEC_TYPE_VIDEO) {
+ width = enc->width;
+ height = enc->height;
+ if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
+ framerate = av_q2d(s->streams[i]->r_frame_rate);
+ } else {
+ framerate = 1/av_q2d(s->streams[i]->codec->time_base);
+ }
+ flv->hasVideo=1;
+ } else {
+ flv->hasAudio=1;
+ samplerate = enc->sample_rate;
+ }
+ av_set_pts_info(s->streams[i], 24, 1, 1000); /* 24 bit pts in ms */
+ if(enc->codec_tag == 5){
+ put_byte(pb,8); // message type
+ put_be24(pb,0); // include flags
+ put_be24(pb,0); // time stamp
+ put_be32(pb,0); // reserved
+ put_be32(pb,11); // size
+ flv->reserved=5;
+ }
+ if(enc->codec_type == CODEC_TYPE_AUDIO && get_audio_flags(enc)<0)
+ return -1;
+ }
+
+ /* write meta_tag */
+ put_byte(pb, 18); // tag type META
+ metadata_size_pos= url_ftell(pb);
+ put_be24(pb, 0); // size of data part (sum of all parts below)
+ put_be24(pb, 0); // time stamp
+ put_be32(pb, 0); // reserved
+
+ /* now data of data_size size */
+
+ /* first event name as a string */
+ put_byte(pb, AMF_STRING); // 1 byte
+ put_amf_string(pb, "onMetaData"); // 12 bytes
+
+ /* mixed array (hash) with size and string/type/data tuples */
+ put_byte(pb, AMF_MIXED_ARRAY);
+ put_be32(pb, 4*flv->hasVideo + flv->hasAudio + 2); // +2 for duration and file size
+
+ put_amf_string(pb, "duration");
+ flv->duration_offset= url_ftell(pb);
+ put_amf_double(pb, 0); // delayed write
+
+ if(flv->hasVideo){
+ put_amf_string(pb, "width");
+ put_amf_double(pb, width);
+
+ put_amf_string(pb, "height");
+ put_amf_double(pb, height);
+
+ put_amf_string(pb, "videodatarate");
+ put_amf_double(pb, s->bit_rate / 1024.0);
+
+ put_amf_string(pb, "framerate");
+ put_amf_double(pb, framerate);
+ }
+
+ if(flv->hasAudio){
+ put_amf_string(pb, "audiosamplerate");
+ put_amf_double(pb, samplerate);
+ }
+
+ put_amf_string(pb, "filesize");
+ flv->filesize_offset= url_ftell(pb);
+ put_amf_double(pb, 0); // delayed write
+
+ put_amf_string(pb, "");
+ put_byte(pb, 9); // end marker 1 byte
+
+ /* write total size of tag */
+ data_size= url_ftell(pb) - metadata_size_pos - 10;
+ url_fseek(pb, metadata_size_pos, SEEK_SET);
+ put_be24(pb, data_size);
+ url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
+ put_be32(pb, data_size + 11);
+
+ return 0;
+}
+
+static int flv_write_trailer(AVFormatContext *s)
+{
+ int64_t file_size;
+ int flags = 0;
+
+ ByteIOContext *pb = &s->pb;
+ FLVContext *flv = s->priv_data;
+
+ file_size = url_ftell(pb);
+ flags |= flv->hasAudio ? 4 : 0;
+ flags |= flv->hasVideo ? 1 : 0;
+ url_fseek(pb, 4, SEEK_SET);
+ put_byte(pb,flags);
+
+    /* update information */
+ url_fseek(pb, flv->duration_offset, SEEK_SET);
+ put_amf_double(pb, flv->duration / (double)1000);
+ url_fseek(pb, flv->filesize_offset, SEEK_SET);
+ put_amf_double(pb, file_size);
+
+ url_fseek(pb, file_size, SEEK_SET);
+ return 0;
+}
+
+static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
+ FLVContext *flv = s->priv_data;
+ int size= pkt->size;
+ int flags;
+
+// av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);
+
+ if (enc->codec_type == CODEC_TYPE_VIDEO) {
+ put_byte(pb, 9);
+ flags = 2; // choose h263
+ flags |= pkt->flags & PKT_FLAG_KEY ? 0x10 : 0x20; // add keyframe indicator
+ } else {
+ assert(enc->codec_type == CODEC_TYPE_AUDIO);
+ flags = get_audio_flags(enc);
+
+ assert(size);
+
+ put_byte(pb, 8);
+ }
+
+ put_be24(pb,size+1); // include flags
+ put_be24(pb,pkt->pts);
+ put_be32(pb,flv->reserved);
+ put_byte(pb,flags);
+ put_buffer(pb, pkt->data, size);
+ put_be32(pb,size+1+11); // previous tag size
+ flv->duration = pkt->pts + pkt->duration;
+
+ put_flush_packet(pb);
+ return 0;
+}
+
+AVOutputFormat flv_muxer = {
+ "flv",
+ "flv format",
+ "video/x-flv",
+ "flv",
+ sizeof(FLVContext),
+#ifdef CONFIG_MP3LAME
+ CODEC_ID_MP3,
+#else // CONFIG_MP3LAME
+ CODEC_ID_NONE,
+#endif // CONFIG_MP3LAME
+ CODEC_ID_FLV1,
+ flv_write_header,
+ flv_write_packet,
+ flv_write_trailer,
+};
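
As a concrete reading of get_audio_flags() above: the returned byte ORs the codec bits (0x20 for MP3), the sample-rate bits (0x0C for 44100 Hz), the 16-bit flag 0x02 and the stereo flag 0x01, so 16-bit stereo MP3 at 44.1 kHz yields 0x20 | 0x0C | 0x02 | 0x01 = 0x2F, the byte flv_write_packet() emits immediately before the audio payload.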
diff --git a/contrib/ffmpeg/libavformat/framehook.c b/contrib/ffmpeg/libavformat/framehook.c
new file mode 100644
index 000000000..03bbc95f6
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/framehook.c
@@ -0,0 +1,121 @@
+/*
+ * Video processing hooks
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <errno.h>
+#include "config.h"
+#include "avformat.h"
+#include "framehook.h"
+
+#ifdef HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+
+typedef struct _FrameHookEntry {
+ struct _FrameHookEntry *next;
+ FrameHookConfigureFn Configure;
+ FrameHookProcessFn Process;
+ FrameHookReleaseFn Release;
+ void *ctx;
+} FrameHookEntry;
+
+static FrameHookEntry *first_hook;
+
+/* Returns 0 on OK */
+int frame_hook_add(int argc, char *argv[])
+{
+#ifdef CONFIG_VHOOK
+ void *loaded;
+ FrameHookEntry *fhe, **fhep;
+
+ if (argc < 1) {
+ return ENOENT;
+ }
+
+ loaded = dlopen(argv[0], RTLD_NOW);
+ if (!loaded) {
+ av_log(NULL, AV_LOG_ERROR, "%s\n", dlerror());
+ return -1;
+ }
+
+ fhe = av_mallocz(sizeof(*fhe));
+ if (!fhe) {
+ return errno;
+ }
+
+ fhe->Configure = dlsym(loaded, "Configure");
+ fhe->Process = dlsym(loaded, "Process");
+ fhe->Release = dlsym(loaded, "Release"); /* Optional */
+
+ if (!fhe->Process) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to find Process entrypoint in %s\n", argv[0]);
+ return -1;
+ }
+
+ if (!fhe->Configure && argc > 1) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to find Configure entrypoint in %s\n", argv[0]);
+ return -1;
+ }
+
+ if (argc > 1 || fhe->Configure) {
+ if (fhe->Configure(&fhe->ctx, argc, argv)) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to Configure %s\n", argv[0]);
+ return -1;
+ }
+ }
+
+ for (fhep = &first_hook; *fhep; fhep = &((*fhep)->next)) {
+ }
+
+ *fhep = fhe;
+
+ return 0;
+#else
+ av_log(NULL, AV_LOG_ERROR, "Video hooking not compiled into this version\n");
+ return 1;
+#endif
+}
+
+void frame_hook_process(AVPicture *pict, enum PixelFormat pix_fmt, int width, int height)
+{
+ if (first_hook) {
+ FrameHookEntry *fhe;
+ int64_t pts = av_gettime();
+
+ for (fhe = first_hook; fhe; fhe = fhe->next) {
+ fhe->Process(fhe->ctx, pict, pix_fmt, width, height, pts);
+ }
+ }
+}
+
+void frame_hook_release(void)
+{
+ FrameHookEntry *fhe;
+ FrameHookEntry *fhenext;
+
+ for (fhe = first_hook; fhe; fhe = fhenext) {
+ fhenext = fhe->next;
+ if (fhe->Release)
+ fhe->Release(fhe->ctx);
+ av_free(fhe);
+ }
+
+ first_hook = NULL;
+}
diff --git a/contrib/ffmpeg/libavformat/framehook.h b/contrib/ffmpeg/libavformat/framehook.h
new file mode 100644
index 000000000..d843ddb85
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/framehook.h
@@ -0,0 +1,50 @@
+/*
+ * video processing hooks
+ * copyright (c) 2000, 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _FRAMEHOOK_H
+#define _FRAMEHOOK_H
+
+/*
+ * Prototypes for interface to .so that implement a video processing hook
+ */
+
+#include "avcodec.h"
+
+/* Function must be called 'Configure' */
+typedef int (FrameHookConfigure)(void **ctxp, int argc, char *argv[]);
+typedef FrameHookConfigure *FrameHookConfigureFn;
+extern FrameHookConfigure Configure;
+
+/* Function must be called 'Process' */
+typedef void (FrameHookProcess)(void *ctx, struct AVPicture *pict, enum PixelFormat pix_fmt, int width, int height, int64_t pts);
+typedef FrameHookProcess *FrameHookProcessFn;
+extern FrameHookProcess Process;
+
+/* Function must be called 'Release' */
+typedef void (FrameHookRelease)(void *ctx);
+typedef FrameHookRelease *FrameHookReleaseFn;
+extern FrameHookRelease Release;
+
+extern int frame_hook_add(int argc, char *argv[]);
+extern void frame_hook_process(struct AVPicture *pict, enum PixelFormat pix_fmt, int width, int height);
+extern void frame_hook_release(void);
+
+#endif
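
framehook.h above fixes the three entry points a vhook module must export by name. A minimal module might look like the sketch below; it is illustrative only (the context struct and the frame-counting body are invented here) and it assumes av_mallocz()/av_free() are visible through the avcodec.h include:

    #include "framehook.h"

    typedef struct {
        int frame_count;            /* hypothetical per-hook state */
    } NullHookContext;

    int Configure(void **ctxp, int argc, char *argv[])
    {
        *ctxp = av_mallocz(sizeof(NullHookContext));
        return *ctxp ? 0 : -1;
    }

    void Process(void *ctx, struct AVPicture *pict, enum PixelFormat pix_fmt,
                 int width, int height, int64_t pts)
    {
        NullHookContext *c = ctx;
        c->frame_count++;           /* leaves the picture untouched */
    }

    void Release(void *ctx)
    {
        av_free(ctx);
    }

frame_hook_add() in framehook.c dlopen()s such a shared object, resolves the three symbols with dlsym() and then calls Process() once per frame from frame_hook_process().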
diff --git a/contrib/ffmpeg/libavformat/gif.c b/contrib/ffmpeg/libavformat/gif.c
new file mode 100644
index 000000000..1083710d5
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gif.c
@@ -0,0 +1,419 @@
+/*
+ * Animated GIF muxer
+ * Copyright (c) 2000 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * First version by Francois Revol revol@free.fr
+ *
+ * Features and limitations:
+ * - currently no compression is performed,
+ * in fact the size of the data is 9/8 the size of the image in 8bpp
+ * - uses only a global standard palette
+ * - tested with IE 5.0, Opera for BeOS, NetPositive (BeOS), and Mozilla (BeOS).
+ *
+ * Reference documents:
+ * http://www.goice.co.jp/member/mo/formats/gif.html
+ * http://astronomy.swin.edu.au/pbourke/dataformats/gif/
+ * http://www.dcs.ed.ac.uk/home/mxr/gfx/2d/GIF89a.txt
+ *
+ * this URL claims to have an LZW algorithm not covered by the Unisys patent:
+ * http://www.msg.net/utility/whirlgif/gifencod.html
+ * it could help reduce the size of the files _a lot_...
+ * some sites also mention an RLE-type compression.
+ */
+
+#include "avformat.h"
+#include "bitstream.h"
+
+/* bitstream minipacket size */
+#define GIF_CHUNKS 100
+
+/* slows down the decoding (and some browsers don't like it) */
+/* update on the 'some browsers don't like it' issue from above: this was probably due to a missing 'Data Sub-block Terminator' (byte 19) in the app_header */
+#define GIF_ADD_APP_HEADER // required to enable looping of animated gif
+
+typedef struct {
+ unsigned char r;
+ unsigned char g;
+ unsigned char b;
+} rgb_triplet;
+
+/* we use the standard 216 color palette */
+
+/* this script was used to create the palette:
+ * for r in 00 33 66 99 cc ff; do for g in 00 33 66 99 cc ff; do echo -n " "; for b in 00 33 66 99 cc ff; do
+ * echo -n "{ 0x$r, 0x$g, 0x$b }, "; done; echo ""; done; done
+ */
+
+static const rgb_triplet gif_clut[216] = {
+ { 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x33 }, { 0x00, 0x00, 0x66 }, { 0x00, 0x00, 0x99 }, { 0x00, 0x00, 0xcc }, { 0x00, 0x00, 0xff },
+ { 0x00, 0x33, 0x00 }, { 0x00, 0x33, 0x33 }, { 0x00, 0x33, 0x66 }, { 0x00, 0x33, 0x99 }, { 0x00, 0x33, 0xcc }, { 0x00, 0x33, 0xff },
+ { 0x00, 0x66, 0x00 }, { 0x00, 0x66, 0x33 }, { 0x00, 0x66, 0x66 }, { 0x00, 0x66, 0x99 }, { 0x00, 0x66, 0xcc }, { 0x00, 0x66, 0xff },
+ { 0x00, 0x99, 0x00 }, { 0x00, 0x99, 0x33 }, { 0x00, 0x99, 0x66 }, { 0x00, 0x99, 0x99 }, { 0x00, 0x99, 0xcc }, { 0x00, 0x99, 0xff },
+ { 0x00, 0xcc, 0x00 }, { 0x00, 0xcc, 0x33 }, { 0x00, 0xcc, 0x66 }, { 0x00, 0xcc, 0x99 }, { 0x00, 0xcc, 0xcc }, { 0x00, 0xcc, 0xff },
+ { 0x00, 0xff, 0x00 }, { 0x00, 0xff, 0x33 }, { 0x00, 0xff, 0x66 }, { 0x00, 0xff, 0x99 }, { 0x00, 0xff, 0xcc }, { 0x00, 0xff, 0xff },
+ { 0x33, 0x00, 0x00 }, { 0x33, 0x00, 0x33 }, { 0x33, 0x00, 0x66 }, { 0x33, 0x00, 0x99 }, { 0x33, 0x00, 0xcc }, { 0x33, 0x00, 0xff },
+ { 0x33, 0x33, 0x00 }, { 0x33, 0x33, 0x33 }, { 0x33, 0x33, 0x66 }, { 0x33, 0x33, 0x99 }, { 0x33, 0x33, 0xcc }, { 0x33, 0x33, 0xff },
+ { 0x33, 0x66, 0x00 }, { 0x33, 0x66, 0x33 }, { 0x33, 0x66, 0x66 }, { 0x33, 0x66, 0x99 }, { 0x33, 0x66, 0xcc }, { 0x33, 0x66, 0xff },
+ { 0x33, 0x99, 0x00 }, { 0x33, 0x99, 0x33 }, { 0x33, 0x99, 0x66 }, { 0x33, 0x99, 0x99 }, { 0x33, 0x99, 0xcc }, { 0x33, 0x99, 0xff },
+ { 0x33, 0xcc, 0x00 }, { 0x33, 0xcc, 0x33 }, { 0x33, 0xcc, 0x66 }, { 0x33, 0xcc, 0x99 }, { 0x33, 0xcc, 0xcc }, { 0x33, 0xcc, 0xff },
+ { 0x33, 0xff, 0x00 }, { 0x33, 0xff, 0x33 }, { 0x33, 0xff, 0x66 }, { 0x33, 0xff, 0x99 }, { 0x33, 0xff, 0xcc }, { 0x33, 0xff, 0xff },
+ { 0x66, 0x00, 0x00 }, { 0x66, 0x00, 0x33 }, { 0x66, 0x00, 0x66 }, { 0x66, 0x00, 0x99 }, { 0x66, 0x00, 0xcc }, { 0x66, 0x00, 0xff },
+ { 0x66, 0x33, 0x00 }, { 0x66, 0x33, 0x33 }, { 0x66, 0x33, 0x66 }, { 0x66, 0x33, 0x99 }, { 0x66, 0x33, 0xcc }, { 0x66, 0x33, 0xff },
+ { 0x66, 0x66, 0x00 }, { 0x66, 0x66, 0x33 }, { 0x66, 0x66, 0x66 }, { 0x66, 0x66, 0x99 }, { 0x66, 0x66, 0xcc }, { 0x66, 0x66, 0xff },
+ { 0x66, 0x99, 0x00 }, { 0x66, 0x99, 0x33 }, { 0x66, 0x99, 0x66 }, { 0x66, 0x99, 0x99 }, { 0x66, 0x99, 0xcc }, { 0x66, 0x99, 0xff },
+ { 0x66, 0xcc, 0x00 }, { 0x66, 0xcc, 0x33 }, { 0x66, 0xcc, 0x66 }, { 0x66, 0xcc, 0x99 }, { 0x66, 0xcc, 0xcc }, { 0x66, 0xcc, 0xff },
+ { 0x66, 0xff, 0x00 }, { 0x66, 0xff, 0x33 }, { 0x66, 0xff, 0x66 }, { 0x66, 0xff, 0x99 }, { 0x66, 0xff, 0xcc }, { 0x66, 0xff, 0xff },
+ { 0x99, 0x00, 0x00 }, { 0x99, 0x00, 0x33 }, { 0x99, 0x00, 0x66 }, { 0x99, 0x00, 0x99 }, { 0x99, 0x00, 0xcc }, { 0x99, 0x00, 0xff },
+ { 0x99, 0x33, 0x00 }, { 0x99, 0x33, 0x33 }, { 0x99, 0x33, 0x66 }, { 0x99, 0x33, 0x99 }, { 0x99, 0x33, 0xcc }, { 0x99, 0x33, 0xff },
+ { 0x99, 0x66, 0x00 }, { 0x99, 0x66, 0x33 }, { 0x99, 0x66, 0x66 }, { 0x99, 0x66, 0x99 }, { 0x99, 0x66, 0xcc }, { 0x99, 0x66, 0xff },
+ { 0x99, 0x99, 0x00 }, { 0x99, 0x99, 0x33 }, { 0x99, 0x99, 0x66 }, { 0x99, 0x99, 0x99 }, { 0x99, 0x99, 0xcc }, { 0x99, 0x99, 0xff },
+ { 0x99, 0xcc, 0x00 }, { 0x99, 0xcc, 0x33 }, { 0x99, 0xcc, 0x66 }, { 0x99, 0xcc, 0x99 }, { 0x99, 0xcc, 0xcc }, { 0x99, 0xcc, 0xff },
+ { 0x99, 0xff, 0x00 }, { 0x99, 0xff, 0x33 }, { 0x99, 0xff, 0x66 }, { 0x99, 0xff, 0x99 }, { 0x99, 0xff, 0xcc }, { 0x99, 0xff, 0xff },
+ { 0xcc, 0x00, 0x00 }, { 0xcc, 0x00, 0x33 }, { 0xcc, 0x00, 0x66 }, { 0xcc, 0x00, 0x99 }, { 0xcc, 0x00, 0xcc }, { 0xcc, 0x00, 0xff },
+ { 0xcc, 0x33, 0x00 }, { 0xcc, 0x33, 0x33 }, { 0xcc, 0x33, 0x66 }, { 0xcc, 0x33, 0x99 }, { 0xcc, 0x33, 0xcc }, { 0xcc, 0x33, 0xff },
+ { 0xcc, 0x66, 0x00 }, { 0xcc, 0x66, 0x33 }, { 0xcc, 0x66, 0x66 }, { 0xcc, 0x66, 0x99 }, { 0xcc, 0x66, 0xcc }, { 0xcc, 0x66, 0xff },
+ { 0xcc, 0x99, 0x00 }, { 0xcc, 0x99, 0x33 }, { 0xcc, 0x99, 0x66 }, { 0xcc, 0x99, 0x99 }, { 0xcc, 0x99, 0xcc }, { 0xcc, 0x99, 0xff },
+ { 0xcc, 0xcc, 0x00 }, { 0xcc, 0xcc, 0x33 }, { 0xcc, 0xcc, 0x66 }, { 0xcc, 0xcc, 0x99 }, { 0xcc, 0xcc, 0xcc }, { 0xcc, 0xcc, 0xff },
+ { 0xcc, 0xff, 0x00 }, { 0xcc, 0xff, 0x33 }, { 0xcc, 0xff, 0x66 }, { 0xcc, 0xff, 0x99 }, { 0xcc, 0xff, 0xcc }, { 0xcc, 0xff, 0xff },
+ { 0xff, 0x00, 0x00 }, { 0xff, 0x00, 0x33 }, { 0xff, 0x00, 0x66 }, { 0xff, 0x00, 0x99 }, { 0xff, 0x00, 0xcc }, { 0xff, 0x00, 0xff },
+ { 0xff, 0x33, 0x00 }, { 0xff, 0x33, 0x33 }, { 0xff, 0x33, 0x66 }, { 0xff, 0x33, 0x99 }, { 0xff, 0x33, 0xcc }, { 0xff, 0x33, 0xff },
+ { 0xff, 0x66, 0x00 }, { 0xff, 0x66, 0x33 }, { 0xff, 0x66, 0x66 }, { 0xff, 0x66, 0x99 }, { 0xff, 0x66, 0xcc }, { 0xff, 0x66, 0xff },
+ { 0xff, 0x99, 0x00 }, { 0xff, 0x99, 0x33 }, { 0xff, 0x99, 0x66 }, { 0xff, 0x99, 0x99 }, { 0xff, 0x99, 0xcc }, { 0xff, 0x99, 0xff },
+ { 0xff, 0xcc, 0x00 }, { 0xff, 0xcc, 0x33 }, { 0xff, 0xcc, 0x66 }, { 0xff, 0xcc, 0x99 }, { 0xff, 0xcc, 0xcc }, { 0xff, 0xcc, 0xff },
+ { 0xff, 0xff, 0x00 }, { 0xff, 0xff, 0x33 }, { 0xff, 0xff, 0x66 }, { 0xff, 0xff, 0x99 }, { 0xff, 0xff, 0xcc }, { 0xff, 0xff, 0xff },
+};
+
+/* The GIF format uses reversed order for bitstreams... */
+/* at least they don't use PDP_ENDIAN :) */
+/* so we 'extend' PutBitContext. hmmm, OOP :) */
+/* seems this thing changed slightly since I wrote it... */
+
+#ifdef ALT_BITSTREAM_WRITER
+# error no ALT_BITSTREAM_WRITER support for now
+#endif
+
+static void gif_put_bits_rev(PutBitContext *s, int n, unsigned int value)
+{
+ unsigned int bit_buf;
+ int bit_cnt;
+
+ // printf("put_bits=%d %x\n", n, value);
+ assert(n == 32 || value < (1U << n));
+
+ bit_buf = s->bit_buf;
+    bit_cnt = 32 - s->bit_left; /* XXX: laziness... was = s->bit_cnt; */
+
+ // printf("n=%d value=%x cnt=%d buf=%x\n", n, value, bit_cnt, bit_buf);
+ /* XXX: optimize */
+ if (n < (32-bit_cnt)) {
+ bit_buf |= value << (bit_cnt);
+ bit_cnt+=n;
+ } else {
+ bit_buf |= value << (bit_cnt);
+
+ *s->buf_ptr = bit_buf & 0xff;
+ s->buf_ptr[1] = (bit_buf >> 8) & 0xff;
+ s->buf_ptr[2] = (bit_buf >> 16) & 0xff;
+ s->buf_ptr[3] = (bit_buf >> 24) & 0xff;
+
+ //printf("bitbuf = %08x\n", bit_buf);
+ s->buf_ptr+=4;
+ if (s->buf_ptr >= s->buf_end)
+ puts("bit buffer overflow !!"); // should never happen ! who got rid of the callback ???
+// flush_buffer_rev(s);
+ bit_cnt=bit_cnt + n - 32;
+ if (bit_cnt == 0) {
+ bit_buf = 0;
+ } else {
+ bit_buf = value >> (n - bit_cnt);
+ }
+ }
+
+ s->bit_buf = bit_buf;
+ s->bit_left = 32 - bit_cnt;
+}
+
+/* pad the end of the output stream with zeros */
+static void gif_flush_put_bits_rev(PutBitContext *s)
+{
+ while (s->bit_left < 32) {
+ /* XXX: should test end of buffer */
+ *s->buf_ptr++=s->bit_buf & 0xff;
+ s->bit_buf>>=8;
+ s->bit_left+=8;
+ }
+// flush_buffer_rev(s);
+ s->bit_left=32;
+ s->bit_buf=0;
+}
+
+/* !RevPutBitContext */
+
+/* GIF header */
+static int gif_image_write_header(ByteIOContext *pb,
+ int width, int height, int loop_count,
+ uint32_t *palette)
+{
+ int i;
+ unsigned int v;
+
+ put_tag(pb, "GIF");
+ put_tag(pb, "89a");
+ put_le16(pb, width);
+ put_le16(pb, height);
+
+ put_byte(pb, 0xf7); /* flags: global clut, 256 entries */
+ put_byte(pb, 0x1f); /* background color index */
+ put_byte(pb, 0); /* aspect ratio */
+
+ /* the global palette */
+ if (!palette) {
+ put_buffer(pb, (const unsigned char *)gif_clut, 216*3);
+ for(i=0;i<((256-216)*3);i++)
+ put_byte(pb, 0);
+ } else {
+ for(i=0;i<256;i++) {
+ v = palette[i];
+ put_byte(pb, (v >> 16) & 0xff);
+ put_byte(pb, (v >> 8) & 0xff);
+ put_byte(pb, (v) & 0xff);
+ }
+ }
+
+ /* update: this is the 'NETSCAPE EXTENSION' that allows for looped animated gif
+ see http://members.aol.com/royalef/gifabout.htm#net-extension
+
+ byte 1 : 33 (hex 0x21) GIF Extension code
+ byte 2 : 255 (hex 0xFF) Application Extension Label
+       byte   3       : 11 (hex 0x0B) Length of Application Block
+ (eleven bytes of data to follow)
+ bytes 4 to 11 : "NETSCAPE"
+ bytes 12 to 14 : "2.0"
+ byte 15 : 3 (hex 0x03) Length of Data Sub-Block
+ (three bytes of data to follow)
+ byte 16 : 1 (hex 0x01)
+ bytes 17 to 18 : 0 to 65535, an unsigned integer in
+                         lo-hi byte format. This indicates the
+                         number of times the loop should
+                         be executed.
+ bytes 19 : 0 (hex 0x00) a Data Sub-block Terminator
+ */
+
+ /* application extension header */
+#ifdef GIF_ADD_APP_HEADER
+ if (loop_count >= 0 && loop_count <= 65535) {
+ put_byte(pb, 0x21);
+ put_byte(pb, 0xff);
+ put_byte(pb, 0x0b);
+ put_tag(pb, "NETSCAPE2.0"); // bytes 4 to 14
+ put_byte(pb, 0x03); // byte 15
+ put_byte(pb, 0x01); // byte 16
+ put_le16(pb, (uint16_t)loop_count);
+ put_byte(pb, 0x00); // byte 19
+ }
+#endif
+ return 0;
+}
+
+/* this is maybe slow, but allows for extensions */
+static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
+{
+ return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
+}
+
+
+static int gif_image_write_image(ByteIOContext *pb,
+ int x1, int y1, int width, int height,
+ const uint8_t *buf, int linesize, int pix_fmt)
+{
+ PutBitContext p;
+ uint8_t buffer[200]; /* 100 * 9 / 8 = 113 */
+ int i, left, w, v;
+ const uint8_t *ptr;
+ /* image block */
+
+ put_byte(pb, 0x2c);
+ put_le16(pb, x1);
+ put_le16(pb, y1);
+ put_le16(pb, width);
+ put_le16(pb, height);
+ put_byte(pb, 0x00); /* flags */
+ /* no local clut */
+
+ put_byte(pb, 0x08);
+
+ left= width * height;
+
+ init_put_bits(&p, buffer, 130);
+
+/*
+ * the bitstream is written as small packets, each preceded by a size byte,
+ * but it is still one continuous bitstream across packets (no flush!)
+ */
+ ptr = buf;
+ w = width;
+ while(left>0) {
+
+ gif_put_bits_rev(&p, 9, 0x0100); /* clear code */
+
+ for(i=(left<GIF_CHUNKS)?left:GIF_CHUNKS;i;i--) {
+ if (pix_fmt == PIX_FMT_RGB24) {
+ v = gif_clut_index(ptr[0], ptr[1], ptr[2]);
+ ptr+=3;
+ } else {
+ v = *ptr++;
+ }
+ gif_put_bits_rev(&p, 9, v);
+ if (--w == 0) {
+ w = width;
+ buf += linesize;
+ ptr = buf;
+ }
+ }
+
+ if(left<=GIF_CHUNKS) {
+ gif_put_bits_rev(&p, 9, 0x101); /* end of stream */
+ gif_flush_put_bits_rev(&p);
+ }
+ if(pbBufPtr(&p) - p.buf > 0) {
+ put_byte(pb, pbBufPtr(&p) - p.buf); /* byte count of the packet */
+ put_buffer(pb, p.buf, pbBufPtr(&p) - p.buf); /* the actual buffer */
+ p.buf_ptr = p.buf; /* dequeue the bytes off the bitstream */
+ }
+ left-=GIF_CHUNKS;
+ }
+ put_byte(pb, 0x00); /* end of image block */
+
+ return 0;
+}
+
+typedef struct {
+ int64_t time, file_time;
+ uint8_t buffer[100]; /* data chunks */
+} GIFContext;
+
+static int gif_write_header(AVFormatContext *s)
+{
+ GIFContext *gif = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc, *video_enc;
+ int i, width, height, loop_count /*, rate*/;
+
+/* XXX: do we reject audio streams or just ignore them ?
+ if(s->nb_streams > 1)
+ return -1;
+*/
+ gif->time = 0;
+ gif->file_time = 0;
+
+ video_enc = NULL;
+ for(i=0;i<s->nb_streams;i++) {
+ enc = s->streams[i]->codec;
+ if (enc->codec_type != CODEC_TYPE_AUDIO)
+ video_enc = enc;
+ }
+
+ if (!video_enc) {
+ av_free(gif);
+ return -1;
+ } else {
+ width = video_enc->width;
+ height = video_enc->height;
+ loop_count = s->loop_output;
+// rate = video_enc->time_base.den;
+ }
+
+ if (video_enc->pix_fmt != PIX_FMT_RGB24) {
+ av_log(s, AV_LOG_ERROR, "ERROR: gif only handles the rgb24 pixel format. Use -pix_fmt rgb24.\n");
+ return AVERROR_IO;
+ }
+
+ gif_image_write_header(pb, width, height, loop_count, NULL);
+
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int gif_write_video(AVFormatContext *s,
+ AVCodecContext *enc, const uint8_t *buf, int size)
+{
+ ByteIOContext *pb = &s->pb;
+ GIFContext *gif = s->priv_data;
+ int jiffies;
+ int64_t delay;
+
+ /* graphic control extension block */
+ put_byte(pb, 0x21);
+ put_byte(pb, 0xf9);
+ put_byte(pb, 0x04); /* block size */
+ put_byte(pb, 0x04); /* flags */
+
+ /* 1 jiffy is 1/70 s */
+ /* the delay_time field indicates the number of jiffies - 1 */
+ delay = gif->file_time - gif->time;
+
+ /* XXX: should use delay, in order to be more accurate */
+ /* instead of using the same rounded value each time */
+ /* XXX: don't even remember if I really use it for now */
+ jiffies = (70*enc->time_base.num/enc->time_base.den) - 1;
+
+ put_le16(pb, jiffies);
+
+ put_byte(pb, 0x1f); /* transparent color index */
+ put_byte(pb, 0x00);
+
+ gif_image_write_image(pb, 0, 0, enc->width, enc->height,
+ buf, enc->width * 3, PIX_FMT_RGB24);
+
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int gif_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
+ if (codec->codec_type == CODEC_TYPE_AUDIO)
+ return 0; /* just ignore audio */
+ else
+ return gif_write_video(s, codec, pkt->data, pkt->size);
+}
+
+static int gif_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+
+ put_byte(pb, 0x3b);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+AVOutputFormat gif_muxer = {
+ "gif",
+ "GIF Animation",
+ "image/gif",
+ "gif",
+ sizeof(GIFContext),
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ gif_write_header,
+ gif_write_packet,
+ gif_write_trailer,
+};
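
As a worked example of gif_clut_index() above: an orange RGB24 pixel (0xff, 0x80, 0x00) gives r/47 = 5, g/47 = 2 and b/47 = 0, hence index 5*36 + 2*6 + 0 = 192, which is the gif_clut[] entry { 0xff, 0x66, 0x00 }; every pixel is quantized onto that fixed 6x6x6 web-safe cube before being fed to gif_put_bits_rev() as a 9-bit code.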
diff --git a/contrib/ffmpeg/libavformat/gifdec.c b/contrib/ffmpeg/libavformat/gifdec.c
new file mode 100644
index 000000000..692ca6466
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gifdec.c
@@ -0,0 +1,593 @@
+/*
+ * GIF demuxer
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+//#define DEBUG
+
+#define MAXBITS 12
+#define SIZTABLE (1<<MAXBITS)
+
+#define GCE_DISPOSAL_NONE 0
+#define GCE_DISPOSAL_INPLACE 1
+#define GCE_DISPOSAL_BACKGROUND 2
+#define GCE_DISPOSAL_RESTORE 3
+
+typedef struct GifState {
+ int screen_width;
+ int screen_height;
+ int bits_per_pixel;
+ int background_color_index;
+ int transparent_color_index;
+ int color_resolution;
+ uint8_t *image_buf;
+ int image_linesize;
+ uint32_t *image_palette;
+ int pix_fmt;
+
+ /* after the frame is displayed, the disposal method is used */
+ int gce_disposal;
+ /* delay during which the frame is shown */
+ int gce_delay;
+
+ /* LZW compatible decoder */
+ ByteIOContext *f;
+ int eob_reached;
+ uint8_t *pbuf, *ebuf;
+ int bbits;
+ unsigned int bbuf;
+
+ int cursize; /* The current code size */
+ int curmask;
+ int codesize;
+ int clear_code;
+ int end_code;
+ int newcodes; /* First available code */
+ int top_slot; /* Highest code for current size */
+ int slot; /* Last read code */
+ int fc, oc;
+ uint8_t *sp;
+ uint8_t stack[SIZTABLE];
+ uint8_t suffix[SIZTABLE];
+ uint16_t prefix[SIZTABLE];
+
+ /* aux buffers */
+ uint8_t global_palette[256 * 3];
+ uint8_t local_palette[256 * 3];
+ uint8_t buf[256];
+} GifState;
+
+
+static const uint8_t gif87a_sig[6] = "GIF87a";
+static const uint8_t gif89a_sig[6] = "GIF89a";
+
+static const uint16_t mask[17] =
+{
+ 0x0000, 0x0001, 0x0003, 0x0007,
+ 0x000F, 0x001F, 0x003F, 0x007F,
+ 0x00FF, 0x01FF, 0x03FF, 0x07FF,
+ 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF
+};
+
+/* Probe for the gif video format vs. the gif image format. The current
+ heuristic assumes that a gif87a file is always a single image. For gif89a,
+ we consider it a video only if a GCE extension is present in the
+ first kilobyte. */
+static int gif_video_probe(AVProbeData * pd)
+{
+ const uint8_t *p, *p_end;
+ int bits_per_pixel, has_global_palette, ext_code, ext_len;
+ int gce_flags, gce_disposal;
+
+ if (pd->buf_size < 24 ||
+ memcmp(pd->buf, gif89a_sig, 6) != 0)
+ return 0;
+ p_end = pd->buf + pd->buf_size;
+ p = pd->buf + 6;
+ bits_per_pixel = (p[4] & 0x07) + 1;
+ has_global_palette = (p[4] & 0x80);
+ p += 7;
+ if (has_global_palette)
+ p += (1 << bits_per_pixel) * 3;
+ for(;;) {
+ if (p >= p_end)
+ return 0;
+ if (*p != '!')
+ break;
+ p++;
+ if (p >= p_end)
+ return 0;
+ ext_code = *p++;
+ if (p >= p_end)
+ return 0;
+ ext_len = *p++;
+ if (ext_code == 0xf9) {
+ if (p >= p_end)
+ return 0;
+ /* if a GCE extension with gce_disposal != 0 is found, the
+ file is likely to be an animation */
+ gce_flags = *p++;
+ gce_disposal = (gce_flags >> 2) & 0x7;
+ if (gce_disposal != 0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+ }
+ for(;;) {
+ if (ext_len == 0)
+ break;
+ p += ext_len;
+ if (p >= p_end)
+ return 0;
+ ext_len = *p++;
+ }
+ }
+ return 0;
+}
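+
+/* Sketch of the layout gif_video_probe() walks (reconstructed from the code
+ above and the GIF89a format, not part of the original source):
+ bytes 0..5 "GIF87a" / "GIF89a" signature
+ bytes 6..12 logical screen descriptor: width (le16), height (le16),
+ packed byte (bit 7 = global palette present,
+ bits 0..2 = palette size), background index, aspect ratio
+ optional global palette of (1 << bits_per_pixel) * 3 bytes
+ then extension blocks introduced by '!'; a Graphic Control Extension has
+ label 0xf9 and its first data byte packs the disposal method in bits 2..4,
+ which is what the probe tests to decide animation vs. still image. */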
+
+static void GLZWDecodeInit(GifState * s, int csize)
+{
+ /* read buffer */
+ s->eob_reached = 0;
+ s->pbuf = s->buf;
+ s->ebuf = s->buf;
+ s->bbuf = 0;
+ s->bbits = 0;
+
+ /* decoder */
+ s->codesize = csize;
+ s->cursize = s->codesize + 1;
+ s->curmask = mask[s->cursize];
+ s->top_slot = 1 << s->cursize;
+ s->clear_code = 1 << s->codesize;
+ s->end_code = s->clear_code + 1;
+ s->slot = s->newcodes = s->clear_code + 2;
+ s->oc = s->fc = 0;
+ s->sp = s->stack;
+}
+
+/* XXX: optimize */
+static inline int GetCode(GifState * s)
+{
+ int c, sizbuf;
+ uint8_t *ptr;
+
+ while (s->bbits < s->cursize) {
+ ptr = s->pbuf;
+ if (ptr >= s->ebuf) {
+ if (!s->eob_reached) {
+ sizbuf = get_byte(s->f);
+ s->ebuf = s->buf + sizbuf;
+ s->pbuf = s->buf;
+ if (sizbuf > 0) {
+ get_buffer(s->f, s->buf, sizbuf);
+ } else {
+ s->eob_reached = 1;
+ }
+ }
+ ptr = s->pbuf;
+ }
+ s->bbuf |= ptr[0] << s->bbits;
+ ptr++;
+ s->pbuf = ptr;
+ s->bbits += 8;
+ }
+ c = s->bbuf & s->curmask;
+ s->bbuf >>= s->cursize;
+ s->bbits -= s->cursize;
+ return c;
+}
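+
+/* Illustration (reconstructed from the code above, not in the original
+ source): GetCode() extracts LZW codes least-significant-bit first from the
+ data sub-blocks. With cursize == 3 and a first data byte of 0xa4 (10100100b):
+ 1st code = 0xa4 & 0x07 = 4, then bbuf = 0x14, bbits = 5
+ 2nd code = 0x14 & 0x07 = 4, then bbuf = 0x02, bbits = 2
+ the 3rd code needs more bits, so the next byte is OR'd in at bit offset 2. */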
+
+/* NOTE: the algorithm here is inspired from the LZW GIF decoder
+ written by Steven A. Bennett in 1987. */
+/* return the number of bytes decoded */
+static int GLZWDecode(GifState * s, uint8_t * buf, int len)
+{
+ int l, c, code, oc, fc;
+ uint8_t *sp;
+
+ if (s->end_code < 0)
+ return 0;
+
+ l = len;
+ sp = s->sp;
+ oc = s->oc;
+ fc = s->fc;
+
+ while (sp > s->stack) {
+ *buf++ = *(--sp);
+ if ((--l) == 0)
+ goto the_end;
+ }
+
+ for (;;) {
+ c = GetCode(s);
+ if (c == s->end_code) {
+ s->end_code = -1;
+ break;
+ } else if (c == s->clear_code) {
+ s->cursize = s->codesize + 1;
+ s->curmask = mask[s->cursize];
+ s->slot = s->newcodes;
+ s->top_slot = 1 << s->cursize;
+ while ((c = GetCode(s)) == s->clear_code);
+ if (c == s->end_code) {
+ s->end_code = -1;
+ break;
+ }
+ /* test error */
+ if (c >= s->slot)
+ c = 0;
+ fc = oc = c;
+ *buf++ = c;
+ if ((--l) == 0)
+ break;
+ } else {
+ code = c;
+ if (code >= s->slot) {
+ *sp++ = fc;
+ code = oc;
+ }
+ while (code >= s->newcodes) {
+ *sp++ = s->suffix[code];
+ code = s->prefix[code];
+ }
+ *sp++ = code;
+ if (s->slot < s->top_slot) {
+ s->suffix[s->slot] = fc = code;
+ s->prefix[s->slot++] = oc;
+ oc = c;
+ }
+ if (s->slot >= s->top_slot) {
+ if (s->cursize < MAXBITS) {
+ s->top_slot <<= 1;
+ s->curmask = mask[++s->cursize];
+ }
+ }
+ while (sp > s->stack) {
+ *buf++ = *(--sp);
+ if ((--l) == 0)
+ goto the_end;
+ }
+ }
+ }
+ the_end:
+ s->sp = sp;
+ s->oc = oc;
+ s->fc = fc;
+ return len - l;
+}
+
+static int gif_read_image(GifState *s)
+{
+ ByteIOContext *f = s->f;
+ int left, top, width, height, bits_per_pixel, code_size, flags;
+ int is_interleaved, has_local_palette, y, x, pass, y1, linesize, n, i;
+ uint8_t *ptr, *line, *d, *spal, *palette, *sptr, *ptr1;
+
+ left = get_le16(f);
+ top = get_le16(f);
+ width = get_le16(f);
+ height = get_le16(f);
+ flags = get_byte(f);
+ is_interleaved = flags & 0x40;
+ has_local_palette = flags & 0x80;
+ bits_per_pixel = (flags & 0x07) + 1;
+#ifdef DEBUG
+ printf("gif: image x=%d y=%d w=%d h=%d\n", left, top, width, height);
+#endif
+
+ if (has_local_palette) {
+ get_buffer(f, s->local_palette, 3 * (1 << bits_per_pixel));
+ palette = s->local_palette;
+ } else {
+ palette = s->global_palette;
+ bits_per_pixel = s->bits_per_pixel;
+ }
+
+ /* verify that the whole image fits inside the screen dimensions */
+ if (left + width > s->screen_width ||
+ top + height > s->screen_height)
+ return -EINVAL;
+
+ /* build the palette */
+ if (s->pix_fmt == PIX_FMT_RGB24) {
+ line = av_malloc(width);
+ if (!line)
+ return -ENOMEM;
+ } else {
+ n = (1 << bits_per_pixel);
+ spal = palette;
+ for(i = 0; i < n; i++) {
+ s->image_palette[i] = (0xff << 24) |
+ (spal[0] << 16) | (spal[1] << 8) | (spal[2]);
+ spal += 3;
+ }
+ for(; i < 256; i++)
+ s->image_palette[i] = (0xff << 24);
+ /* handle transparency */
+ if (s->transparent_color_index >= 0)
+ s->image_palette[s->transparent_color_index] = 0;
+ line = NULL;
+ }
+
+ /* now get the image data */
+ s->f = f;
+ code_size = get_byte(f);
+ GLZWDecodeInit(s, code_size);
+
+ /* read the whole image */
+ linesize = s->image_linesize;
+ ptr1 = s->image_buf + top * linesize + (left * 3);
+ ptr = ptr1;
+ pass = 0;
+ y1 = 0;
+ for (y = 0; y < height; y++) {
+ if (s->pix_fmt == PIX_FMT_RGB24) {
+ /* transcode to RGB24 */
+ GLZWDecode(s, line, width);
+ d = ptr;
+ sptr = line;
+ for(x = 0; x < width; x++) {
+ spal = palette + sptr[0] * 3;
+ d[0] = spal[0];
+ d[1] = spal[1];
+ d[2] = spal[2];
+ d += 3;
+ sptr++;
+ }
+ } else {
+ GLZWDecode(s, ptr, width);
+ }
+ if (is_interleaved) {
+ switch(pass) {
+ default:
+ case 0:
+ case 1:
+ y1 += 8;
+ ptr += linesize * 8;
+ if (y1 >= height) {
+ y1 = 4;
+ if (pass == 0)
+ ptr = ptr1 + linesize * 4;
+ else
+ ptr = ptr1 + linesize * 2;
+ pass++;
+ }
+ break;
+ case 2:
+ y1 += 4;
+ ptr += linesize * 4;
+ if (y1 >= height) {
+ y1 = 1;
+ ptr = ptr1 + linesize;
+ pass++;
+ }
+ break;
+ case 3:
+ y1 += 2;
+ ptr += linesize * 2;
+ break;
+ }
+ } else {
+ ptr += linesize;
+ }
+ }
+ av_free(line);
+
+ /* read the garbage data until end marker is found */
+ while (!s->eob_reached)
+ GetCode(s);
+ return 0;
+}
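+
+/* Illustration (derived from the pass handling above, not in the original
+ source): for interlaced images the pass counter 0..3 implements the
+ standard GIF interlace row order:
+ pass 0: rows 0, 8, 16, ... (step 8)
+ pass 1: rows 4, 12, 20, ... (step 8)
+ pass 2: rows 2, 6, 10, ... (step 4)
+ pass 3: rows 1, 3, 5, ... (step 2) */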
+
+static int gif_read_extension(GifState *s)
+{
+ ByteIOContext *f = s->f;
+ int ext_code, ext_len, i, gce_flags, gce_transparent_index;
+
+ /* extension */
+ ext_code = get_byte(f);
+ ext_len = get_byte(f);
+#ifdef DEBUG
+ printf("gif: ext_code=0x%x len=%d\n", ext_code, ext_len);
+#endif
+ switch(ext_code) {
+ case 0xf9:
+ if (ext_len != 4)
+ goto discard_ext;
+ s->transparent_color_index = -1;
+ gce_flags = get_byte(f);
+ s->gce_delay = get_le16(f);
+ gce_transparent_index = get_byte(f);
+ if (gce_flags & 0x01)
+ s->transparent_color_index = gce_transparent_index;
+ else
+ s->transparent_color_index = -1;
+ s->gce_disposal = (gce_flags >> 2) & 0x7;
+#ifdef DEBUG
+ printf("gif: gce_flags=%x delay=%d tcolor=%d disposal=%d\n",
+ gce_flags, s->gce_delay,
+ s->transparent_color_index, s->gce_disposal);
+#endif
+ ext_len = get_byte(f);
+ break;
+ }
+
+ /* NOTE: many extension blocks can come after */
+ discard_ext:
+ while (ext_len != 0) {
+ for (i = 0; i < ext_len; i++)
+ get_byte(f);
+ ext_len = get_byte(f);
+#ifdef DEBUG
+ printf("gif: ext_len1=%d\n", ext_len);
+#endif
+ }
+ return 0;
+}
+
+static int gif_read_header1(GifState *s)
+{
+ ByteIOContext *f = s->f;
+ uint8_t sig[6];
+ int ret, v, n;
+ int has_global_palette;
+
+ /* read gif signature */
+ ret = get_buffer(f, sig, 6);
+ if (ret != 6)
+ return -1;
+ if (memcmp(sig, gif87a_sig, 6) != 0 &&
+ memcmp(sig, gif89a_sig, 6) != 0)
+ return -1;
+
+ /* read screen header */
+ s->transparent_color_index = -1;
+ s->screen_width = get_le16(f);
+ s->screen_height = get_le16(f);
+ if( (unsigned)s->screen_width > 32767
+ || (unsigned)s->screen_height > 32767){
+ av_log(NULL, AV_LOG_ERROR, "picture size too large\n");
+ return -1;
+ }
+
+ v = get_byte(f);
+ s->color_resolution = ((v & 0x70) >> 4) + 1;
+ has_global_palette = (v & 0x80);
+ s->bits_per_pixel = (v & 0x07) + 1;
+ s->background_color_index = get_byte(f);
+ get_byte(f); /* ignored */
+#ifdef DEBUG
+ printf("gif: screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
+ s->screen_width, s->screen_height, s->bits_per_pixel,
+ has_global_palette);
+#endif
+ if (has_global_palette) {
+ n = 1 << s->bits_per_pixel;
+ get_buffer(f, s->global_palette, n * 3);
+ }
+ return 0;
+}
+
+static int gif_parse_next_image(GifState *s)
+{
+ ByteIOContext *f = s->f;
+ int ret, code;
+
+ for (;;) {
+ code = url_fgetc(f);
+#ifdef DEBUG
+ printf("gif: code=%02x '%c'\n", code, code);
+#endif
+ switch (code) {
+ case ',':
+ if (gif_read_image(s) < 0)
+ return AVERROR_IO;
+ ret = 0;
+ goto the_end;
+ case ';':
+ /* end of image */
+ ret = AVERROR_IO;
+ goto the_end;
+ case '!':
+ if (gif_read_extension(s) < 0)
+ return AVERROR_IO;
+ break;
+ case EOF:
+ default:
+ /* error or erroneous EOF */
+ ret = AVERROR_IO;
+ goto the_end;
+ }
+ }
+ the_end:
+ return ret;
+}
+
+static int gif_read_header(AVFormatContext * s1,
+ AVFormatParameters * ap)
+{
+ GifState *s = s1->priv_data;
+ ByteIOContext *f = &s1->pb;
+ AVStream *st;
+
+ s->f = f;
+ if (gif_read_header1(s) < 0)
+ return -1;
+
+ /* allocate image buffer */
+ s->image_linesize = s->screen_width * 3;
+ s->image_buf = av_malloc(s->screen_height * s->image_linesize);
+ if (!s->image_buf)
+ return -ENOMEM;
+ s->pix_fmt = PIX_FMT_RGB24;
+ /* now we are ready: build format streams */
+ st = av_new_stream(s1, 0);
+ if (!st)
+ return -1;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->time_base.den = 5;
+ st->codec->time_base.num = 1;
+ /* XXX: check if screen size is always valid */
+ st->codec->width = s->screen_width;
+ st->codec->height = s->screen_height;
+ st->codec->pix_fmt = PIX_FMT_RGB24;
+ return 0;
+}
+
+static int gif_read_packet(AVFormatContext * s1,
+ AVPacket * pkt)
+{
+ GifState *s = s1->priv_data;
+ int ret;
+
+ ret = gif_parse_next_image(s);
+ if (ret < 0)
+ return ret;
+
+ /* XXX: avoid copying */
+ if (av_new_packet(pkt, s->screen_width * s->screen_height * 3)) {
+ return AVERROR_IO;
+ }
+ pkt->stream_index = 0;
+ memcpy(pkt->data, s->image_buf, s->screen_width * s->screen_height * 3);
+ return 0;
+}
+
+static int gif_read_close(AVFormatContext *s1)
+{
+ GifState *s = s1->priv_data;
+ av_free(s->image_buf);
+ return 0;
+}
+
+AVInputFormat gif_demuxer =
+{
+ "gif",
+ "gif format",
+ sizeof(GifState),
+ gif_video_probe,
+ gif_read_header,
+ gif_read_packet,
+ gif_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/grab.c b/contrib/ffmpeg/libavformat/grab.c
new file mode 100644
index 000000000..4e85772e5
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/grab.c
@@ -0,0 +1,860 @@
+/*
+ * Linux video grab interface
+ * Copyright (c) 2000,2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#define _LINUX_TIME_H 1
+#include <linux/videodev.h>
+#include <time.h>
+
+typedef struct {
+ int fd;
+ int frame_format; /* see VIDEO_PALETTE_xxx */
+ int use_mmap;
+ int width, height;
+ int frame_rate;
+ int frame_rate_base;
+ int64_t time_frame;
+ int frame_size;
+ struct video_capability video_cap;
+ struct video_audio audio_saved;
+ uint8_t *video_buf;
+ struct video_mbuf gb_buffers;
+ struct video_mmap gb_buf;
+ int gb_frame;
+
+ /* ATI All In Wonder specific stuff */
+ /* XXX: remove and merge in libavcodec/imgconvert.c */
+ int aiw_enabled;
+ int deint;
+ int halfw;
+ uint8_t *src_mem;
+ uint8_t *lum_m4_mem;
+} VideoData;
+
+static int aiw_init(VideoData *s);
+static int aiw_read_picture(VideoData *s, uint8_t *data);
+static int aiw_close(VideoData *s);
+
+static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ AVStream *st;
+ int width, height;
+ int video_fd, frame_size;
+ int ret, frame_rate, frame_rate_base;
+ int desired_palette, desired_depth;
+ struct video_tuner tuner;
+ struct video_audio audio;
+ struct video_picture pict;
+ const char *video_device;
+ int j;
+
+ if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
+ av_log(s1, AV_LOG_ERROR, "Bad capture size (%dx%d) or wrong time base (%d)\n",
+ ap->width, ap->height, ap->time_base.den);
+
+ return -1;
+ }
+
+ width = ap->width;
+ height = ap->height;
+ frame_rate = ap->time_base.den;
+ frame_rate_base = ap->time_base.num;
+
+ if((unsigned)width > 32767 || (unsigned)height > 32767) {
+ av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
+ width, height);
+
+ return -1;
+ }
+
+ st = av_new_stream(s1, 0);
+ if (!st)
+ return -ENOMEM;
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ s->width = width;
+ s->height = height;
+ s->frame_rate = frame_rate;
+ s->frame_rate_base = frame_rate_base;
+
+ video_device = ap->device;
+ if (!video_device)
+ video_device = "/dev/video";
+ video_fd = open(video_device, O_RDWR);
+ if (video_fd < 0) {
+ perror(video_device);
+ goto fail;
+ }
+
+ if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
+ perror("VIDIOCGCAP");
+ goto fail;
+ }
+
+ if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
+ av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
+ goto fail;
+ }
+
+ desired_palette = -1;
+ desired_depth = -1;
+ if (ap->pix_fmt == PIX_FMT_YUV420P) {
+ desired_palette = VIDEO_PALETTE_YUV420P;
+ desired_depth = 12;
+ } else if (ap->pix_fmt == PIX_FMT_YUV422) {
+ desired_palette = VIDEO_PALETTE_YUV422;
+ desired_depth = 16;
+ } else if (ap->pix_fmt == PIX_FMT_BGR24) {
+ desired_palette = VIDEO_PALETTE_RGB24;
+ desired_depth = 24;
+ }
+
+ /* set tv standard */
+ if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
+ if (!strcasecmp(ap->standard, "pal"))
+ tuner.mode = VIDEO_MODE_PAL;
+ else if (!strcasecmp(ap->standard, "secam"))
+ tuner.mode = VIDEO_MODE_SECAM;
+ else
+ tuner.mode = VIDEO_MODE_NTSC;
+ ioctl(video_fd, VIDIOCSTUNER, &tuner);
+ }
+
+ /* unmute audio */
+ audio.audio = 0;
+ ioctl(video_fd, VIDIOCGAUDIO, &audio);
+ memcpy(&s->audio_saved, &audio, sizeof(audio));
+ audio.flags &= ~VIDEO_AUDIO_MUTE;
+ ioctl(video_fd, VIDIOCSAUDIO, &audio);
+
+ ioctl(video_fd, VIDIOCGPICT, &pict);
+#if 0
+ printf("v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
+ pict.colour,
+ pict.hue,
+ pict.brightness,
+ pict.contrast,
+ pict.whiteness);
+#endif
+ /* try to choose a suitable video format */
+ pict.palette = desired_palette;
+ pict.depth= desired_depth;
+ if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
+ pict.palette=VIDEO_PALETTE_YUV420P;
+ pict.depth=12;
+ ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+ if (ret < 0) {
+ pict.palette=VIDEO_PALETTE_YUV422;
+ pict.depth=16;
+ ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+ if (ret < 0) {
+ pict.palette=VIDEO_PALETTE_RGB24;
+ pict.depth=24;
+ ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+ if (ret < 0) {
+ pict.palette=VIDEO_PALETTE_GREY;
+ pict.depth=8;
+ ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+ if (ret < 0)
+ goto fail1;
+ }
+ }
+ }
+ }
+
+ ret = ioctl(video_fd,VIDIOCGMBUF,&s->gb_buffers);
+ if (ret < 0) {
+ /* try to use read based access */
+ struct video_window win;
+ int val;
+
+ win.x = 0;
+ win.y = 0;
+ win.width = width;
+ win.height = height;
+ win.chromakey = -1;
+ win.flags = 0;
+
+ ioctl(video_fd, VIDIOCSWIN, &win);
+
+ s->frame_format = pict.palette;
+
+ val = 1;
+ ioctl(video_fd, VIDIOCCAPTURE, &val);
+
+ s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
+ s->use_mmap = 0;
+
+ /* ATI All In Wonder automatic activation */
+ if (!strcmp(s->video_cap.name, "Km")) {
+ if (aiw_init(s) < 0)
+ goto fail;
+ s->aiw_enabled = 1;
+ /* force 420P format because the conversion from YUV422 to YUV420P
+ is done in this driver (ugly) */
+ s->frame_format = VIDEO_PALETTE_YUV420P;
+ }
+ } else {
+ s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,0);
+ if ((unsigned char*)-1 == s->video_buf) {
+ perror("mmap");
+ goto fail;
+ }
+ s->gb_frame = 0;
+ s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
+
+ /* start to grab the first frame */
+ s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
+ s->gb_buf.height = height;
+ s->gb_buf.width = width;
+ s->gb_buf.format = pict.palette;
+
+ ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
+ if (ret < 0) {
+ if (errno != EAGAIN) {
+ fail1:
+ av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
+ } else {
+ av_log(s1, AV_LOG_ERROR,"Fatal: grab device does not receive any video signal\n");
+ }
+ goto fail;
+ }
+ for (j = 1; j < s->gb_buffers.frames; j++) {
+ s->gb_buf.frame = j;
+ ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
+ }
+ s->frame_format = s->gb_buf.format;
+ s->use_mmap = 1;
+ }
+
+ switch(s->frame_format) {
+ case VIDEO_PALETTE_YUV420P:
+ frame_size = (width * height * 3) / 2;
+ st->codec->pix_fmt = PIX_FMT_YUV420P;
+ break;
+ case VIDEO_PALETTE_YUV422:
+ frame_size = width * height * 2;
+ st->codec->pix_fmt = PIX_FMT_YUV422;
+ break;
+ case VIDEO_PALETTE_RGB24:
+ frame_size = width * height * 3;
+ st->codec->pix_fmt = PIX_FMT_BGR24; /* NOTE: v4l uses BGR24, not RGB24 ! */
+ break;
+ case VIDEO_PALETTE_GREY:
+ frame_size = width * height * 1;
+ st->codec->pix_fmt = PIX_FMT_GRAY8;
+ break;
+ default:
+ goto fail;
+ }
+ s->fd = video_fd;
+ s->frame_size = frame_size;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = width;
+ st->codec->height = height;
+ st->codec->time_base.den = frame_rate;
+ st->codec->time_base.num = frame_rate_base;
+ st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;
+
+ return 0;
+ fail:
+ if (video_fd >= 0)
+ close(video_fd);
+ av_free(st);
+ return AVERROR_IO;
+}
+
+static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
+{
+ uint8_t *ptr;
+
+ while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
+ (errno == EAGAIN || errno == EINTR));
+
+ ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
+ memcpy(buf, ptr, s->frame_size);
+
+ /* Setup to capture the next frame */
+ s->gb_buf.frame = s->gb_frame;
+ if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
+ if (errno == EAGAIN)
+ av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
+ else
+ perror("VIDIOCMCAPTURE");
+ return AVERROR_IO;
+ }
+
+ /* This is now the grabbing frame */
+ s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;
+
+ return s->frame_size;
+}
+
+static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+ int64_t curtime, delay;
+ struct timespec ts;
+
+ /* Calculate the time of the next frame */
+ s->time_frame += int64_t_C(1000000);
+
+ /* wait based on the frame rate */
+ for(;;) {
+ curtime = av_gettime();
+ delay = s->time_frame * s->frame_rate_base / s->frame_rate - curtime;
+ if (delay <= 0) {
+ if (delay < int64_t_C(-1000000) * s->frame_rate_base / s->frame_rate) {
+ /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
+ s->time_frame += int64_t_C(1000000);
+ }
+ break;
+ }
+ ts.tv_sec = delay / 1000000;
+ ts.tv_nsec = (delay % 1000000) * 1000;
+ nanosleep(&ts, NULL);
+ }
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR_IO;
+
+ pkt->pts = curtime;
+
+ /* read one frame */
+ if (s->aiw_enabled) {
+ return aiw_read_picture(s, pkt->data);
+ } else if (s->use_mmap) {
+ return v4l_mm_read_picture(s, pkt->data);
+ } else {
+ if (read(s->fd, pkt->data, pkt->size) != pkt->size)
+ return AVERROR_IO;
+ return s->frame_size;
+ }
+}
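+
+/* Worked example (reconstructed from the code above, not in the original
+ source): time_frame is kept scaled by frame_rate / frame_rate_base, so
+ adding 1000000 above advances the target time by exactly one frame period
+ once it is divided back, e.g. at 25/1 fps: 1000000 * 1 / 25 = 40000 us
+ = 40 ms. A frame period is skipped (time_frame advanced again) when
+ grabbing is more than one frame period late. */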
+
+static int grab_read_close(AVFormatContext *s1)
+{
+ VideoData *s = s1->priv_data;
+
+ if (s->aiw_enabled)
+ aiw_close(s);
+
+ if (s->use_mmap)
+ munmap(s->video_buf, s->gb_buffers.size);
+
+ /* mute audio. we must force it because the BTTV driver does not
+ return its state correctly */
+ s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
+ ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);
+
+ close(s->fd);
+ return 0;
+}
+
+AVInputFormat video_grab_device_demuxer = {
+ "video4linux",
+ "video grab",
+ sizeof(VideoData),
+ NULL,
+ grab_read_header,
+ grab_read_packet,
+ grab_read_close,
+ .flags = AVFMT_NOFILE,
+};
+
+/* All in Wonder specific stuff */
+/* XXX: remove and merge in libavcodec/imgconvert.c */
+
+static int aiw_init(VideoData *s)
+{
+ int width, height;
+
+ width = s->width;
+ height = s->height;
+
+ if ((width == s->video_cap.maxwidth && height == s->video_cap.maxheight) ||
+ (width == s->video_cap.maxwidth && height == s->video_cap.maxheight*2) ||
+ (width == s->video_cap.maxwidth/2 && height == s->video_cap.maxheight)) {
+
+ s->deint=0;
+ s->halfw=0;
+ if (height == s->video_cap.maxheight*2) s->deint=1;
+ if (width == s->video_cap.maxwidth/2) s->halfw=1;
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "\nIncorrect Grab Size Supplied - Supported Sizes Are:\n");
+ av_log(NULL, AV_LOG_ERROR, " %dx%d %dx%d %dx%d\n\n",
+ s->video_cap.maxwidth,s->video_cap.maxheight,
+ s->video_cap.maxwidth,s->video_cap.maxheight*2,
+ s->video_cap.maxwidth/2,s->video_cap.maxheight);
+ goto fail;
+ }
+
+ if (s->halfw == 0) {
+ s->src_mem = av_malloc(s->width*2);
+ } else {
+ s->src_mem = av_malloc(s->width*4);
+ }
+ if (!s->src_mem) goto fail;
+
+ s->lum_m4_mem = av_malloc(s->width);
+ if (!s->lum_m4_mem)
+ goto fail;
+ return 0;
+ fail:
+ av_freep(&s->src_mem);
+ av_freep(&s->lum_m4_mem);
+ return -1;
+}
+
+#ifdef HAVE_MMX
+#include "libavcodec/i386/mmx.h"
+
+#define LINE_WITH_UV \
+ movq_m2r(ptr[0],mm0); \
+ movq_m2r(ptr[8],mm1); \
+ movq_r2r(mm0, mm4); \
+ punpcklbw_r2r(mm1,mm0); \
+ punpckhbw_r2r(mm1,mm4); \
+ movq_r2r(mm0,mm5); \
+ punpcklbw_r2r(mm4,mm0); \
+ punpckhbw_r2r(mm4,mm5); \
+ movq_r2r(mm0,mm1); \
+ punpcklbw_r2r(mm5,mm1); \
+ movq_r2m(mm1,lum[0]); \
+ movq_m2r(ptr[16],mm2); \
+ movq_m2r(ptr[24],mm1); \
+ movq_r2r(mm2,mm4); \
+ punpcklbw_r2r(mm1,mm2); \
+ punpckhbw_r2r(mm1,mm4); \
+ movq_r2r(mm2,mm3); \
+ punpcklbw_r2r(mm4,mm2); \
+ punpckhbw_r2r(mm4,mm3); \
+ movq_r2r(mm2,mm1); \
+ punpcklbw_r2r(mm3,mm1); \
+ movq_r2m(mm1,lum[8]); \
+ punpckhdq_r2r(mm2,mm0); \
+ punpckhdq_r2r(mm3,mm5); \
+ movq_r2m(mm0,cb[0]); \
+ movq_r2m(mm5,cr[0]);
+
+#define LINE_NO_UV \
+ movq_m2r(ptr[0],mm0);\
+ movq_m2r(ptr[8],mm1);\
+ movq_r2r(mm0, mm4);\
+ punpcklbw_r2r(mm1,mm0); \
+ punpckhbw_r2r(mm1,mm4);\
+ movq_r2r(mm0,mm5);\
+ punpcklbw_r2r(mm4,mm0);\
+ punpckhbw_r2r(mm4,mm5);\
+ movq_r2r(mm0,mm1);\
+ punpcklbw_r2r(mm5,mm1);\
+ movq_r2m(mm1,lum[0]);\
+ movq_m2r(ptr[16],mm2);\
+ movq_m2r(ptr[24],mm1);\
+ movq_r2r(mm2,mm4);\
+ punpcklbw_r2r(mm1,mm2);\
+ punpckhbw_r2r(mm1,mm4);\
+ movq_r2r(mm2,mm3);\
+ punpcklbw_r2r(mm4,mm2);\
+ punpckhbw_r2r(mm4,mm3);\
+ movq_r2r(mm2,mm1);\
+ punpcklbw_r2r(mm3,mm1);\
+ movq_r2m(mm1,lum[8]);
+
+#define LINE_WITHUV_AVG \
+ movq_m2r(ptr[0], mm0);\
+ movq_m2r(ptr[8], mm1);\
+ movq_r2r(mm0, mm4);\
+ punpcklbw_r2r(mm1,mm0);\
+ punpckhbw_r2r(mm1,mm4);\
+ movq_r2r(mm0,mm5);\
+ punpcklbw_r2r(mm4,mm0);\
+ punpckhbw_r2r(mm4,mm5);\
+ movq_r2r(mm0,mm1);\
+ movq_r2r(mm5,mm2);\
+ punpcklbw_r2r(mm7,mm1);\
+ punpcklbw_r2r(mm7,mm2);\
+ paddw_r2r(mm6,mm1);\
+ paddw_r2r(mm2,mm1);\
+ psraw_i2r(1,mm1);\
+ packuswb_r2r(mm7,mm1);\
+ movd_r2m(mm1,lum[0]);\
+ movq_m2r(ptr[16],mm2);\
+ movq_m2r(ptr[24],mm1);\
+ movq_r2r(mm2,mm4);\
+ punpcklbw_r2r(mm1,mm2);\
+ punpckhbw_r2r(mm1,mm4);\
+ movq_r2r(mm2,mm3);\
+ punpcklbw_r2r(mm4,mm2);\
+ punpckhbw_r2r(mm4,mm3);\
+ movq_r2r(mm2,mm1);\
+ movq_r2r(mm3,mm4);\
+ punpcklbw_r2r(mm7,mm1);\
+ punpcklbw_r2r(mm7,mm4);\
+ paddw_r2r(mm6,mm1);\
+ paddw_r2r(mm4,mm1);\
+ psraw_i2r(1,mm1);\
+ packuswb_r2r(mm7,mm1);\
+ movd_r2m(mm1,lum[4]);\
+ punpckhbw_r2r(mm7,mm0);\
+ punpckhbw_r2r(mm7,mm2);\
+ paddw_r2r(mm6,mm0);\
+ paddw_r2r(mm2,mm0);\
+ psraw_i2r(1,mm0);\
+ packuswb_r2r(mm7,mm0);\
+ punpckhbw_r2r(mm7,mm5);\
+ punpckhbw_r2r(mm7,mm3);\
+ paddw_r2r(mm6,mm5);\
+ paddw_r2r(mm3,mm5);\
+ psraw_i2r(1,mm5);\
+ packuswb_r2r(mm7,mm5);\
+ movd_r2m(mm0,cb[0]);\
+ movd_r2m(mm5,cr[0]);
+
+#define LINE_NOUV_AVG \
+ movq_m2r(ptr[0],mm0);\
+ movq_m2r(ptr[8],mm1);\
+ pand_r2r(mm5,mm0);\
+ pand_r2r(mm5,mm1);\
+ pmaddwd_r2r(mm6,mm0);\
+ pmaddwd_r2r(mm6,mm1);\
+ packssdw_r2r(mm1,mm0);\
+ paddw_r2r(mm6,mm0);\
+ psraw_i2r(1,mm0);\
+ movq_m2r(ptr[16],mm2);\
+ movq_m2r(ptr[24],mm3);\
+ pand_r2r(mm5,mm2);\
+ pand_r2r(mm5,mm3);\
+ pmaddwd_r2r(mm6,mm2);\
+ pmaddwd_r2r(mm6,mm3);\
+ packssdw_r2r(mm3,mm2);\
+ paddw_r2r(mm6,mm2);\
+ psraw_i2r(1,mm2);\
+ packuswb_r2r(mm2,mm0);\
+ movq_r2m(mm0,lum[0]);
+
+#define DEINT_LINE_LUM(ptroff) \
+ movd_m2r(lum_m4[(ptroff)],mm0);\
+ movd_m2r(lum_m3[(ptroff)],mm1);\
+ movd_m2r(lum_m2[(ptroff)],mm2);\
+ movd_m2r(lum_m1[(ptroff)],mm3);\
+ movd_m2r(lum[(ptroff)],mm4);\
+ punpcklbw_r2r(mm7,mm0);\
+ movd_r2m(mm2,lum_m4[(ptroff)]);\
+ punpcklbw_r2r(mm7,mm1);\
+ punpcklbw_r2r(mm7,mm2);\
+ punpcklbw_r2r(mm7,mm3);\
+ punpcklbw_r2r(mm7,mm4);\
+ psllw_i2r(2,mm1);\
+ psllw_i2r(1,mm2);\
+ paddw_r2r(mm6,mm1);\
+ psllw_i2r(2,mm3);\
+ paddw_r2r(mm2,mm1);\
+ paddw_r2r(mm4,mm0);\
+ paddw_r2r(mm3,mm1);\
+ psubusw_r2r(mm0,mm1);\
+ psrlw_i2r(3,mm1);\
+ packuswb_r2r(mm7,mm1);\
+ movd_r2m(mm1,lum_m2[(ptroff)]);
+
+#else
+#include "libavcodec/dsputil.h"
+
+#define LINE_WITH_UV \
+ lum[0]=ptr[0];lum[1]=ptr[2];lum[2]=ptr[4];lum[3]=ptr[6];\
+ cb[0]=ptr[1];cb[1]=ptr[5];\
+ cr[0]=ptr[3];cr[1]=ptr[7];\
+ lum[4]=ptr[8];lum[5]=ptr[10];lum[6]=ptr[12];lum[7]=ptr[14];\
+ cb[2]=ptr[9];cb[3]=ptr[13];\
+ cr[2]=ptr[11];cr[3]=ptr[15];\
+ lum[8]=ptr[16];lum[9]=ptr[18];lum[10]=ptr[20];lum[11]=ptr[22];\
+ cb[4]=ptr[17];cb[5]=ptr[21];\
+ cr[4]=ptr[19];cr[5]=ptr[23];\
+ lum[12]=ptr[24];lum[13]=ptr[26];lum[14]=ptr[28];lum[15]=ptr[30];\
+ cb[6]=ptr[25];cb[7]=ptr[29];\
+ cr[6]=ptr[27];cr[7]=ptr[31];
+
+#define LINE_NO_UV \
+ lum[0]=ptr[0];lum[1]=ptr[2];lum[2]=ptr[4];lum[3]=ptr[6];\
+ lum[4]=ptr[8];lum[5]=ptr[10];lum[6]=ptr[12];lum[7]=ptr[14];\
+ lum[8]=ptr[16];lum[9]=ptr[18];lum[10]=ptr[20];lum[11]=ptr[22];\
+ lum[12]=ptr[24];lum[13]=ptr[26];lum[14]=ptr[28];lum[15]=ptr[30];
+
+#define LINE_WITHUV_AVG \
+ sum=(ptr[0]+ptr[2]+1) >> 1;lum[0]=sum; \
+ sum=(ptr[4]+ptr[6]+1) >> 1;lum[1]=sum; \
+ sum=(ptr[1]+ptr[5]+1) >> 1;cb[0]=sum; \
+ sum=(ptr[3]+ptr[7]+1) >> 1;cr[0]=sum; \
+ sum=(ptr[8]+ptr[10]+1) >> 1;lum[2]=sum; \
+ sum=(ptr[12]+ptr[14]+1) >> 1;lum[3]=sum; \
+ sum=(ptr[9]+ptr[13]+1) >> 1;cb[1]=sum; \
+ sum=(ptr[11]+ptr[15]+1) >> 1;cr[1]=sum; \
+ sum=(ptr[16]+ptr[18]+1) >> 1;lum[4]=sum; \
+ sum=(ptr[20]+ptr[22]+1) >> 1;lum[5]=sum; \
+ sum=(ptr[17]+ptr[21]+1) >> 1;cb[2]=sum; \
+ sum=(ptr[19]+ptr[23]+1) >> 1;cr[2]=sum; \
+ sum=(ptr[24]+ptr[26]+1) >> 1;lum[6]=sum; \
+ sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum; \
+ sum=(ptr[25]+ptr[29]+1) >> 1;cb[3]=sum; \
+ sum=(ptr[27]+ptr[31]+1) >> 1;cr[3]=sum;
+
+#define LINE_NOUV_AVG \
+ sum=(ptr[0]+ptr[2]+1) >> 1;lum[0]=sum; \
+ sum=(ptr[4]+ptr[6]+1) >> 1;lum[1]=sum; \
+ sum=(ptr[8]+ptr[10]+1) >> 1;lum[2]=sum; \
+ sum=(ptr[12]+ptr[14]+1) >> 1;lum[3]=sum; \
+ sum=(ptr[16]+ptr[18]+1) >> 1;lum[4]=sum; \
+ sum=(ptr[20]+ptr[22]+1) >> 1;lum[5]=sum; \
+ sum=(ptr[24]+ptr[26]+1) >> 1;lum[6]=sum; \
+ sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum;
+
+#define DEINT_LINE_LUM(ptroff) \
+ sum=(-lum_m4[(ptroff)]+(lum_m3[(ptroff)]<<2)+(lum_m2[(ptroff)]<<1)+(lum_m1[(ptroff)]<<2)-lum[(ptroff)]); \
+ lum_m4[(ptroff)]=lum_m2[(ptroff)];\
+ lum_m2[(ptroff)]=cm[(sum+4)>>3];\
+ sum=(-lum_m4[(ptroff)+1]+(lum_m3[(ptroff)+1]<<2)+(lum_m2[(ptroff)+1]<<1)+(lum_m1[(ptroff)+1]<<2)-lum[(ptroff)+1]); \
+ lum_m4[(ptroff)+1]=lum_m2[(ptroff)+1];\
+ lum_m2[(ptroff)+1]=cm[(sum+4)>>3];\
+ sum=(-lum_m4[(ptroff)+2]+(lum_m3[(ptroff)+2]<<2)+(lum_m2[(ptroff)+2]<<1)+(lum_m1[(ptroff)+2]<<2)-lum[(ptroff)+2]); \
+ lum_m4[(ptroff)+2]=lum_m2[(ptroff)+2];\
+ lum_m2[(ptroff)+2]=cm[(sum+4)>>3];\
+ sum=(-lum_m4[(ptroff)+3]+(lum_m3[(ptroff)+3]<<2)+(lum_m2[(ptroff)+3]<<1)+(lum_m1[(ptroff)+3]<<2)-lum[(ptroff)+3]); \
+ lum_m4[(ptroff)+3]=lum_m2[(ptroff)+3];\
+ lum_m2[(ptroff)+3]=cm[(sum+4)>>3];
+
+#endif
+
+
+/* Read two fields separately. */
+static int aiw_read_picture(VideoData *s, uint8_t *data)
+{
+ uint8_t *ptr, *lum, *cb, *cr;
+ int h;
+#ifndef HAVE_MMX
+ int sum;
+#endif
+ uint8_t* src = s->src_mem;
+ uint8_t *ptrend = &src[s->width*2];
+ lum=data;
+ cb=&lum[s->width*s->height];
+ cr=&cb[(s->width*s->height)/4];
+ if (s->deint == 0 && s->halfw == 0) {
+ while (read(s->fd,src,s->width*2) < 0) {
+ usleep(100);
+ }
+ for (h = 0; h < s->height-2; h+=2) {
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ read(s->fd,src,s->width*2);
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
+ LINE_NO_UV
+ }
+ read(s->fd,src,s->width*2);
+ }
+ /*
+ * Do last two lines
+ */
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ read(s->fd,src,s->width*2);
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
+ LINE_NO_UV
+ }
+ /* drop second field */
+ while (read(s->fd,src,s->width*2) < 0) {
+ usleep(100);
+ }
+ for (h = 0; h < s->height - 1; h++) {
+ read(s->fd,src,s->width*2);
+ }
+ } else if (s->halfw == 1) {
+#ifdef HAVE_MMX
+ mmx_t rounder;
+ mmx_t masker;
+ rounder.uw[0]=1;
+ rounder.uw[1]=1;
+ rounder.uw[2]=1;
+ rounder.uw[3]=1;
+ masker.ub[0]=0xff;
+ masker.ub[1]=0;
+ masker.ub[2]=0xff;
+ masker.ub[3]=0;
+ masker.ub[4]=0xff;
+ masker.ub[5]=0;
+ masker.ub[6]=0xff;
+ masker.ub[7]=0;
+ pxor_r2r(mm7,mm7);
+ movq_m2r(rounder,mm6);
+#endif
+ while (read(s->fd,src,s->width*4) < 0) {
+ usleep(100);
+ }
+ ptrend = &src[s->width*4];
+ for (h = 0; h < s->height-2; h+=2) {
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8, cb+=4, cr+=4) {
+ LINE_WITHUV_AVG
+ }
+ read(s->fd,src,s->width*4);
+#ifdef HAVE_MMX
+ movq_m2r(masker,mm5);
+#endif
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8) {
+ LINE_NOUV_AVG
+ }
+ read(s->fd,src,s->width*4);
+ }
+ /*
+ * Do last two lines
+ */
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8, cb+=4, cr+=4) {
+ LINE_WITHUV_AVG
+ }
+ read(s->fd,src,s->width*4);
+#ifdef HAVE_MMX
+ movq_m2r(masker,mm5);
+#endif
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8) {
+ LINE_NOUV_AVG
+ }
+ /* drop second field */
+ while (read(s->fd,src,s->width*4) < 0) {
+ usleep(100);
+ }
+ for (h = 0; h < s->height - 1; h++) {
+ read(s->fd,src,s->width*4);
+ }
+ } else {
+ uint8_t *lum_m1, *lum_m2, *lum_m3, *lum_m4;
+#ifdef HAVE_MMX
+ mmx_t rounder;
+ rounder.uw[0]=4;
+ rounder.uw[1]=4;
+ rounder.uw[2]=4;
+ rounder.uw[3]=4;
+ movq_m2r(rounder,mm6);
+ pxor_r2r(mm7,mm7);
+#else
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+#endif
+
+ /* read two fields and deinterlace them */
+ while (read(s->fd,src,s->width*2) < 0) {
+ usleep(100);
+ }
+ for (h = 0; h < (s->height/2)-2; h+=2) {
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ read(s->fd,src,s->width*2);
+ /* skip a luminance line - will be filled in later */
+ lum += s->width;
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ /* skip a luminance line - will be filled in later */
+ lum += s->width;
+ read(s->fd,src,s->width*2);
+ }
+ /*
+ * Do last two lines
+ */
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ /* skip a luminance line - will be filled in later */
+ lum += s->width;
+ read(s->fd,src,s->width*2);
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ /*
+ *
+ * SECOND FIELD
+ *
+ */
+ lum=&data[s->width];
+ while (read(s->fd,src,s->width*2) < 0) {
+ usleep(10);
+ }
+ /* First (and last) two lines not interlaced */
+ for (h = 0; h < 2; h++) {
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
+ LINE_NO_UV
+ }
+ read(s->fd,src,s->width*2);
+ /* skip a luminance line */
+ lum += s->width;
+ }
+ lum_m1=&lum[-s->width];
+ lum_m2=&lum_m1[-s->width];
+ lum_m3=&lum_m2[-s->width];
+ memmove(s->lum_m4_mem,&lum_m3[-s->width],s->width);
+ for (; h < (s->height/2)-1; h++) {
+ lum_m4=s->lum_m4_mem;
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16,lum_m1+=16,lum_m2+=16,lum_m3+=16,lum_m4+=16) {
+ LINE_NO_UV
+
+ DEINT_LINE_LUM(0)
+ DEINT_LINE_LUM(4)
+ DEINT_LINE_LUM(8)
+ DEINT_LINE_LUM(12)
+ }
+ read(s->fd,src,s->width*2);
+ /* skip a luminance line */
+ lum += s->width;
+ lum_m1 += s->width;
+ lum_m2 += s->width;
+ lum_m3 += s->width;
+ // lum_m4 += s->width;
+ }
+ /*
+ * Do last line
+ */
+ lum_m4=s->lum_m4_mem;
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, lum_m1+=16, lum_m2+=16, lum_m3+=16, lum_m4+=16) {
+ LINE_NO_UV
+
+ DEINT_LINE_LUM(0)
+ DEINT_LINE_LUM(4)
+ DEINT_LINE_LUM(8)
+ DEINT_LINE_LUM(12)
+ }
+ }
+#ifdef HAVE_MMX
+ emms();
+#endif
+ return s->frame_size;
+}
+
+static int aiw_close(VideoData *s)
+{
+ av_freep(&s->lum_m4_mem);
+ av_freep(&s->src_mem);
+ return 0;
+}
diff --git a/contrib/ffmpeg/libavformat/grab_bktr.c b/contrib/ffmpeg/libavformat/grab_bktr.c
new file mode 100644
index 000000000..214599490
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/grab_bktr.c
@@ -0,0 +1,330 @@
+/*
+ * *BSD video grab interface
+ * Copyright (c) 2002 Steve O'Hara-Smith
+ * based on
+ * Linux video grab interface
+ * Copyright (c) 2000,2001 Gerard Lantau.
+ * and
+ * simple_grab.c Copyright (c) 1999 Roger Hardiman
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#if defined(__FreeBSD__)
+# if __FreeBSD__ >= 502100
+# include <dev/bktr/ioctl_meteor.h>
+# include <dev/bktr/ioctl_bt848.h>
+# else
+# include <machine/ioctl_meteor.h>
+# include <machine/ioctl_bt848.h>
+# endif
+#elif defined(__FreeBSD_kernel__)
+# include <dev/bktr/ioctl_meteor.h>
+# include <dev/bktr/ioctl_bt848.h>
+#elif defined(__DragonFly__)
+# include <dev/video/meteor/ioctl_meteor.h>
+# include <dev/video/bktr/ioctl_bt848.h>
+#else
+# include <dev/ic/bt8xx.h>
+#endif
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <signal.h>
+
+typedef struct {
+ int video_fd;
+ int tuner_fd;
+ int width, height;
+ int frame_rate;
+ int frame_rate_base;
+ u_int64_t per_frame;
+} VideoData;
+
+
+#define PAL 1
+#define PALBDGHI 1
+#define NTSC 2
+#define NTSCM 2
+#define SECAM 3
+#define PALN 4
+#define PALM 5
+#define NTSCJ 6
+
+/* PAL is 768 x 576. NTSC is 640 x 480 */
+#define PAL_HEIGHT 576
+#define SECAM_HEIGHT 576
+#define NTSC_HEIGHT 480
+
+#ifndef VIDEO_FORMAT
+#define VIDEO_FORMAT NTSC
+#endif
+
+static int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
+ METEOR_DEV3, METEOR_DEV_SVIDEO };
+
+uint8_t *video_buf;
+size_t video_buf_size;
+u_int64_t last_frame_time;
+volatile sig_atomic_t nsignals;
+
+
+static void catchsignal(int signal)
+{
+ nsignals++;
+ return;
+}
+
+static int bktr_init(const char *video_device, int width, int height,
+ int format, int *video_fd, int *tuner_fd, int idev, double frequency)
+{
+ struct meteor_geomet geo;
+ int h_max;
+ long ioctl_frequency;
+ char *arg;
+ int c;
+ struct sigaction act, old;
+
+ if (idev < 0 || idev > 4)
+ {
+ arg = getenv ("BKTR_DEV");
+ if (arg)
+ idev = atoi (arg);
+ if (idev < 0 || idev > 4)
+ idev = 1;
+ }
+
+ if (format < 1 || format > 6)
+ {
+ arg = getenv ("BKTR_FORMAT");
+ if (arg)
+ format = atoi (arg);
+ if (format < 1 || format > 6)
+ format = VIDEO_FORMAT;
+ }
+
+ if (frequency <= 0)
+ {
+ arg = getenv ("BKTR_FREQUENCY");
+ if (arg)
+ frequency = atof (arg);
+ if (frequency <= 0)
+ frequency = 0.0;
+ }
+
+ memset(&act, 0, sizeof(act));
+ sigemptyset(&act.sa_mask);
+ act.sa_handler = catchsignal;
+ sigaction(SIGUSR1, &act, &old);
+
+ *tuner_fd = open("/dev/tuner0", O_RDONLY);
+ if (*tuner_fd < 0)
+ perror("Warning: Tuner not opened, continuing");
+
+ *video_fd = open(video_device, O_RDONLY);
+ if (*video_fd < 0) {
+ perror(video_device);
+ return -1;
+ }
+
+ geo.rows = height;
+ geo.columns = width;
+ geo.frames = 1;
+ geo.oformat = METEOR_GEO_YUV_422 | METEOR_GEO_YUV_12;
+
+ switch (format) {
+ case PAL: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
+ case PALN: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALN; break;
+ case PALM: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALM; break;
+ case SECAM: h_max = SECAM_HEIGHT; c = BT848_IFORM_F_SECAM; break;
+ case NTSC: h_max = NTSC_HEIGHT; c = BT848_IFORM_F_NTSCM; break;
+ case NTSCJ: h_max = NTSC_HEIGHT; c = BT848_IFORM_F_NTSCJ; break;
+ default: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
+ }
+
+ if (height <= h_max / 2)
+ geo.oformat |= METEOR_GEO_EVEN_ONLY;
+
+ if (ioctl(*video_fd, METEORSETGEO, &geo) < 0) {
+ perror("METEORSETGEO");
+ return -1;
+ }
+
+ if (ioctl(*video_fd, BT848SFMT, &c) < 0) {
+ perror("BT848SFMT");
+ return -1;
+ }
+
+ c = bktr_dev[idev];
+ if (ioctl(*video_fd, METEORSINPUT, &c) < 0) {
+ perror("METEORSINPUT");
+ return -1;
+ }
+
+ video_buf_size = width * height * 12 / 8;
+
+ video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
+ PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
+ if (video_buf == MAP_FAILED) {
+ perror("mmap");
+ return -1;
+ }
+
+ if (frequency != 0.0) {
+ ioctl_frequency = (unsigned long)(frequency*16);
+ if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
+ perror("TVTUNER_SETFREQ");
+ }
+
+ c = AUDIO_UNMUTE;
+ if (ioctl(*tuner_fd, BT848_SAUDIO, &c) < 0)
+ perror("TVTUNER_SAUDIO");
+
+ c = METEOR_CAP_CONTINOUS;
+ ioctl(*video_fd, METEORCAPTUR, &c);
+
+ c = SIGUSR1;
+ ioctl(*video_fd, METEORSSIGNAL, &c);
+
+ return 0;
+}
+
+static void bktr_getframe(u_int64_t per_frame)
+{
+ u_int64_t curtime;
+
+ curtime = av_gettime();
+ if (!last_frame_time
+ || ((last_frame_time + per_frame) > curtime)) {
+ if (!usleep(last_frame_time + per_frame + per_frame / 8 - curtime)) {
+ if (!nsignals)
+ av_log(NULL, AV_LOG_INFO,
+ "SLEPT NO signals - %d microseconds late\n",
+ (int)(av_gettime() - last_frame_time - per_frame));
+ }
+ }
+ nsignals = 0;
+ last_frame_time = curtime;
+}
+
+
+/* note: we support only one picture read at a time */
+static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+
+ if (av_new_packet(pkt, video_buf_size) < 0)
+ return -EIO;
+
+ bktr_getframe(s->per_frame);
+
+ pkt->pts = av_gettime();
+ memcpy(pkt->data, video_buf, video_buf_size);
+
+ return video_buf_size;
+}
+
+static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ AVStream *st;
+ int width, height;
+ int frame_rate;
+ int frame_rate_base;
+ int format = -1;
+ const char *video_device;
+
+ if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0)
+ return -1;
+
+ width = ap->width;
+ height = ap->height;
+ frame_rate = ap->time_base.den;
+ frame_rate_base = ap->time_base.num;
+
+ video_device = ap->device;
+ if (!video_device)
+ video_device = "/dev/bktr0";
+
+ st = av_new_stream(s1, 0);
+ if (!st)
+ return -ENOMEM;
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ s->width = width;
+ s->height = height;
+ s->frame_rate = frame_rate;
+ s->frame_rate_base = frame_rate_base;
+ s->per_frame = ((u_int64_t)1000000 * s->frame_rate_base) / s->frame_rate;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->pix_fmt = PIX_FMT_YUV420P;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = width;
+ st->codec->height = height;
+ st->codec->time_base.den = frame_rate;
+ st->codec->time_base.num = frame_rate_base;
+
+ if (ap->standard) {
+ if (!strcasecmp(ap->standard, "pal"))
+ format = PAL;
+ else if (!strcasecmp(ap->standard, "secam"))
+ format = SECAM;
+ else if (!strcasecmp(ap->standard, "ntsc"))
+ format = NTSC;
+ }
+
+ if (bktr_init(video_device, width, height, format,
+ &(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0)
+ return -EIO;
+
+ nsignals = 0;
+ last_frame_time = 0;
+
+ return 0;
+}
+
+static int grab_read_close(AVFormatContext *s1)
+{
+ VideoData *s = s1->priv_data;
+ int c;
+
+ c = METEOR_CAP_STOP_CONT;
+ ioctl(s->video_fd, METEORCAPTUR, &c);
+ close(s->video_fd);
+
+ c = AUDIO_MUTE;
+ ioctl(s->tuner_fd, BT848_SAUDIO, &c);
+ close(s->tuner_fd);
+
+ munmap((caddr_t)video_buf, video_buf_size);
+
+ return 0;
+}
+
+AVInputFormat video_grab_device_demuxer = {
+ "bktr",
+ "video grab",
+ sizeof(VideoData),
+ NULL,
+ grab_read_header,
+ grab_read_packet,
+ grab_read_close,
+ .flags = AVFMT_NOFILE,
+};
diff --git a/contrib/ffmpeg/libavformat/gxf.c b/contrib/ffmpeg/libavformat/gxf.c
new file mode 100644
index 000000000..897cdade0
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gxf.c
@@ -0,0 +1,525 @@
+/*
+ * GXF demuxer.
+ * Copyright (c) 2006 Reimar Doeffinger.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "common.h"
+
+typedef enum {
+ PKT_MAP = 0xbc,
+ PKT_MEDIA = 0xbf,
+ PKT_EOS = 0xfb,
+ PKT_FLT = 0xfc,
+ PKT_UMF = 0xfd
+} pkt_type_t;
+
+typedef enum {
+ MAT_NAME = 0x40,
+ MAT_FIRST_FIELD = 0x41,
+ MAT_LAST_FIELD = 0x42,
+ MAT_MARK_IN = 0x43,
+ MAT_MARK_OUT = 0x44,
+ MAT_SIZE = 0x45
+} mat_tag_t;
+
+typedef enum {
+ TRACK_NAME = 0x4c,
+ TRACK_AUX = 0x4d,
+ TRACK_VER = 0x4e,
+ TRACK_MPG_AUX = 0x4f,
+ TRACK_FPS = 0x50,
+ TRACK_LINES = 0x51,
+ TRACK_FPF = 0x52
+} track_tag_t;
+
+typedef struct {
+ int64_t first_field;
+ int64_t last_field;
+ AVRational frames_per_second;
+ int32_t fields_per_frame;
+} st_info_t;
+
+/**
+ * \brief parses a packet header, extracting type and length
+ * \param pb ByteIOContext to read header from
+ * \param type detected packet type is stored here
+ * \param length detected packet length, excluding the header, is stored here
+ * \return 0 if header not found or contains invalid data, 1 otherwise
+ */
+static int parse_packet_header(ByteIOContext *pb, pkt_type_t *type, int *length) {
+ if (get_be32(pb))
+ return 0;
+ if (get_byte(pb) != 1)
+ return 0;
+ *type = get_byte(pb);
+ *length = get_be32(pb);
+ if ((*length >> 24) || *length < 16)
+ return 0;
+ *length -= 16;
+ if (get_be32(pb))
+ return 0;
+ if (get_byte(pb) != 0xe1)
+ return 0;
+ if (get_byte(pb) != 0xe2)
+ return 0;
+ return 1;
+}
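+
+/* Sketch (reconstructed from parse_packet_header() above, not part of the
+ original source) of the 16-byte GXF packet header it validates:
+ bytes 0..3 0x00 00 00 00 leader
+ byte 4 0x01
+ byte 5 packet type (PKT_MAP, PKT_MEDIA, PKT_EOS, PKT_FLT, PKT_UMF)
+ bytes 6..9 packet length, big-endian, including this header
+ (must be >= 16 and below 2^24)
+ bytes 10..13 0x00 00 00 00 reserved
+ byte 14 0xe1
+ byte 15 0xe2
+ The returned *length is the payload size, i.e. the stored length minus 16. */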
+
+/**
+ * \brief check if file starts with a PKT_MAP header
+ */
+static int gxf_probe(AVProbeData *p) {
+ static const uint8_t startcode[] = {0, 0, 0, 0, 1, 0xbc}; // start with map packet
+ static const uint8_t endcode[] = {0, 0, 0, 0, 0xe1, 0xe2};
+ if (p->buf_size < 16)
+ return 0;
+ if (!memcmp(p->buf, startcode, sizeof(startcode)) &&
+ !memcmp(&p->buf[16 - sizeof(endcode)], endcode, sizeof(endcode)))
+ return AVPROBE_SCORE_MAX;
+ return 0;
+}
+
+/**
+ * \brief gets the stream index for the track with the specified id, creating a new
+ * stream if it is not found
+ * \param id id of the track to find / add
+ * \param format stream format identifier
+ */
+static int get_sindex(AVFormatContext *s, int id, int format) {
+ int i;
+ AVStream *st = NULL;
+ for (i = 0; i < s->nb_streams; i++) {
+ if (s->streams[i]->id == id)
+ return i;
+ }
+ st = av_new_stream(s, id);
+ switch (format) {
+ case 3:
+ case 4:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MJPEG;
+ break;
+ case 13:
+ case 15:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_DVVIDEO;
+ break;
+ case 14:
+ case 16:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_DVVIDEO;
+ break;
+ case 11:
+ case 12:
+ case 20:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
+ st->need_parsing = 2; // get keyframe flag etc.
+ break;
+ case 22:
+ case 23:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MPEG1VIDEO;
+ st->need_parsing = 2; // get keyframe flag etc.
+ break;
+ case 9:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S24LE;
+ st->codec->channels = 1;
+ st->codec->sample_rate = 48000;
+ st->codec->bit_rate = 3 * 1 * 48000 * 8;
+ st->codec->block_align = 3 * 1;
+ st->codec->bits_per_sample = 24;
+ break;
+ case 10:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ st->codec->channels = 1;
+ st->codec->sample_rate = 48000;
+ st->codec->bit_rate = 2 * 1 * 48000 * 8;
+ st->codec->block_align = 2 * 1;
+ st->codec->bits_per_sample = 16;
+ break;
+ case 17:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_AC3;
+ st->codec->channels = 2;
+ st->codec->sample_rate = 48000;
+ break;
+ // timecode tracks:
+ case 7:
+ case 8:
+ case 24:
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ st->codec->codec_id = CODEC_ID_NONE;
+ break;
+ default:
+ st->codec->codec_type = CODEC_TYPE_UNKNOWN;
+ st->codec->codec_id = CODEC_ID_NONE;
+ break;
+ }
+ return s->nb_streams - 1;
+}
+
+/**
+ * \brief filters out interesting tags from material information.
+ * \param len length of the tag section, will be adjusted to contain the remaining bytes
+ * \param si struct to store collected information into
+ */
+static void gxf_material_tags(ByteIOContext *pb, int *len, st_info_t *si) {
+ si->first_field = AV_NOPTS_VALUE;
+ si->last_field = AV_NOPTS_VALUE;
+ while (*len >= 2) {
+ mat_tag_t tag = get_byte(pb);
+ int tlen = get_byte(pb);
+ *len -= 2;
+ if (tlen > *len)
+ return;
+ *len -= tlen;
+ if (tlen == 4) {
+ uint32_t value = get_be32(pb);
+ if (tag == MAT_FIRST_FIELD)
+ si->first_field = value;
+ else if (tag == MAT_LAST_FIELD)
+ si->last_field = value;
+ } else
+ url_fskip(pb, tlen);
+ }
+}
+
+/**
+ * \brief convert fps tag value to AVRational fps
+ * \param fps fps value from tag
+ * \return fps as AVRational, or 0 / 0 if unknown
+ */
+static AVRational fps_tag2avr(int32_t fps) {
+ extern const AVRational ff_frame_rate_tab[];
+ if (fps < 1 || fps > 9) fps = 9;
+ return ff_frame_rate_tab[9 - fps]; // values have opposite order
+}
+
+/**
+ * \brief convert UMF attributes flags to AVRational fps
+ * \param fps fps value from flags
+ * \return fps as AVRational, or 0 / 0 if unknown
+ */
+static AVRational fps_umf2avr(uint32_t flags) {
+ static const AVRational map[] = {{50, 1}, {60000, 1001}, {24, 1},
+ {25, 1}, {30000, 1001}};
+ int idx = av_log2((flags & 0x7c0) >> 6);
+ return map[idx];
+}
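+
+/* Worked example (derived from the code above, not in the original source):
+ the UMF fps flags use a one-hot field in bits 6..10, so
+ av_log2((flags & 0x7c0) >> 6) selects the map index. E.g. bit 6 set gives
+ index 0 -> 50/1 fps, and bit 9 set gives (0x200 & 0x7c0) >> 6 = 8,
+ av_log2(8) = 3 -> 25/1 fps. */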
+
+/**
+ * \brief filters out interesting tags from track information.
+ * \param len length of tag section, will be adjusted to contain remaining bytes
+ * \param si struct to store collected information into
+ */
+static void gxf_track_tags(ByteIOContext *pb, int *len, st_info_t *si) {
+ si->frames_per_second = (AVRational){0, 0};
+ si->fields_per_frame = 0;
+ while (*len >= 2) {
+ track_tag_t tag = get_byte(pb);
+ int tlen = get_byte(pb);
+ *len -= 2;
+ if (tlen > *len)
+ return;
+ *len -= tlen;
+ if (tlen == 4) {
+ uint32_t value = get_be32(pb);
+ if (tag == TRACK_FPS)
+ si->frames_per_second = fps_tag2avr(value);
+ else if (tag == TRACK_FPF && (value == 1 || value == 2))
+ si->fields_per_frame = value;
+ } else
+ url_fskip(pb, tlen);
+ }
+}
+
+/**
+ * \brief read index from FLT packet into stream 0 av_index
+ */
+static void gxf_read_index(AVFormatContext *s, int pkt_len) {
+ ByteIOContext *pb = &s->pb;
+ AVStream *st = s->streams[0];
+ uint32_t fields_per_map = get_le32(pb);
+ uint32_t map_cnt = get_le32(pb);
+ int i;
+ pkt_len -= 8;
+ if (map_cnt > 1000) {
+ av_log(s, AV_LOG_ERROR, "GXF: too many index entries %u (%x)\n", map_cnt, map_cnt);
+ map_cnt = 1000;
+ }
+ if (pkt_len < 4 * map_cnt) {
+ av_log(s, AV_LOG_ERROR, "GXF: invalid index length\n");
+ url_fskip(pb, pkt_len);
+ return;
+ }
+ pkt_len -= 4 * map_cnt;
+ av_add_index_entry(st, 0, 0, 0, 0, 0);
+ for (i = 0; i < map_cnt; i++)
+ av_add_index_entry(st, (uint64_t)get_le32(pb) * 1024,
+ i * (uint64_t)fields_per_map + 1, 0, 0, 0);
+ url_fskip(pb, pkt_len);
+}
+
+static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) {
+ ByteIOContext *pb = &s->pb;
+ pkt_type_t pkt_type;
+ int map_len;
+ int len;
+ AVRational main_timebase = {0, 0};
+ st_info_t si;
+ int i;
+ if (!parse_packet_header(pb, &pkt_type, &map_len) || pkt_type != PKT_MAP) {
+ av_log(s, AV_LOG_ERROR, "GXF: map packet not found\n");
+ return 0;
+ }
+ map_len -= 2;
+ if (get_byte(pb) != 0x0e0 || get_byte(pb) != 0xff) {
+ av_log(s, AV_LOG_ERROR, "GXF: unknown version or invalid map preamble\n");
+ return 0;
+ }
+ map_len -= 2;
+ len = get_be16(pb); // length of material data section
+ if (len > map_len) {
+ av_log(s, AV_LOG_ERROR, "GXF: material data longer than map data\n");
+ return 0;
+ }
+ map_len -= len;
+ gxf_material_tags(pb, &len, &si);
+ url_fskip(pb, len);
+ map_len -= 2;
+ len = get_be16(pb); // length of track description
+ if (len > map_len) {
+ av_log(s, AV_LOG_ERROR, "GXF: track description longer than map data\n");
+ return 0;
+ }
+ map_len -= len;
+ while (len > 0) {
+ int track_type, track_id, track_len;
+ AVStream *st;
+ int idx;
+ len -= 4;
+ track_type = get_byte(pb);
+ track_id = get_byte(pb);
+ track_len = get_be16(pb);
+ len -= track_len;
+ gxf_track_tags(pb, &track_len, &si);
+ url_fskip(pb, track_len);
+ if (!(track_type & 0x80)) {
+ av_log(s, AV_LOG_ERROR, "GXF: invalid track type %x\n", track_type);
+ continue;
+ }
+ track_type &= 0x7f;
+ if ((track_id & 0xc0) != 0xc0) {
+ av_log(s, AV_LOG_ERROR, "GXF: invalid track id %x\n", track_id);
+ continue;
+ }
+ track_id &= 0x3f;
+ idx = get_sindex(s, track_id, track_type);
+ if (idx < 0) continue;
+ st = s->streams[idx];
+ if (!main_timebase.num || !main_timebase.den) {
+ main_timebase.num = si.frames_per_second.den;
+ main_timebase.den = si.frames_per_second.num * si.fields_per_frame;
+ }
+ st->start_time = si.first_field;
+ if (si.first_field != AV_NOPTS_VALUE && si.last_field != AV_NOPTS_VALUE)
+ st->duration = si.last_field - si.first_field;
+ }
+ if (len < 0)
+ av_log(s, AV_LOG_ERROR, "GXF: invalid track description length specified\n");
+ if (map_len)
+ url_fskip(pb, map_len);
+ if (!parse_packet_header(pb, &pkt_type, &len)) {
+ av_log(s, AV_LOG_ERROR, "GXF: sync lost in header\n");
+ return -1;
+ }
+ if (pkt_type == PKT_FLT) {
+ gxf_read_index(s, len);
+ if (!parse_packet_header(pb, &pkt_type, &len)) {
+ av_log(s, AV_LOG_ERROR, "GXF: sync lost in header\n");
+ return -1;
+ }
+ }
+ if (pkt_type == PKT_UMF) {
+ if (len >= 9) {
+ AVRational fps;
+ len -= 9;
+ url_fskip(pb, 5);
+ fps = fps_umf2avr(get_le32(pb));
+ if (!main_timebase.num || !main_timebase.den) {
+ // this may not always be correct, but simply the best we can get
+ main_timebase.num = fps.den;
+ main_timebase.den = fps.num;
+ }
+ } else
+ av_log(s, AV_LOG_INFO, "GXF: UMF packet too short\n");
+ } else
+ av_log(s, AV_LOG_INFO, "GXF: UMF packet missing\n");
+ url_fskip(pb, len);
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ if (main_timebase.num && main_timebase.den)
+ st->time_base = main_timebase;
+ else {
+ st->start_time = st->duration = AV_NOPTS_VALUE;
+ }
+ }
+ return 0;
+}
+
+#define READ_ONE() \
+ { \
+ if (!max_interval-- || url_feof(pb)) \
+ goto out; \
+ tmp = tmp << 8 | get_byte(pb); \
+ }
+
+/**
+ * \brief resync the stream on the next media packet with specified properties
+ * \param max_interval how many bytes to search for matching packet at most
+ * \param track track id the media packet must belong to, -1 for any
+ * \param timestamp minimum timestamp (== field number) the packet must have, -1 for any
+ * \return timestamp of packet found
+ */
+static int64_t gxf_resync_media(AVFormatContext *s, uint64_t max_interval, int track, int timestamp) {
+ uint32_t tmp;
+ uint64_t last_pos;
+ uint64_t last_found_pos = 0;
+ int cur_track;
+ int64_t cur_timestamp = AV_NOPTS_VALUE;
+ int len;
+ ByteIOContext *pb = &s->pb;
+ pkt_type_t type;
+ tmp = get_be32(pb);
+start:
+ while (tmp)
+ READ_ONE();
+ READ_ONE();
+ if (tmp != 1)
+ goto start;
+ last_pos = url_ftell(pb);
+ url_fseek(pb, -5, SEEK_CUR);
+ if (!parse_packet_header(pb, &type, &len) || type != PKT_MEDIA) {
+ url_fseek(pb, last_pos, SEEK_SET);
+ goto start;
+ }
+ get_byte(pb);
+ cur_track = get_byte(pb);
+ cur_timestamp = get_be32(pb);
+ last_found_pos = url_ftell(pb) - 16 - 6;
+ if ((track >= 0 && track != cur_track) || (timestamp >= 0 && timestamp > cur_timestamp)) {
+ url_fseek(pb, last_pos, SEEK_SET);
+ goto start;
+ }
+out:
+ if (last_found_pos)
+ url_fseek(pb, last_found_pos, SEEK_SET);
+ return cur_timestamp;
+}
+
+static int gxf_packet(AVFormatContext *s, AVPacket *pkt) {
+ ByteIOContext *pb = &s->pb;
+ pkt_type_t pkt_type;
+ int pkt_len;
+ while (!url_feof(pb)) {
+ int track_type, track_id, ret;
+ int field_nr;
+ if (!parse_packet_header(pb, &pkt_type, &pkt_len)) {
+ if (!url_feof(pb))
+ av_log(s, AV_LOG_ERROR, "GXF: sync lost\n");
+ return -1;
+ }
+ if (pkt_type == PKT_FLT) {
+ gxf_read_index(s, pkt_len);
+ continue;
+ }
+ if (pkt_type != PKT_MEDIA) {
+ url_fskip(pb, pkt_len);
+ continue;
+ }
+ if (pkt_len < 16) {
+ av_log(s, AV_LOG_ERROR, "GXF: invalid media packet length\n");
+ continue;
+ }
+ pkt_len -= 16;
+ track_type = get_byte(pb);
+ track_id = get_byte(pb);
+ field_nr = get_be32(pb);
+ get_be32(pb); // field information
+ get_be32(pb); // "timeline" field number
+ get_byte(pb); // flags
+ get_byte(pb); // reserved
+ // NOTE: there is also data length information in the
+ // field information; it might be better to take this into account
+ // as well.
+ ret = av_get_packet(pb, pkt, pkt_len);
+ pkt->stream_index = get_sindex(s, track_id, track_type);
+ pkt->dts = field_nr;
+ return ret;
+ }
+ return AVERROR_IO;
+}
+
+static int gxf_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) {
+ uint64_t pos;
+ uint64_t maxlen = 100 * 1024 * 1024;
+ AVStream *st = s->streams[0];
+ int64_t start_time = s->streams[stream_index]->start_time;
+ int64_t found;
+ int idx;
+ if (timestamp < start_time) timestamp = start_time;
+ idx = av_index_search_timestamp(st, timestamp - start_time,
+ AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
+ if (idx < 0)
+ return -1;
+ pos = st->index_entries[idx].pos;
+ if (idx < st->nb_index_entries - 2)
+ maxlen = st->index_entries[idx + 2].pos - pos;
+ maxlen = FFMAX(maxlen, 200 * 1024);
+ url_fseek(&s->pb, pos, SEEK_SET);
+ found = gxf_resync_media(s, maxlen, -1, timestamp);
+ if (FFABS(found - timestamp) > 4)
+ return -1;
+ return 0;
+}
+
+static int64_t gxf_read_timestamp(AVFormatContext *s, int stream_index,
+ int64_t *pos, int64_t pos_limit) {
+ ByteIOContext *pb = &s->pb;
+ int64_t res;
+ url_fseek(pb, *pos, SEEK_SET);
+ res = gxf_resync_media(s, pos_limit - *pos, -1, -1);
+ *pos = url_ftell(pb);
+ return res;
+}
+
+AVInputFormat gxf_demuxer = {
+ "gxf",
+ "GXF format",
+ 0,
+ gxf_probe,
+ gxf_header,
+ gxf_packet,
+ NULL,
+ gxf_seek,
+ gxf_read_timestamp,
+};
diff --git a/contrib/ffmpeg/libavformat/gxf.h b/contrib/ffmpeg/libavformat/gxf.h
new file mode 100644
index 000000000..0e2a31ca4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gxf.h
@@ -0,0 +1,34 @@
+/*
+ * GXF demuxer
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FFMPEG_GXF_H
+#define FFMPEG_GXF_H
+
+/* gxf.c */
+typedef enum {
+ PKT_MAP = 0xbc,
+ PKT_MEDIA = 0xbf,
+ PKT_EOS = 0xfb,
+ PKT_FLT = 0xfc,
+ PKT_UMF = 0xfd
+} pkt_type_t;
+
+#endif /* FFMPEG_GXF_H */
diff --git a/contrib/ffmpeg/libavformat/gxfenc.c b/contrib/ffmpeg/libavformat/gxfenc.c
new file mode 100644
index 000000000..fef5ec104
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gxfenc.c
@@ -0,0 +1,829 @@
+/*
+ * GXF muxer.
+ * Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "gxf.h"
+#include "riff.h"
+#include "fifo.h"
+
+#define GXF_AUDIO_PACKET_SIZE 65536
+
+typedef struct GXFStreamContext {
+ AVCodecContext *codec;
+ AVFifoBuffer audio_buffer;
+ uint32_t track_type;
+ uint32_t sample_size;
+ uint32_t sample_rate;
+ uint16_t media_type;
+ uint16_t media_info;
+ uint8_t index;
+ int frame_rate_index;
+ int lines_index;
+ int fields;
+ int iframes;
+ int pframes;
+ int bframes;
+ int p_per_gop;
+ int b_per_gop;
+ int first_gop_closed;
+ int64_t current_dts;
+ int dts_delay;
+} GXFStreamContext;
+
+typedef struct GXFContext {
+ uint32_t nb_frames;
+ uint32_t material_flags;
+ uint16_t audio_tracks;
+ uint16_t mpeg_tracks;
+ int64_t creation_time;
+ uint32_t umf_start_offset;
+ uint32_t umf_track_offset;
+ uint32_t umf_media_offset;
+ uint32_t umf_user_data_offset;
+ uint32_t umf_user_data_size;
+ uint32_t umf_length;
+ uint16_t umf_track_size;
+ uint16_t umf_media_size;
+ int audio_written;
+ int sample_rate;
+ int flags;
+ AVFormatContext *fc;
+ GXFStreamContext streams[48];
+} GXFContext;
+
+typedef struct GXF_Lines {
+ int height;
+ int index;
+} GXF_Lines;
+
+
+/* FIXME check if it is relevant */
+static const GXF_Lines gxf_lines_tab[] = {
+ { 480, 1 }, /* NTSC */
+ { 512, 1 }, /* NTSC + VBI */
+ { 576, 2 }, /* PAL */
+ { 608, 2 }, /* PAL + VBI */
+ { 1080, 4 },
+ { 720, 6 },
+};
+
+static const CodecTag gxf_media_types[] = {
+ { CODEC_ID_MJPEG , 3 }, /* NTSC */
+ { CODEC_ID_MJPEG , 4 }, /* PAL */
+ { CODEC_ID_PCM_S24LE , 9 },
+ { CODEC_ID_PCM_S16LE , 10 },
+ { CODEC_ID_MPEG2VIDEO, 11 }, /* NTSC */
+ { CODEC_ID_MPEG2VIDEO, 12 }, /* PAL */
+ { CODEC_ID_DVVIDEO , 13 }, /* NTSC */
+ { CODEC_ID_DVVIDEO , 14 }, /* PAL */
+ { CODEC_ID_DVVIDEO , 15 }, /* 50M NTSC */
+ { CODEC_ID_DVVIDEO , 16 }, /* 50M PAL */
+ { CODEC_ID_AC3 , 17 },
+ //{ CODEC_ID_NONE, , 18 }, /* Non compressed 24 bit audio */
+ { CODEC_ID_MPEG2VIDEO, 20 }, /* MPEG HD */
+ { CODEC_ID_MPEG1VIDEO, 22 }, /* NTSC */
+ { CODEC_ID_MPEG1VIDEO, 23 }, /* PAL */
+ { 0, 0 },
+};
+
+#define SERVER_PATH "/space/"
+#define ES_NAME_PATTERN "ES."
+
+static int gxf_find_lines_index(GXFStreamContext *ctx)
+{
+ int i;
+
+ for (i = 0; i < 6; ++i) {
+ if (ctx->codec->height == gxf_lines_tab[i].height) {
+ ctx->lines_index = gxf_lines_tab[i].index;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static void gxf_write_padding(ByteIOContext *pb, offset_t to_pad)
+{
+ for (; to_pad > 0; to_pad--) {
+ put_byte(pb, 0);
+ }
+}
+
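+/* Pad the packet to a multiple of 4 bytes and backpatch the 32-bit size
+ * field of its header (6 bytes after the packet start) with the total
+ * packet length, which is also returned. */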
+static offset_t updatePacketSize(ByteIOContext *pb, offset_t pos)
+{
+ offset_t curpos;
+ int size;
+
+ size = url_ftell(pb) - pos;
+ if (size % 4) {
+ gxf_write_padding(pb, 4 - size % 4);
+ size = url_ftell(pb) - pos;
+ }
+ curpos = url_ftell(pb);
+ url_fseek(pb, pos + 6, SEEK_SET);
+ put_be32(pb, size);
+ url_fseek(pb, curpos, SEEK_SET);
+ return curpos - pos;
+}
+
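+/* Backpatch the 16-bit size placeholder at 'pos' with the number of bytes
+ * written after it; returns the section length including the size field. */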
+static offset_t updateSize(ByteIOContext *pb, offset_t pos)
+{
+ offset_t curpos;
+
+ curpos = url_ftell(pb);
+ url_fseek(pb, pos, SEEK_SET);
+ put_be16(pb, curpos - pos - 2);
+ url_fseek(pb, curpos, SEEK_SET);
+ return curpos - pos;
+}
+
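+/* Write the common 16-byte GXF packet header: a 32-bit zero leader and the
+ * 0x01 byte used for resynchronization, the packet type, a 32-bit size field
+ * (filled in later by updatePacketSize), a reserved dword and the 0xE1/0xE2
+ * trailer bytes. */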
+static void gxf_write_packet_header(ByteIOContext *pb, pkt_type_t type)
+{
+ put_be32(pb, 0); /* packet leader for synchro */
+ put_byte(pb, 1);
+    put_byte(pb, type);          /* packet type */
+ put_be32(pb, 0); /* size */
+ put_be32(pb, 0); /* reserved */
+ put_byte(pb, 0xE1); /* trailer 1 */
+ put_byte(pb, 0xE2); /* trailer 2 */
+}
+
+static int gxf_write_mpeg_auxiliary(ByteIOContext *pb, GXFStreamContext *ctx)
+{
+ char buffer[1024];
+ int size;
+
+ if (ctx->iframes) {
+ ctx->p_per_gop = ctx->pframes / ctx->iframes;
+ if (ctx->pframes % ctx->iframes)
+ ctx->p_per_gop++;
+ if (ctx->pframes)
+ ctx->b_per_gop = ctx->bframes / ctx->pframes;
+ if (ctx->p_per_gop > 9)
+ ctx->p_per_gop = 9; /* ensure value won't take more than one char */
+ if (ctx->b_per_gop > 9)
+ ctx->b_per_gop = 9; /* ensure value won't take more than one char */
+ }
+ size = snprintf(buffer, 1024, "Ver 1\nBr %.6f\nIpg 1\nPpi %d\nBpiop %d\n"
+ "Pix 0\nCf %d\nCg %d\nSl 7\nnl16 %d\nVi 1\nf1 1\n",
+ (float)ctx->codec->bit_rate, ctx->p_per_gop, ctx->b_per_gop,
+ ctx->codec->pix_fmt == PIX_FMT_YUV422P ? 2 : 1, ctx->first_gop_closed == 1,
+ ctx->codec->height / 16);
+ put_byte(pb, 0x4F);
+ put_byte(pb, size + 1);
+ put_buffer(pb, (uint8_t *)buffer, size + 1);
+ return size + 3;
+}
+
+static int gxf_write_timecode_auxiliary(ByteIOContext *pb, GXFStreamContext *ctx)
+{
+ /* FIXME implement that */
+ put_byte(pb, 0); /* fields */
+ put_byte(pb, 0); /* seconds */
+ put_byte(pb, 0); /* minutes */
+ put_byte(pb, 0); /* flags + hours */
+ /* reserved */
+ put_be32(pb, 0);
+ return 8;
+}
+
+static int gxf_write_track_description(ByteIOContext *pb, GXFStreamContext *stream)
+{
+ offset_t pos;
+
+ /* track description section */
+ put_byte(pb, stream->media_type + 0x80);
+ put_byte(pb, stream->index + 0xC0);
+
+ pos = url_ftell(pb);
+ put_be16(pb, 0); /* size */
+
+ /* media file name */
+ put_byte(pb, 0x4C);
+ put_byte(pb, strlen(ES_NAME_PATTERN) + 3);
+ put_tag(pb, ES_NAME_PATTERN);
+ put_be16(pb, stream->media_info);
+ put_byte(pb, 0);
+
+ if (stream->codec->codec_id != CODEC_ID_MPEG2VIDEO) {
+ /* auxiliary information */
+ put_byte(pb, 0x4D);
+ put_byte(pb, 8);
+ if (stream->codec->codec_id == CODEC_ID_NONE)
+ gxf_write_timecode_auxiliary(pb, stream);
+ else
+ put_le64(pb, 0);
+ }
+
+ /* file system version */
+ put_byte(pb, 0x4E);
+ put_byte(pb, 4);
+ put_be32(pb, 0);
+
+ if (stream->codec->codec_id == CODEC_ID_MPEG2VIDEO)
+ gxf_write_mpeg_auxiliary(pb, stream);
+
+ /* frame rate */
+ put_byte(pb, 0x50);
+ put_byte(pb, 4);
+ put_be32(pb, stream->frame_rate_index);
+
+ /* lines per frame */
+ put_byte(pb, 0x51);
+ put_byte(pb, 4);
+ put_be32(pb, stream->lines_index);
+
+ /* fields per frame */
+ put_byte(pb, 0x52);
+ put_byte(pb, 4);
+ put_be32(pb, stream->fields);
+
+ return updateSize(pb, pos);
+}
+
+static int gxf_write_material_data_section(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos;
+ const char *filename = strrchr(ctx->fc->filename, '/');
+
+ pos = url_ftell(pb);
+ put_be16(pb, 0); /* size */
+
+ /* name */
+ if (filename)
+ filename++;
+ else
+ filename = ctx->fc->filename;
+ put_byte(pb, 0x40);
+ put_byte(pb, strlen(SERVER_PATH) + strlen(filename) + 1);
+ put_tag(pb, SERVER_PATH);
+ put_tag(pb, filename);
+ put_byte(pb, 0);
+
+ /* first field */
+ put_byte(pb, 0x41);
+ put_byte(pb, 4);
+ put_be32(pb, 0);
+
+ /* last field */
+ put_byte(pb, 0x42);
+ put_byte(pb, 4);
+ put_be32(pb, ctx->nb_frames);
+
+ /* reserved */
+ put_byte(pb, 0x43);
+ put_byte(pb, 4);
+ put_be32(pb, 0);
+
+ put_byte(pb, 0x44);
+ put_byte(pb, 4);
+ put_be32(pb, ctx->nb_frames);
+
+ /* estimated size */
+ put_byte(pb, 0x45);
+ put_byte(pb, 4);
+ put_be32(pb, url_fsize(pb) / 1024);
+
+ return updateSize(pb, pos);
+}
+
+static int gxf_write_track_description_section(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos;
+ int i;
+
+ pos = url_ftell(pb);
+ put_be16(pb, 0); /* size */
+ for (i = 0; i < ctx->fc->nb_streams; ++i)
+ gxf_write_track_description(pb, &ctx->streams[i]);
+ return updateSize(pb, pos);
+}
+
+static int gxf_write_map_packet(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+
+ gxf_write_packet_header(pb, PKT_MAP);
+
+ /* preamble */
+ put_byte(pb, 0xE0); /* version */
+ put_byte(pb, 0xFF); /* reserved */
+
+ gxf_write_material_data_section(pb, ctx);
+ gxf_write_track_description_section(pb, ctx);
+
+ return updatePacketSize(pb, pos);
+}
+
+#if 0
+static int gxf_write_flt_packet(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+ int i;
+
+ gxf_write_packet_header(pb, PKT_FLT);
+
+ put_le32(pb, 1000); /* number of fields */
+ put_le32(pb, 0); /* number of active flt entries */
+
+ for (i = 0; i < 1000; ++i) {
+ put_le32(pb, 0);
+ }
+ return updatePacketSize(pb, pos);
+}
+#endif
+
+static int gxf_write_umf_material_description(ByteIOContext *pb, GXFContext *ctx)
+{
+ put_le32(pb, ctx->flags);
+ put_le32(pb, ctx->nb_frames); /* length of the longest track */
+ put_le32(pb, ctx->nb_frames); /* length of the shortest track */
+ put_le32(pb, 0); /* mark in */
+ put_le32(pb, ctx->nb_frames); /* mark out */
+ put_le32(pb, 0); /* timecode mark in */
+ put_le32(pb, ctx->nb_frames); /* timecode mark out */
+ put_le64(pb, ctx->fc->timestamp); /* modification time */
+ put_le64(pb, ctx->fc->timestamp); /* creation time */
+ put_le16(pb, 0); /* reserved */
+ put_le16(pb, 0); /* reserved */
+ put_le16(pb, ctx->audio_tracks);
+ put_le16(pb, 0); /* timecode track count */
+ put_le16(pb, 0); /* reserved */
+ put_le16(pb, ctx->mpeg_tracks);
+ return 48;
+}
+
+static int gxf_write_umf_payload(ByteIOContext *pb, GXFContext *ctx)
+{
+ put_le32(pb, ctx->umf_length); /* total length of the umf data */
+ put_le32(pb, 3); /* version */
+ put_le32(pb, ctx->fc->nb_streams);
+ put_le32(pb, ctx->umf_track_offset); /* umf track section offset */
+ put_le32(pb, ctx->umf_track_size);
+ put_le32(pb, ctx->fc->nb_streams);
+ put_le32(pb, ctx->umf_media_offset);
+ put_le32(pb, ctx->umf_media_size);
+ put_le32(pb, ctx->umf_user_data_offset); /* user data offset */
+ put_le32(pb, ctx->umf_user_data_size); /* user data size */
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+ return 48;
+}
+
+static int gxf_write_umf_track_description(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+ int tracks[255]={0};
+ int i;
+
+ ctx->umf_track_offset = pos - ctx->umf_start_offset;
+ for (i = 0; i < ctx->fc->nb_streams; ++i) {
+ AVStream *st = ctx->fc->streams[i];
+ GXFStreamContext *sc = &ctx->streams[i];
+ int id = 0;
+
+ switch (st->codec->codec_id) {
+ case CODEC_ID_MPEG1VIDEO: id= 'L'; break;
+ case CODEC_ID_MPEG2VIDEO: id= 'M'; break;
+ case CODEC_ID_PCM_S16LE: id= 'A'; break;
+ case CODEC_ID_DVVIDEO: id= sc->track_type == 6 ? 'E' : 'D'; break;
+ case CODEC_ID_MJPEG: id= 'V'; break;
+ default: break;
+ }
+ sc->media_info= id << 8;
+        /* FIXME first 10 audio tracks are 0 to 9, the next 22 are A to V */
+ sc->media_info |= '0' + (tracks[id]++);
+ put_le16(pb, sc->media_info);
+ put_le16(pb, 1);
+ }
+ return url_ftell(pb) - pos;
+}
+
+static int gxf_write_umf_media_mpeg(ByteIOContext *pb, GXFStreamContext *stream)
+{
+ if (stream->codec->pix_fmt == PIX_FMT_YUV422P)
+ put_le32(pb, 2);
+ else
+ put_le32(pb, 1); /* default to 420 */
+ put_le32(pb, stream->first_gop_closed == 1); /* closed = 1, open = 0, unknown = 255 */
+ put_le32(pb, 3); /* top = 1, bottom = 2, frame = 3, unknown = 0 */
+ put_le32(pb, 1); /* I picture per GOP */
+ put_le32(pb, stream->p_per_gop);
+ put_le32(pb, stream->b_per_gop);
+ if (stream->codec->codec_id == CODEC_ID_MPEG2VIDEO)
+ put_le32(pb, 2);
+ else if (stream->codec->codec_id == CODEC_ID_MPEG1VIDEO)
+ put_le32(pb, 1);
+ else
+ put_le32(pb, 0);
+ put_le32(pb, 0); /* reserved */
+ return 32;
+}
+
+static int gxf_write_umf_media_timecode(ByteIOContext *pb, GXFStreamContext *track)
+{
+ /* FIXME implement */
+ put_be32(pb, 0); /* drop frame flag */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ return 32;
+}
+
+static int gxf_write_umf_media_dv(ByteIOContext *pb, GXFStreamContext *track)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ put_be32(pb, 0);
+ }
+ return 32;
+}
+
+static int gxf_write_umf_media_audio(ByteIOContext *pb, GXFStreamContext *track)
+{
+ put_le64(pb, av_dbl2int(1)); /* sound level to begin to */
+ put_le64(pb, av_dbl2int(1)); /* sound level to begin to */
+ put_le32(pb, 0); /* number of fields over which to ramp up sound level */
+ put_le32(pb, 0); /* number of fields over which to ramp down sound level */
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+ return 32;
+}
+
+#if 0
+static int gxf_write_umf_media_mjpeg(ByteIOContext *pb, GXFStreamContext *track)
+{
+ put_be64(pb, 0); /* FIXME FLOAT max chroma quant level */
+ put_be64(pb, 0); /* FIXME FLOAT max luma quant level */
+ put_be64(pb, 0); /* FIXME FLOAT min chroma quant level */
+ put_be64(pb, 0); /* FIXME FLOAT min luma quant level */
+ return 32;
+}
+#endif
+
+static int gxf_write_umf_media_description(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos;
+ int i;
+
+ pos = url_ftell(pb);
+ ctx->umf_media_offset = pos - ctx->umf_start_offset;
+ for (i = 0; i < ctx->fc->nb_streams; ++i) {
+ GXFStreamContext *sc = &ctx->streams[i];
+ char buffer[88];
+ offset_t startpos, curpos;
+ int path_size = strlen(ES_NAME_PATTERN);
+
+ memset(buffer, 0, 88);
+ startpos = url_ftell(pb);
+ put_le16(pb, 0); /* length */
+ put_le16(pb, sc->media_info);
+ put_le16(pb, 0); /* reserved */
+ put_le16(pb, 0); /* reserved */
+ put_le32(pb, ctx->nb_frames);
+ put_le32(pb, 0); /* attributes rw, ro */
+ put_le32(pb, 0); /* mark in */
+ put_le32(pb, ctx->nb_frames); /* mark out */
+ strncpy(buffer, ES_NAME_PATTERN, path_size);
+ put_buffer(pb, (uint8_t *)buffer, path_size);
+ put_be16(pb, sc->media_info);
+ put_buffer(pb, (uint8_t *)buffer + path_size + 2, 88 - path_size - 2);
+ put_le32(pb, sc->track_type);
+ put_le32(pb, sc->sample_rate);
+ put_le32(pb, sc->sample_size);
+ put_le32(pb, 0); /* reserved */
+ switch (sc->codec->codec_id) {
+ case CODEC_ID_MPEG2VIDEO:
+ gxf_write_umf_media_mpeg(pb, sc);
+ break;
+ case CODEC_ID_PCM_S16LE:
+ gxf_write_umf_media_audio(pb, sc);
+ break;
+ case CODEC_ID_DVVIDEO:
+ gxf_write_umf_media_dv(pb, sc);
+ break;
+ default:
+            gxf_write_umf_media_timecode(pb, sc); /* eight zero 32-bit fields */
+ }
+ curpos = url_ftell(pb);
+ url_fseek(pb, startpos, SEEK_SET);
+ put_le16(pb, curpos - startpos);
+ url_fseek(pb, curpos, SEEK_SET);
+ }
+ return url_ftell(pb) - pos;
+}
+
+static int gxf_write_umf_user_data(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+ ctx->umf_user_data_offset = pos - ctx->umf_start_offset;
+ put_le32(pb, 20);
+ put_le32(pb, 0);
+ put_le16(pb, 0);
+ put_le16(pb, 0);
+ put_le32(pb, 0);
+ put_byte(pb, 0);
+ put_byte(pb, 0);
+ put_byte(pb, 0);
+ put_byte(pb, 0);
+ return 20;
+}
+
+static int gxf_write_umf_packet(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+
+ gxf_write_packet_header(pb, PKT_UMF);
+
+ /* preamble */
+ put_byte(pb, 3); /* first and last (only) packet */
+ put_be32(pb, ctx->umf_length); /* data length */
+
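+    /* NOTE: on the first call (from gxf_write_header) the data length above
+     * and the offsets/sizes written by gxf_write_umf_payload() are still
+     * zero; they get their final values when this packet is rewritten from
+     * gxf_write_trailer(). */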
+ ctx->umf_start_offset = url_ftell(pb);
+ gxf_write_umf_payload(pb, ctx);
+ gxf_write_umf_material_description(pb, ctx);
+ ctx->umf_track_size = gxf_write_umf_track_description(pb, ctx);
+ ctx->umf_media_size = gxf_write_umf_media_description(pb, ctx);
+ ctx->umf_user_data_size = gxf_write_umf_user_data(pb, ctx);
+ ctx->umf_length = url_ftell(pb) - ctx->umf_start_offset;
+ return updatePacketSize(pb, pos);
+}
+
+static int gxf_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ GXFContext *gxf = s->priv_data;
+ int i;
+
+ gxf->fc = s;
+ gxf->flags |= 0x00080000; /* material is simple clip */
+ for (i = 0; i < s->nb_streams; ++i) {
+ AVStream *st = s->streams[i];
+ GXFStreamContext *sc = &gxf->streams[i];
+
+ sc->codec = st->codec;
+ sc->index = i;
+ sc->media_type = codec_get_tag(gxf_media_types, sc->codec->codec_id);
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ if (st->codec->codec_id != CODEC_ID_PCM_S16LE) {
+ av_log(s, AV_LOG_ERROR, "only 16 BIT PCM LE allowed for now\n");
+ return -1;
+ }
+ if (st->codec->sample_rate != 48000) {
+ av_log(s, AV_LOG_ERROR, "only 48000hz sampling rate is allowed\n");
+ return -1;
+ }
+ if (st->codec->channels != 1) {
+ av_log(s, AV_LOG_ERROR, "only mono tracks are allowed\n");
+ return -1;
+ }
+ sc->track_type = 2;
+ sc->sample_rate = st->codec->sample_rate;
+ av_set_pts_info(st, 64, 1, sc->sample_rate);
+ sc->sample_size = 16;
+ sc->frame_rate_index = -2;
+ sc->lines_index = -2;
+ sc->fields = -2;
+ gxf->audio_tracks++;
+ gxf->flags |= 0x04000000; /* audio is 16 bit pcm */
+ av_fifo_init(&sc->audio_buffer, 3*GXF_AUDIO_PACKET_SIZE);
+ } else if (sc->codec->codec_type == CODEC_TYPE_VIDEO) {
+ /* FIXME check from time_base ? */
+ if (sc->codec->height == 480 || sc->codec->height == 512) { /* NTSC or NTSC+VBI */
+ sc->frame_rate_index = 5;
+ sc->sample_rate = 60;
+ gxf->flags |= 0x00000080;
+ } else { /* assume PAL */
+ sc->frame_rate_index = 6;
+ sc->media_type++;
+ sc->sample_rate = 50;
+ gxf->flags |= 0x00000040;
+ }
+ gxf->sample_rate = sc->sample_rate;
+ av_set_pts_info(st, 64, 1, sc->sample_rate);
+ if (gxf_find_lines_index(sc) < 0)
+ sc->lines_index = -1;
+ sc->sample_size = st->codec->bit_rate;
+ sc->fields = 2; /* interlaced */
+ switch (sc->codec->codec_id) {
+ case CODEC_ID_MPEG2VIDEO:
+ sc->first_gop_closed = -1;
+ sc->track_type = 4;
+ gxf->mpeg_tracks++;
+ gxf->flags |= 0x00008000;
+ break;
+ case CODEC_ID_DVVIDEO:
+ if (sc->codec->pix_fmt == PIX_FMT_YUV422P) {
+ sc->media_type += 2;
+ sc->track_type = 6;
+ gxf->flags |= 0x00002000;
+ } else {
+ sc->track_type = 5;
+ gxf->flags |= 0x00001000;
+ }
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "video codec not supported\n");
+ return -1;
+ }
+ }
+ }
+ gxf_write_map_packet(pb, gxf);
+ //gxf_write_flt_packet(pb, gxf);
+ gxf_write_umf_packet(pb, gxf);
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int gxf_write_eos_packet(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+
+ gxf_write_packet_header(pb, PKT_EOS);
+ return updatePacketSize(pb, pos);
+}
+
+static int gxf_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ GXFContext *gxf = s->priv_data;
+ offset_t end;
+ int i;
+
+ for (i = 0; i < s->nb_streams; ++i) {
+ if (s->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
+ av_fifo_free(&gxf->streams[i].audio_buffer);
+ }
+ if (s->streams[i]->codec->frame_number > gxf->nb_frames)
+ gxf->nb_frames = 2 * s->streams[i]->codec->frame_number;
+ }
+
+ gxf_write_eos_packet(pb, gxf);
+ end = url_ftell(pb);
+ url_fseek(pb, 0, SEEK_SET);
+ /* overwrite map and umf packets with new values */
+ gxf_write_map_packet(pb, gxf);
+ //gxf_write_flt_packet(pb, gxf);
+ gxf_write_umf_packet(pb, gxf);
+ url_fseek(pb, end, SEEK_SET);
+ return 0;
+}
+
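+/* Scan the frame up to the first picture start code (0x00000100); if a GOP
+ * start code (0x000001B8) is seen on the way and the closed-GOP state is
+ * still unknown, record its closed_gop flag. Returns the picture_coding_type
+ * from the picture header (1 = I, 2 = P, 3 = B). */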
+static int gxf_parse_mpeg_frame(GXFStreamContext *sc, const uint8_t *buf, int size)
+{
+ uint32_t c=-1;
+ int i;
+ for(i=0; i<size-4 && c!=0x100; i++){
+ c = (c<<8) + buf[i];
+ if(c == 0x1B8 && sc->first_gop_closed == -1) /* GOP start code */
+ sc->first_gop_closed= (buf[i+4]>>6)&1;
+ }
+ return (buf[i+1]>>3)&7;
+}
+
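+/* Write the 16-byte media packet preamble: media type, track index, the
+ * field number used as dts, a codec-dependent 32-bit size/frame-type field,
+ * the field number again, a flags byte and a reserved byte. */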
+static int gxf_write_media_preamble(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt, int size)
+{
+ GXFStreamContext *sc = &ctx->streams[pkt->stream_index];
+ int64_t dts = av_rescale(pkt->dts, ctx->sample_rate, sc->sample_rate);
+
+ put_byte(pb, sc->media_type);
+ put_byte(pb, sc->index);
+ put_be32(pb, dts);
+ if (sc->codec->codec_type == CODEC_TYPE_AUDIO) {
+ put_be16(pb, 0);
+ put_be16(pb, size / 2);
+ } else if (sc->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
+ int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
+ if (frame_type == FF_I_TYPE) {
+ put_byte(pb, 0x0d);
+ sc->iframes++;
+ } else if (frame_type == FF_B_TYPE) {
+ put_byte(pb, 0x0f);
+ sc->bframes++;
+ } else {
+ put_byte(pb, 0x0e);
+ sc->pframes++;
+ }
+ put_be24(pb, size);
+ } else if (sc->codec->codec_id == CODEC_ID_DVVIDEO) {
+ put_byte(pb, size / 4096);
+ put_be24(pb, 0);
+ } else
+ put_be32(pb, size);
+ put_be32(pb, dts);
+ put_byte(pb, 1); /* flags */
+ put_byte(pb, 0); /* reserved */
+ return 16;
+}
+
+static int gxf_write_media_packet(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt)
+{
+ GXFStreamContext *sc = &ctx->streams[pkt->stream_index];
+ offset_t pos = url_ftell(pb);
+ int padding = 0;
+
+ gxf_write_packet_header(pb, PKT_MEDIA);
+ if (sc->codec->codec_id == CODEC_ID_MPEG2VIDEO && pkt->size % 4) /* MPEG-2 frames must be padded */
+ padding = 4 - pkt->size % 4;
+ else if (sc->codec->codec_type == CODEC_TYPE_AUDIO)
+ padding = GXF_AUDIO_PACKET_SIZE - pkt->size;
+ gxf_write_media_preamble(pb, ctx, pkt, pkt->size + padding);
+ put_buffer(pb, pkt->data, pkt->size);
+ gxf_write_padding(pb, padding);
+ return updatePacketSize(pb, pos);
+}
+
+static int gxf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ GXFContext *gxf = s->priv_data;
+
+ gxf_write_media_packet(&s->pb, gxf, pkt);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int gxf_new_audio_packet(GXFContext *gxf, GXFStreamContext *sc, AVPacket *pkt, int flush)
+{
+ int size = flush ? av_fifo_size(&sc->audio_buffer) : GXF_AUDIO_PACKET_SIZE;
+
+ if (!size)
+ return 0;
+ av_new_packet(pkt, size);
+ av_fifo_read(&sc->audio_buffer, pkt->data, size);
+ pkt->stream_index = sc->index;
+ pkt->dts = sc->current_dts;
+ sc->current_dts += size / 2; /* we only support 16 bit pcm mono for now */
+ return size;
+}
+
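+/* Collect audio in a per-stream FIFO and re-emit it as fixed-size GXF audio
+ * packets; for other streams a negative initial dts is compensated so
+ * timestamps start at zero. The actual ordering is then delegated to
+ * av_interleave_packet_per_dts(). */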
+static int gxf_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
+{
+ GXFContext *gxf = s->priv_data;
+ AVPacket new_pkt;
+ int i;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ GXFStreamContext *sc = &gxf->streams[i];
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ if (pkt && pkt->stream_index == i) {
+ av_fifo_write(&sc->audio_buffer, pkt->data, pkt->size);
+ pkt = NULL;
+ }
+ if (flush || av_fifo_size(&sc->audio_buffer) >= GXF_AUDIO_PACKET_SIZE) {
+ if (!pkt && gxf_new_audio_packet(gxf, sc, &new_pkt, flush) > 0) {
+ pkt = &new_pkt;
+ break; /* add pkt right now into list */
+ }
+ }
+ } else if (pkt) {
+ /* adjust dts if negative */
+ if (pkt->dts < 0 && !sc->dts_delay) {
+ /* XXX: rescale if codec time base is different from stream time base */
+ sc->dts_delay = av_rescale_q(pkt->dts, st->codec->time_base, st->time_base);
+ pkt->dts = sc->dts_delay; /* set to 0 */
+ }
+ pkt->dts -= sc->dts_delay;
+ }
+ }
+ return av_interleave_packet_per_dts(s, out, pkt, flush);
+}
+
+AVOutputFormat gxf_muxer = {
+ "gxf",
+ "GXF format",
+ NULL,
+ "gxf",
+ sizeof(GXFContext),
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_MPEG2VIDEO,
+ gxf_write_header,
+ gxf_write_packet,
+ gxf_write_trailer,
+ 0,
+ NULL,
+ gxf_interleave_packet,
+};
diff --git a/contrib/ffmpeg/libavformat/http.c b/contrib/ffmpeg/libavformat/http.c
new file mode 100644
index 000000000..34dd5031a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/http.c
@@ -0,0 +1,289 @@
+/*
+ * HTTP protocol for ffmpeg client
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+
+#include "base64.h"
+
+/* XXX: POST protocol is not completely implemented because ffmpeg uses
+   only a subset of it */
+
+//#define DEBUG
+
+/* used for protocol handling */
+#define BUFFER_SIZE 1024
+#define URL_SIZE 4096
+
+typedef struct {
+ URLContext *hd;
+ unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end;
+ int line_count;
+ int http_code;
+ char location[URL_SIZE];
+} HTTPContext;
+
+static int http_connect(URLContext *h, const char *path, const char *hoststr,
+ const char *auth);
+static int http_write(URLContext *h, uint8_t *buf, int size);
+
+
+/* return non zero if error */
+static int http_open(URLContext *h, const char *uri, int flags)
+{
+ const char *path, *proxy_path;
+ char hostname[1024], hoststr[1024];
+ char auth[1024];
+ char path1[1024];
+ char buf[1024];
+ int port, use_proxy, err;
+ HTTPContext *s;
+ URLContext *hd = NULL;
+
+ h->is_streamed = 1;
+
+ s = av_malloc(sizeof(HTTPContext));
+ if (!s) {
+ return -ENOMEM;
+ }
+ h->priv_data = s;
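+/* Seek using the field-number index built from FLT packets: pick the closest
+ * preceding index entry, jump there and resync on the next media packet,
+ * failing if the timestamp found is more than 4 fields from the target. */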
+
+ proxy_path = getenv("http_proxy");
+ use_proxy = (proxy_path != NULL) && !getenv("no_proxy") &&
+ strstart(proxy_path, "http://", NULL);
+
+ /* fill the dest addr */
+ redo:
+ /* needed in any case to build the host string */
+ url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
+ path1, sizeof(path1), uri);
+ if (port > 0) {
+ snprintf(hoststr, sizeof(hoststr), "%s:%d", hostname, port);
+ } else {
+ pstrcpy(hoststr, sizeof(hoststr), hostname);
+ }
+
+ if (use_proxy) {
+ url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
+ NULL, 0, proxy_path);
+ path = uri;
+ } else {
+ if (path1[0] == '\0')
+ path = "/";
+ else
+ path = path1;
+ }
+ if (port < 0)
+ port = 80;
+
+ snprintf(buf, sizeof(buf), "tcp://%s:%d", hostname, port);
+ err = url_open(&hd, buf, URL_RDWR);
+ if (err < 0)
+ goto fail;
+
+ s->hd = hd;
+ if (http_connect(h, path, hoststr, auth) < 0)
+ goto fail;
+ if (s->http_code == 303 && s->location[0] != '\0') {
+ /* url moved, get next */
+ uri = s->location;
+ url_close(hd);
+ goto redo;
+ }
+ return 0;
+ fail:
+ if (hd)
+ url_close(hd);
+ av_free(s);
+ return AVERROR_IO;
+}
+
+static int http_getc(HTTPContext *s)
+{
+ int len;
+ if (s->buf_ptr >= s->buf_end) {
+ len = url_read(s->hd, s->buffer, BUFFER_SIZE);
+ if (len < 0) {
+ return AVERROR_IO;
+ } else if (len == 0) {
+ return -1;
+ } else {
+ s->buf_ptr = s->buffer;
+ s->buf_end = s->buffer + len;
+ }
+ }
+ return *s->buf_ptr++;
+}
+
+static int process_line(HTTPContext *s, char *line, int line_count)
+{
+ char *tag, *p;
+
+ /* end of header */
+ if (line[0] == '\0')
+ return 0;
+
+ p = line;
+ if (line_count == 0) {
+ while (!isspace(*p) && *p != '\0')
+ p++;
+ while (isspace(*p))
+ p++;
+ s->http_code = strtol(p, NULL, 10);
+#ifdef DEBUG
+ printf("http_code=%d\n", s->http_code);
+#endif
+ } else {
+ while (*p != '\0' && *p != ':')
+ p++;
+ if (*p != ':')
+ return 1;
+
+ *p = '\0';
+ tag = line;
+ p++;
+ while (isspace(*p))
+ p++;
+ if (!strcmp(tag, "Location")) {
+ strcpy(s->location, p);
+ }
+ }
+ return 1;
+}
+
+static int http_connect(URLContext *h, const char *path, const char *hoststr,
+ const char *auth)
+{
+ HTTPContext *s = h->priv_data;
+ int post, err, ch;
+ char line[1024], *q;
+ char *auth_b64;
+
+
+ /* send http header */
+ post = h->flags & URL_WRONLY;
+
+ auth_b64 = av_base64_encode((uint8_t *)auth, strlen(auth));
+ snprintf(s->buffer, sizeof(s->buffer),
+ "%s %s HTTP/1.0\r\n"
+ "User-Agent: %s\r\n"
+ "Accept: */*\r\n"
+ "Host: %s\r\n"
+ "Authorization: Basic %s\r\n"
+ "\r\n",
+ post ? "POST" : "GET",
+ path,
+ LIBAVFORMAT_IDENT,
+ hoststr,
+ auth_b64);
+
+ av_freep(&auth_b64);
+ if (http_write(h, s->buffer, strlen(s->buffer)) < 0)
+ return AVERROR_IO;
+
+ /* init input buffer */
+ s->buf_ptr = s->buffer;
+ s->buf_end = s->buffer;
+ s->line_count = 0;
+ s->location[0] = '\0';
+ if (post) {
+ sleep(1);
+ return 0;
+ }
+
+ /* wait for header */
+ q = line;
+ for(;;) {
+ ch = http_getc(s);
+ if (ch < 0)
+ return AVERROR_IO;
+ if (ch == '\n') {
+ /* process line */
+ if (q > line && q[-1] == '\r')
+ q--;
+ *q = '\0';
+#ifdef DEBUG
+ printf("header='%s'\n", line);
+#endif
+ err = process_line(s, line, s->line_count);
+ if (err < 0)
+ return err;
+ if (err == 0)
+ return 0;
+ s->line_count++;
+ q = line;
+ } else {
+ if ((q - line) < sizeof(line) - 1)
+ *q++ = ch;
+ }
+ }
+}
+
+
+static int http_read(URLContext *h, uint8_t *buf, int size)
+{
+ HTTPContext *s = h->priv_data;
+ int len;
+
+ /* read bytes from input buffer first */
+ len = s->buf_end - s->buf_ptr;
+ if (len > 0) {
+ if (len > size)
+ len = size;
+ memcpy(buf, s->buf_ptr, len);
+ s->buf_ptr += len;
+ } else {
+ len = url_read(s->hd, buf, size);
+ }
+ return len;
+}
+
+/* used only when posting data */
+static int http_write(URLContext *h, uint8_t *buf, int size)
+{
+ HTTPContext *s = h->priv_data;
+ return url_write(s->hd, buf, size);
+}
+
+static int http_close(URLContext *h)
+{
+ HTTPContext *s = h->priv_data;
+ url_close(s->hd);
+ av_free(s);
+ return 0;
+}
+
+URLProtocol http_protocol = {
+ "http",
+ http_open,
+ http_read,
+ http_write,
+ NULL, /* seek */
+ http_close,
+};
diff --git a/contrib/ffmpeg/libavformat/idcin.c b/contrib/ffmpeg/libavformat/idcin.c
new file mode 100644
index 000000000..48d1e250d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/idcin.c
@@ -0,0 +1,301 @@
+/*
+ * Id Quake II CIN File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file idcin.c
+ * Id Quake II CIN file demuxer by Mike Melanson (melanson@pcisys.net)
+ * For more information about the Id CIN format, visit:
+ * http://www.csse.monash.edu.au/~timf/
+ *
+ * CIN is a somewhat quirky and ill-defined format. Here are some notes
+ * for anyone trying to understand the technical details of this format:
+ *
+ * The format has no definite file signature. This is problematic for a
+ * general-purpose media player that wants to automatically detect file
+ * types. However, a CIN file does start with 5 32-bit numbers that
+ * specify audio and video parameters. This demuxer gets around the lack
+ * of file signature by performing sanity checks on those parameters.
+ * Probabilistically, this is a reasonable solution since the number of
+ * valid combinations of the 5 parameters is a very small subset of the
+ * total 160-bit number space.
+ *
+ * Refer to the function idcin_probe() for the precise A/V parameters
+ * that this demuxer allows.
+ *
+ * Next, each audio and video frame has a duration of 1/14 sec. If the
+ * audio sample rate is a multiple of the common frequency 22050 Hz it will
+ * divide evenly by 14. However, if the sample rate is 11025 Hz:
+ * 11025 (samples/sec) / 14 (frames/sec) = 787.5 (samples/frame)
+ * The way the CIN stores audio in this case is by storing 787 sample
+ * frames in the first audio frame and 788 sample frames in the second
+ * audio frame. Therefore, the total number of bytes in an audio frame
+ * is given as:
+ * audio frame #0: 787 * (bytes/sample) * (# channels) bytes in frame
+ * audio frame #1: 788 * (bytes/sample) * (# channels) bytes in frame
+ * audio frame #2: 787 * (bytes/sample) * (# channels) bytes in frame
+ * audio frame #3: 788 * (bytes/sample) * (# channels) bytes in frame
+ *
+ * Finally, not all Id CIN creation tools agree on the resolution of the
+ * color palette, apparently. Some creation tools specify red, green, and
+ * blue palette components in terms of 6-bit VGA color DAC values which
+ * range from 0..63. Other tools specify the RGB components as full 8-bit
+ * values that range from 0..255. Since there are no markers in the file to
+ * differentiate between the two variants, this demuxer uses the following
+ * heuristic:
+ * - load the 768 palette bytes from disk
+ * - assume that they will need to be shifted left by 2 bits to
+ * transform them from 6-bit values to 8-bit values
+ * - scan through all 768 palette bytes
+ * - if any bytes exceed 63, do not shift the bytes at all before
+ * transmitting them to the video decoder
+ * transmitting them to the video decoder
+ */
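+/* As a concrete example of the chunk sizing described above (taking the
+ * 11025 Hz, 1 byte/sample, mono case): sample_rate % 14 != 0, so
+ *     audio_chunk_size1 = (11025 / 14)     * 1 * 1 = 787 bytes
+ *     audio_chunk_size2 = (11025 / 14 + 1) * 1 * 1 = 788 bytes
+ * and idcin_read_packet() alternates between the two sizes, which is exactly
+ * what idcin_read_header() sets up below. */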
+
+#include "avformat.h"
+
+#define HUFFMAN_TABLE_SIZE (64 * 1024)
+#define FRAME_PTS_INC (90000 / 14)
+
+typedef struct IdcinDemuxContext {
+ int video_stream_index;
+ int audio_stream_index;
+ int audio_chunk_size1;
+ int audio_chunk_size2;
+
+ /* demux state variables */
+ int current_audio_chunk;
+ int next_chunk_is_video;
+ int audio_present;
+
+ int64_t pts;
+
+ AVPaletteControl palctrl;
+} IdcinDemuxContext;
+
+static int idcin_probe(AVProbeData *p)
+{
+ unsigned int number;
+
+ /*
+ * This is what you could call a "probabilistic" file check: Id CIN
+ * files don't have a definite file signature. In lieu of such a marker,
+ * perform sanity checks on the 5 32-bit header fields:
+ * width, height: greater than 0, less than or equal to 1024
+ * audio sample rate: greater than or equal to 8000, less than or
+ * equal to 48000, or 0 for no audio
+ * audio sample width (bytes/sample): 0 for no audio, or 1 or 2
+ * audio channels: 0 for no audio, or 1 or 2
+ */
+
+ /* cannot proceed without 20 bytes */
+ if (p->buf_size < 20)
+ return 0;
+
+ /* check the video width */
+ number = LE_32(&p->buf[0]);
+ if ((number == 0) || (number > 1024))
+ return 0;
+
+ /* check the video height */
+ number = LE_32(&p->buf[4]);
+ if ((number == 0) || (number > 1024))
+ return 0;
+
+ /* check the audio sample rate */
+ number = LE_32(&p->buf[8]);
+    if ((number != 0) && ((number < 8000) || (number > 48000)))
+ return 0;
+
+ /* check the audio bytes/sample */
+ number = LE_32(&p->buf[12]);
+ if (number > 2)
+ return 0;
+
+ /* check the audio channels */
+ number = LE_32(&p->buf[16]);
+ if (number > 2)
+ return 0;
+
+    /* return half certainty since this check is a bit sketchy */
+ return AVPROBE_SCORE_MAX / 2;
+}
+
+static int idcin_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ IdcinDemuxContext *idcin = (IdcinDemuxContext *)s->priv_data;
+ AVStream *st;
+ unsigned int width, height;
+ unsigned int sample_rate, bytes_per_sample, channels;
+
+ /* get the 5 header parameters */
+ width = get_le32(pb);
+ height = get_le32(pb);
+ sample_rate = get_le32(pb);
+ bytes_per_sample = get_le32(pb);
+ channels = get_le32(pb);
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ idcin->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_IDCIN;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = width;
+ st->codec->height = height;
+
+ /* load up the Huffman tables into extradata */
+ st->codec->extradata_size = HUFFMAN_TABLE_SIZE;
+ st->codec->extradata = av_malloc(HUFFMAN_TABLE_SIZE);
+ if (get_buffer(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
+ HUFFMAN_TABLE_SIZE)
+ return AVERROR_IO;
+ /* save a reference in order to transport the palette */
+ st->codec->palctrl = &idcin->palctrl;
+
+ /* if sample rate is 0, assume no audio */
+ if (sample_rate) {
+ idcin->audio_present = 1;
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ idcin->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = 1;
+ st->codec->channels = channels;
+ st->codec->sample_rate = sample_rate;
+ st->codec->bits_per_sample = bytes_per_sample * 8;
+ st->codec->bit_rate = sample_rate * bytes_per_sample * 8 * channels;
+ st->codec->block_align = bytes_per_sample * channels;
+ if (bytes_per_sample == 1)
+ st->codec->codec_id = CODEC_ID_PCM_U8;
+ else
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+
+ if (sample_rate % 14 != 0) {
+ idcin->audio_chunk_size1 = (sample_rate / 14) *
+ bytes_per_sample * channels;
+ idcin->audio_chunk_size2 = (sample_rate / 14 + 1) *
+ bytes_per_sample * channels;
+ } else {
+ idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =
+ (sample_rate / 14) * bytes_per_sample * channels;
+ }
+ idcin->current_audio_chunk = 0;
+ } else
+        idcin->audio_present = 0;
+
+ idcin->next_chunk_is_video = 1;
+ idcin->pts = 0;
+
+ return 0;
+}
+
+static int idcin_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ int ret;
+ unsigned int command;
+ unsigned int chunk_size;
+ IdcinDemuxContext *idcin = (IdcinDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int i;
+ int palette_scale;
+ unsigned char r, g, b;
+ unsigned char palette_buffer[768];
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+
+ if (idcin->next_chunk_is_video) {
+ command = get_le32(pb);
+ if (command == 2) {
+ return AVERROR_IO;
+ } else if (command == 1) {
+ /* trigger a palette change */
+ idcin->palctrl.palette_changed = 1;
+ if (get_buffer(pb, palette_buffer, 768) != 768)
+ return AVERROR_IO;
+ /* scale the palette as necessary */
+ palette_scale = 2;
+ for (i = 0; i < 768; i++)
+ if (palette_buffer[i] > 63) {
+ palette_scale = 0;
+ break;
+ }
+
+ for (i = 0; i < 256; i++) {
+ r = palette_buffer[i * 3 ] << palette_scale;
+ g = palette_buffer[i * 3 + 1] << palette_scale;
+ b = palette_buffer[i * 3 + 2] << palette_scale;
+ idcin->palctrl.palette[i] = (r << 16) | (g << 8) | (b);
+ }
+ }
+
+ chunk_size = get_le32(pb);
+ /* skip the number of decoded bytes (always equal to width * height) */
+ url_fseek(pb, 4, SEEK_CUR);
+ chunk_size -= 4;
+ ret= av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ return AVERROR_IO;
+ pkt->stream_index = idcin->video_stream_index;
+ pkt->pts = idcin->pts;
+ } else {
+ /* send out the audio chunk */
+ if (idcin->current_audio_chunk)
+ chunk_size = idcin->audio_chunk_size2;
+ else
+ chunk_size = idcin->audio_chunk_size1;
+ ret= av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ return AVERROR_IO;
+ pkt->stream_index = idcin->audio_stream_index;
+ pkt->pts = idcin->pts;
+
+ idcin->current_audio_chunk ^= 1;
+ idcin->pts += FRAME_PTS_INC;
+ }
+
+ if (idcin->audio_present)
+ idcin->next_chunk_is_video ^= 1;
+
+ return ret;
+}
+
+static int idcin_read_close(AVFormatContext *s)
+{
+
+ return 0;
+}
+
+AVInputFormat idcin_demuxer = {
+ "idcin",
+ "Id CIN format",
+ sizeof(IdcinDemuxContext),
+ idcin_probe,
+ idcin_read_header,
+ idcin_read_packet,
+ idcin_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/idroq.c b/contrib/ffmpeg/libavformat/idroq.c
new file mode 100644
index 000000000..419696c9a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/idroq.c
@@ -0,0 +1,291 @@
+/*
+ * Id RoQ (.roq) File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file idroq.c
+ * Id RoQ format file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the .roq file format, visit:
+ * http://www.csse.monash.edu.au/~timf/
+ */
+
+#include "avformat.h"
+
+#define RoQ_MAGIC_NUMBER 0x1084
+#define RoQ_CHUNK_PREAMBLE_SIZE 8
+#define RoQ_AUDIO_SAMPLE_RATE 22050
+#define RoQ_CHUNKS_TO_SCAN 30
+
+#define RoQ_INFO 0x1001
+#define RoQ_QUAD_CODEBOOK 0x1002
+#define RoQ_QUAD_VQ 0x1011
+#define RoQ_SOUND_MONO 0x1020
+#define RoQ_SOUND_STEREO 0x1021
+
+typedef struct RoqDemuxContext {
+
+ int width;
+ int height;
+ int audio_channels;
+ int framerate;
+ int frame_pts_inc;
+
+ int video_stream_index;
+ int audio_stream_index;
+
+ int64_t video_pts;
+ unsigned int audio_frame_count;
+
+} RoqDemuxContext;
+
+static int roq_probe(AVProbeData *p)
+{
+ if (p->buf_size < 6)
+ return 0;
+
+ if ((LE_16(&p->buf[0]) != RoQ_MAGIC_NUMBER) ||
+ (LE_32(&p->buf[2]) != 0xFFFFFFFF))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int roq_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ RoqDemuxContext *roq = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
+ int i;
+ unsigned int chunk_size;
+ unsigned int chunk_type;
+
+ /* get the main header */
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ roq->framerate = LE_16(&preamble[6]);
+ roq->frame_pts_inc = 90000 / roq->framerate;
+
+ /* init private context parameters */
+ roq->width = roq->height = roq->audio_channels = roq->video_pts =
+ roq->audio_frame_count = 0;
+
+ /* scan the first n chunks searching for A/V parameters */
+ for (i = 0; i < RoQ_CHUNKS_TO_SCAN; i++) {
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+
+ chunk_type = LE_16(&preamble[0]);
+ chunk_size = LE_32(&preamble[2]);
+
+ switch (chunk_type) {
+
+ case RoQ_INFO:
+ /* fetch the width and height; reuse the preamble bytes */
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ roq->width = LE_16(&preamble[0]);
+ roq->height = LE_16(&preamble[2]);
+ break;
+
+ case RoQ_QUAD_CODEBOOK:
+ case RoQ_QUAD_VQ:
+ /* ignore during this scan */
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ break;
+
+ case RoQ_SOUND_MONO:
+ roq->audio_channels = 1;
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ break;
+
+ case RoQ_SOUND_STEREO:
+ roq->audio_channels = 2;
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ break;
+
+ default:
+ av_log(s, AV_LOG_ERROR, " unknown RoQ chunk type (%04X)\n", LE_16(&preamble[0]));
+ return AVERROR_INVALIDDATA;
+ break;
+ }
+
+ /* if all necessary parameters have been gathered, exit early */
+ if ((roq->width && roq->height) && roq->audio_channels)
+ break;
+ }
+
+ /* seek back to the first chunk */
+ url_fseek(pb, RoQ_CHUNK_PREAMBLE_SIZE, SEEK_SET);
+
+ /* initialize the decoders */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ /* set the pts reference (1 pts = 1/90000) */
+ av_set_pts_info(st, 33, 1, 90000);
+ roq->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_ROQ;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = roq->width;
+ st->codec->height = roq->height;
+
+ if (roq->audio_channels) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ roq->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_ROQ_DPCM;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = roq->audio_channels;
+ st->codec->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
+ st->codec->bits_per_sample = 16;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+ }
+
+ return 0;
+}
+
+static int roq_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ RoqDemuxContext *roq = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret = 0;
+ unsigned int chunk_size;
+ unsigned int chunk_type;
+ unsigned int codebook_size;
+ unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
+ int packet_read = 0;
+ offset_t codebook_offset;
+
+ while (!packet_read) {
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+
+ /* get the next chunk preamble */
+ if ((ret = get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+
+ chunk_type = LE_16(&preamble[0]);
+ chunk_size = LE_32(&preamble[2]);
+ if(chunk_size > INT_MAX)
+ return AVERROR_INVALIDDATA;
+
+ switch (chunk_type) {
+
+ case RoQ_INFO:
+ /* don't care about this chunk anymore */
+ url_fseek(pb, RoQ_CHUNK_PREAMBLE_SIZE, SEEK_CUR);
+ break;
+
+ case RoQ_QUAD_CODEBOOK:
+ /* packet needs to contain both this codebook and next VQ chunk */
+ codebook_offset = url_ftell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
+ codebook_size = chunk_size;
+ url_fseek(pb, codebook_size, SEEK_CUR);
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ chunk_size = LE_32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
+ codebook_size;
+
+ /* rewind */
+ url_fseek(pb, codebook_offset, SEEK_SET);
+
+ /* load up the packet */
+ ret= av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ return AVERROR_IO;
+ pkt->stream_index = roq->video_stream_index;
+ pkt->pts = roq->video_pts;
+
+ roq->video_pts += roq->frame_pts_inc;
+ packet_read = 1;
+ break;
+
+ case RoQ_SOUND_MONO:
+ case RoQ_SOUND_STEREO:
+ case RoQ_QUAD_VQ:
+ /* load up the packet */
+ if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
+ return AVERROR_IO;
+ /* copy over preamble */
+ memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);
+
+ if (chunk_type == RoQ_QUAD_VQ) {
+ pkt->stream_index = roq->video_stream_index;
+ pkt->pts = roq->video_pts;
+ roq->video_pts += roq->frame_pts_inc;
+ } else {
+ pkt->stream_index = roq->audio_stream_index;
+ pkt->pts = roq->audio_frame_count;
+ pkt->pts *= 90000;
+ pkt->pts /= RoQ_AUDIO_SAMPLE_RATE;
+ roq->audio_frame_count += (chunk_size / roq->audio_channels);
+ }
+
+ pkt->pos= url_ftell(pb);
+ ret = get_buffer(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
+ chunk_size);
+ if (ret != chunk_size)
+ ret = AVERROR_IO;
+
+ packet_read = 1;
+ break;
+
+ default:
+ av_log(s, AV_LOG_ERROR, " unknown RoQ chunk (%04X)\n", chunk_type);
+ return AVERROR_INVALIDDATA;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int roq_read_close(AVFormatContext *s)
+{
+// RoqDemuxContext *roq = (RoqDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+AVInputFormat roq_demuxer = {
+ "RoQ",
+ "Id RoQ format",
+ sizeof(RoqDemuxContext),
+ roq_probe,
+ roq_read_header,
+ roq_read_packet,
+ roq_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/img.c b/contrib/ffmpeg/libavformat/img.c
new file mode 100644
index 000000000..5223c691e
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/img.c
@@ -0,0 +1,400 @@
+/*
+ * Image format
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+typedef struct {
+ int width;
+ int height;
+ int img_first;
+ int img_last;
+ int img_number;
+ int img_count;
+ int img_size;
+ AVImageFormat *img_fmt;
+ int pix_fmt;
+ int is_pipe;
+ char path[1024];
+ /* temporary usage */
+ void *ptr;
+} VideoData;
+
+
+/* return -1 if no image found */
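+/* The first image is located by probing indices 0..4; the last image is then
+ * found by repeatedly doubling a probe distance from the last index known to
+ * exist until a missing file is hit, then restarting from that index. */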
+static int find_image_range(int *pfirst_index, int *plast_index,
+ const char *path)
+{
+ char buf[1024];
+ int range, last_index, range1, first_index;
+
+ /* find the first image */
+ for(first_index = 0; first_index < 5; first_index++) {
+ if (av_get_frame_filename(buf, sizeof(buf), path, first_index) < 0)
+ goto fail;
+ if (url_exist(buf))
+ break;
+ }
+ if (first_index == 5)
+ goto fail;
+
+ /* find the last image */
+ last_index = first_index;
+ for(;;) {
+ range = 0;
+ for(;;) {
+ if (!range)
+ range1 = 1;
+ else
+ range1 = 2 * range;
+ if (av_get_frame_filename(buf, sizeof(buf), path,
+ last_index + range1) < 0)
+ goto fail;
+ if (!url_exist(buf))
+ break;
+ range = range1;
+ /* just in case... */
+ if (range >= (1 << 30))
+ goto fail;
+ }
+        /* we are sure that image last_index + range exists */
+ if (!range)
+ break;
+ last_index += range;
+ }
+ *pfirst_index = first_index;
+ *plast_index = last_index;
+ return 0;
+ fail:
+ return -1;
+}
+
+
+static int image_probe(AVProbeData *p)
+{
+ if (av_filename_number_test(p->filename) && guess_image_format(p->filename))
+ return AVPROBE_SCORE_MAX-1;
+ else
+ return 0;
+}
+
+static int read_header_alloc_cb(void *opaque, AVImageInfo *info)
+{
+ VideoData *s = opaque;
+
+ s->width = info->width;
+ s->height = info->height;
+ s->pix_fmt = info->pix_fmt;
+ /* stop image reading but no error */
+ return 1;
+}
+
+static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ int ret, first_index, last_index;
+ char buf[1024];
+ ByteIOContext pb1, *f = &pb1;
+ AVStream *st;
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+
+ if (ap->image_format)
+ s->img_fmt = ap->image_format;
+
+ pstrcpy(s->path, sizeof(s->path), s1->filename);
+ s->img_number = 0;
+ s->img_count = 0;
+
+ /* find format */
+ if (s1->iformat->flags & AVFMT_NOFILE)
+ s->is_pipe = 0;
+ else
+ s->is_pipe = 1;
+
+ if (!ap->time_base.num) {
+ st->codec->time_base= (AVRational){1,25};
+ } else {
+ st->codec->time_base= ap->time_base;
+ }
+
+ if (!s->is_pipe) {
+ if (find_image_range(&first_index, &last_index, s->path) < 0)
+ goto fail;
+ s->img_first = first_index;
+ s->img_last = last_index;
+ s->img_number = first_index;
+ /* compute duration */
+ st->start_time = 0;
+ st->duration = last_index - first_index + 1;
+ if (av_get_frame_filename(buf, sizeof(buf), s->path, s->img_number) < 0)
+ goto fail;
+ if (url_fopen(f, buf, URL_RDONLY) < 0)
+ goto fail;
+ } else {
+ f = &s1->pb;
+ }
+
+ ret = av_read_image(f, s1->filename, s->img_fmt, read_header_alloc_cb, s);
+ if (ret < 0)
+ goto fail1;
+
+ if (!s->is_pipe) {
+ url_fclose(f);
+ } else {
+ url_fseek(f, 0, SEEK_SET);
+ }
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = s->width;
+ st->codec->height = s->height;
+ st->codec->pix_fmt = s->pix_fmt;
+ s->img_size = avpicture_get_size(s->pix_fmt, (s->width+15)&(~15), (s->height+15)&(~15));
+
+ return 0;
+ fail1:
+ if (!s->is_pipe)
+ url_fclose(f);
+ fail:
+ return AVERROR_IO;
+}
+
+static int read_packet_alloc_cb(void *opaque, AVImageInfo *info)
+{
+ VideoData *s = opaque;
+
+ if (info->width != s->width ||
+ info->height != s->height)
+ return -1;
+ avpicture_fill(&info->pict, s->ptr, info->pix_fmt, (info->width+15)&(~15), (info->height+15)&(~15));
+ return 0;
+}
+
+static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+ char filename[1024];
+ int ret;
+ ByteIOContext f1, *f;
+
+ if (!s->is_pipe) {
+ /* loop over input */
+ if (s1->loop_input && s->img_number > s->img_last) {
+ s->img_number = s->img_first;
+ }
+ if (av_get_frame_filename(filename, sizeof(filename),
+ s->path, s->img_number) < 0)
+ return AVERROR_IO;
+ f = &f1;
+ if (url_fopen(f, filename, URL_RDONLY) < 0)
+ return AVERROR_IO;
+ } else {
+ f = &s1->pb;
+ if (url_feof(f))
+ return AVERROR_IO;
+ }
+
+ av_new_packet(pkt, s->img_size);
+ pkt->stream_index = 0;
+
+ s->ptr = pkt->data;
+ ret = av_read_image(f, filename, s->img_fmt, read_packet_alloc_cb, s);
+ if (!s->is_pipe) {
+ url_fclose(f);
+ }
+
+ if (ret < 0) {
+ av_free_packet(pkt);
+ return AVERROR_IO; /* signal EOF */
+ } else {
+ /* XXX: computing this pts is not necessary as it is done in
+ the generic code too */
+ pkt->pts = av_rescale((int64_t)s->img_count * s1->streams[0]->codec->time_base.num, s1->streams[0]->time_base.den, s1->streams[0]->codec->time_base.den) / s1->streams[0]->time_base.num;
+ s->img_count++;
+ s->img_number++;
+ return 0;
+ }
+}
+
+static int img_read_close(AVFormatContext *s1)
+{
+ return 0;
+}
+
+/******************************************************/
+/* image output */
+
+static int img_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
+{
+ VideoData *img = s->priv_data;
+ AVStream *st;
+ AVImageFormat *img_fmt;
+ int i;
+
+ /* find output image format */
+ if (ap->image_format) {
+ img_fmt = ap->image_format;
+ } else {
+ img_fmt = guess_image_format(s->filename);
+ }
+ if (!img_fmt)
+ return -1;
+
+ if (s->nb_streams != 1)
+ return -1;
+
+ st = s->streams[0];
+ /* we select the first matching format */
+ for(i=0;i<PIX_FMT_NB;i++) {
+ if (img_fmt->supported_pixel_formats & (1 << i))
+ break;
+ }
+ if (i >= PIX_FMT_NB)
+ return -1;
+ img->img_fmt = img_fmt;
+ img->pix_fmt = i;
+ st->codec->pix_fmt = img->pix_fmt;
+ return 0;
+}
+
+static int img_write_header(AVFormatContext *s)
+{
+ VideoData *img = s->priv_data;
+
+ img->img_number = 1;
+ pstrcpy(img->path, sizeof(img->path), s->filename);
+
+ /* find format */
+ if (s->oformat->flags & AVFMT_NOFILE)
+ img->is_pipe = 0;
+ else
+ img->is_pipe = 1;
+
+ return 0;
+}
+
+static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ VideoData *img = s->priv_data;
+ AVStream *st = s->streams[pkt->stream_index];
+ ByteIOContext pb1, *pb;
+ AVPicture *picture;
+ int width, height, ret;
+ char filename[1024];
+ AVImageInfo info;
+
+ width = st->codec->width;
+ height = st->codec->height;
+
+ picture = (AVPicture *)pkt->data;
+
+ if (!img->is_pipe) {
+ if (av_get_frame_filename(filename, sizeof(filename),
+ img->path, img->img_number) < 0)
+ return AVERROR_IO;
+ pb = &pb1;
+ if (url_fopen(pb, filename, URL_WRONLY) < 0)
+ return AVERROR_IO;
+ } else {
+ pb = &s->pb;
+ }
+ info.width = width;
+ info.height = height;
+ info.pix_fmt = st->codec->pix_fmt;
+ info.interleaved = 0; /* FIXME: there should be a way to set it right */
+ info.pict = *picture;
+ ret = av_write_image(pb, img->img_fmt, &info);
+ if (!img->is_pipe) {
+ url_fclose(pb);
+ }
+
+ img->img_number++;
+ return 0;
+}
+
+static int img_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+/* input */
+#ifdef CONFIG_IMAGE_DEMUXER
+AVInputFormat image_demuxer = {
+ "image",
+ "image sequence",
+ sizeof(VideoData),
+ image_probe,
+ img_read_header,
+ img_read_packet,
+ img_read_close,
+ NULL,
+ NULL,
+ AVFMT_NOFILE | AVFMT_NEEDNUMBER,
+};
+#endif
+#ifdef CONFIG_IMAGEPIPE_DEMUXER
+AVInputFormat imagepipe_demuxer = {
+ "imagepipe",
+ "piped image sequence",
+ sizeof(VideoData),
+ NULL, /* no probe */
+ img_read_header,
+ img_read_packet,
+ img_read_close,
+ NULL,
+};
+#endif
+
+/* output */
+#ifdef CONFIG_IMAGE_MUXER
+AVOutputFormat image_muxer = {
+ "image",
+ "image sequence",
+ "",
+ "",
+ sizeof(VideoData),
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ img_write_header,
+ img_write_packet,
+ img_write_trailer,
+ AVFMT_NOFILE | AVFMT_NEEDNUMBER | AVFMT_RAWPICTURE,
+ img_set_parameters,
+};
+#endif
+#ifdef CONFIG_IMAGEPIPE_MUXER
+AVOutputFormat imagepipe_muxer = {
+ "imagepipe",
+ "piped image sequence",
+ "",
+ "",
+ sizeof(VideoData),
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ img_write_header,
+ img_write_packet,
+ img_write_trailer,
+ AVFMT_RAWPICTURE,
+ img_set_parameters,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/img2.c b/contrib/ffmpeg/libavformat/img2.c
new file mode 100644
index 000000000..303190ad2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/img2.c
@@ -0,0 +1,425 @@
+/*
+ * Image format
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ * Copyright (c) 2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+typedef struct {
+ int img_first;
+ int img_last;
+ int img_number;
+ int img_count;
+ int is_pipe;
+ char path[1024];
+} VideoData;
+
+typedef struct {
+ enum CodecID id;
+ const char *str;
+} IdStrMap;
+
+static const IdStrMap img_tags[] = {
+ { CODEC_ID_MJPEG , "jpeg"},
+ { CODEC_ID_MJPEG , "jpg"},
+ { CODEC_ID_LJPEG , "ljpg"},
+ { CODEC_ID_PNG , "png"},
+ { CODEC_ID_PPM , "ppm"},
+ { CODEC_ID_PGM , "pgm"},
+ { CODEC_ID_PGMYUV , "pgmyuv"},
+ { CODEC_ID_PBM , "pbm"},
+ { CODEC_ID_PAM , "pam"},
+ { CODEC_ID_MPEG1VIDEO, "mpg1-img"},
+ { CODEC_ID_MPEG2VIDEO, "mpg2-img"},
+ { CODEC_ID_MPEG4 , "mpg4-img"},
+ { CODEC_ID_FFV1 , "ffv1-img"},
+ { CODEC_ID_RAWVIDEO , "y"},
+ { CODEC_ID_BMP , "bmp"},
+ { CODEC_ID_GIF , "gif"},
+ { CODEC_ID_TARGA , "tga"},
+ { CODEC_ID_TIFF , "tiff"},
+ {0, NULL}
+};
+
+static int sizes[][2] = {
+ { 640, 480 },
+ { 720, 480 },
+ { 720, 576 },
+ { 352, 288 },
+ { 352, 240 },
+ { 160, 128 },
+ { 512, 384 },
+ { 640, 352 },
+ { 640, 240 },
+};
+
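+/* Given only the byte count of a raw luma plane, guess the frame
+ * dimensions by matching against the table above; e.g. a 307200-byte
+ * plane (640 * 480) is reported as 640x480. Used below when a raw
+ * ("y") sequence is opened without an explicit frame size. */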
+static int infer_size(int *width_ptr, int *height_ptr, int size)
+{
+ int i;
+
+ for(i=0;i<sizeof(sizes)/sizeof(sizes[0]);i++) {
+ if ((sizes[i][0] * sizes[i][1]) == size) {
+ *width_ptr = sizes[i][0];
+ *height_ptr = sizes[i][1];
+ return 0;
+ }
+ }
+ return -1;
+}
+static enum CodecID av_str2id(const IdStrMap *tags, const char *str)
+{
+ str= strrchr(str, '.');
+ if(!str) return CODEC_ID_NONE;
+ str++;
+
+ while (tags->id) {
+ int i;
+ for(i=0; toupper(tags->str[i]) == toupper(str[i]); i++){
+ if(tags->str[i]==0 && str[i]==0)
+ return tags->id;
+ }
+
+ tags++;
+ }
+ return CODEC_ID_NONE;
+}
+
+/* return -1 if no image found */
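+/* The last index is found with an exponential probe: starting from the
+ * first existing frame, the step is doubled while the corresponding
+ * file exists, then the search restarts from the last hit; e.g. with
+ * frames 0..13 on disk the probes go +1, +2, +4, +8, +16 and a few
+ * shorter rounds before settling on 13. */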
+static int find_image_range(int *pfirst_index, int *plast_index,
+ const char *path)
+{
+ char buf[1024];
+ int range, last_index, range1, first_index;
+
+ /* find the first image */
+ for(first_index = 0; first_index < 5; first_index++) {
+ if (av_get_frame_filename(buf, sizeof(buf), path, first_index) < 0){
+ *pfirst_index =
+ *plast_index = 1;
+ return 0;
+ }
+ if (url_exist(buf))
+ break;
+ }
+ if (first_index == 5)
+ goto fail;
+
+ /* find the last image */
+ last_index = first_index;
+ for(;;) {
+ range = 0;
+ for(;;) {
+ if (!range)
+ range1 = 1;
+ else
+ range1 = 2 * range;
+ if (av_get_frame_filename(buf, sizeof(buf), path,
+ last_index + range1) < 0)
+ goto fail;
+ if (!url_exist(buf))
+ break;
+ range = range1;
+ /* just in case... */
+ if (range >= (1 << 30))
+ goto fail;
+ }
+ /* we are sure that image last_index + range exists */
+ if (!range)
+ break;
+ last_index += range;
+ }
+ *pfirst_index = first_index;
+ *plast_index = last_index;
+ return 0;
+ fail:
+ return -1;
+}
+
+
+static int image_probe(AVProbeData *p)
+{
+ if (av_filename_number_test(p->filename) && av_str2id(img_tags, p->filename))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+enum CodecID av_guess_image2_codec(const char *filename){
+ return av_str2id(img_tags, filename);
+}
+
+static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ int first_index, last_index;
+ AVStream *st;
+
+ s1->ctx_flags |= AVFMTCTX_NOHEADER;
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+
+ pstrcpy(s->path, sizeof(s->path), s1->filename);
+ s->img_number = 0;
+ s->img_count = 0;
+
+ /* find format */
+ if (s1->iformat->flags & AVFMT_NOFILE)
+ s->is_pipe = 0;
+ else{
+ s->is_pipe = 1;
+ st->need_parsing= 1;
+ }
+
+ if (!ap->time_base.num) {
+ av_set_pts_info(st, 60, 1, 25);
+ } else {
+ av_set_pts_info(st, 60, ap->time_base.num, ap->time_base.den);
+ }
+
+ if(ap->width && ap->height){
+ st->codec->width = ap->width;
+ st->codec->height= ap->height;
+ }
+
+ if (!s->is_pipe) {
+ if (find_image_range(&first_index, &last_index, s->path) < 0)
+ return AVERROR_IO;
+ s->img_first = first_index;
+ s->img_last = last_index;
+ s->img_number = first_index;
+ /* compute duration */
+ st->start_time = 0;
+ st->duration = last_index - first_index + 1;
+ }
+
+ if(ap->video_codec_id){
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = ap->video_codec_id;
+ }else if(ap->audio_codec_id){
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = ap->audio_codec_id;
+ }else{
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = av_str2id(img_tags, s->path);
+ }
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO && ap->pix_fmt != PIX_FMT_NONE)
+ st->codec->pix_fmt = ap->pix_fmt;
+
+ return 0;
+}
+
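+/* For CODEC_ID_RAWVIDEO the demuxer reads three planar files per frame,
+ * deriving the chroma names by rewriting the last character of the
+ * pattern-expanded name ('U' + i); a hypothetical frame01.Y would thus
+ * be read together with frame01.U and frame01.V. Every other codec is
+ * read as a single file per frame. */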
+static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+ char filename[1024];
+ int i;
+ int size[3]={0}, ret[3]={0};
+ ByteIOContext f1[3], *f[3]= {&f1[0], &f1[1], &f1[2]};
+ AVCodecContext *codec= s1->streams[0]->codec;
+
+ if (!s->is_pipe) {
+ /* loop over input */
+ if (s1->loop_input && s->img_number > s->img_last) {
+ s->img_number = s->img_first;
+ }
+ if (av_get_frame_filename(filename, sizeof(filename),
+ s->path, s->img_number)<0 && s->img_number > 1)
+ return AVERROR_IO;
+ for(i=0; i<3; i++){
+ if (url_fopen(f[i], filename, URL_RDONLY) < 0)
+ return AVERROR_IO;
+ size[i]= url_fsize(f[i]);
+
+ if(codec->codec_id != CODEC_ID_RAWVIDEO)
+ break;
+ filename[ strlen(filename) - 1 ]= 'U' + i;
+ }
+
+ if(codec->codec_id == CODEC_ID_RAWVIDEO && !codec->width)
+ infer_size(&codec->width, &codec->height, size[0]);
+ } else {
+ f[0] = &s1->pb;
+ if (url_feof(f[0]))
+ return AVERROR_IO;
+ size[0]= 4096;
+ }
+
+ av_new_packet(pkt, size[0] + size[1] + size[2]);
+ pkt->stream_index = 0;
+ pkt->flags |= PKT_FLAG_KEY;
+
+ pkt->size= 0;
+ for(i=0; i<3; i++){
+ if(size[i]){
+ ret[i]= get_buffer(f[i], pkt->data + pkt->size, size[i]);
+ if (!s->is_pipe)
+ url_fclose(f[i]);
+ if(ret[i]>0)
+ pkt->size += ret[i];
+ }
+ }
+
+ if (ret[0] <= 0 || ret[1]<0 || ret[2]<0) {
+ av_free_packet(pkt);
+ return AVERROR_IO; /* signal EOF */
+ } else {
+ s->img_count++;
+ s->img_number++;
+ return 0;
+ }
+}
+
+static int img_read_close(AVFormatContext *s1)
+{
+ return 0;
+}
+
+#ifdef CONFIG_MUXERS
+/******************************************************/
+/* image output */
+
+static int img_write_header(AVFormatContext *s)
+{
+ VideoData *img = s->priv_data;
+
+ img->img_number = 1;
+ pstrcpy(img->path, sizeof(img->path), s->filename);
+
+ /* find format */
+ if (s->oformat->flags & AVFMT_NOFILE)
+ img->is_pipe = 0;
+ else
+ img->is_pipe = 1;
+
+ return 0;
+}
+
+static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ VideoData *img = s->priv_data;
+ ByteIOContext pb1[3], *pb[3]= {&pb1[0], &pb1[1], &pb1[2]};
+ char filename[1024];
+ AVCodecContext *codec= s->streams[ pkt->stream_index ]->codec;
+ int i;
+
+ if (!img->is_pipe) {
+ if (av_get_frame_filename(filename, sizeof(filename),
+ img->path, img->img_number) < 0 && img->img_number>1)
+ return AVERROR_IO;
+ for(i=0; i<3; i++){
+ if (url_fopen(pb[i], filename, URL_WRONLY) < 0)
+ return AVERROR_IO;
+
+ if(codec->codec_id != CODEC_ID_RAWVIDEO)
+ break;
+ filename[ strlen(filename) - 1 ]= 'U' + i;
+ }
+ } else {
+ pb[0] = &s->pb;
+ }
+
+ if(codec->codec_id == CODEC_ID_RAWVIDEO){
+ int ysize = codec->width * codec->height;
+ put_buffer(pb[0], pkt->data , ysize);
+ put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
+ put_buffer(pb[2], pkt->data + ysize +(pkt->size - ysize)/2, (pkt->size - ysize)/2);
+ put_flush_packet(pb[1]);
+ put_flush_packet(pb[2]);
+ url_fclose(pb[1]);
+ url_fclose(pb[2]);
+ }else{
+ put_buffer(pb[0], pkt->data, pkt->size);
+ }
+ put_flush_packet(pb[0]);
+ if (!img->is_pipe) {
+ url_fclose(pb[0]);
+ }
+
+ img->img_number++;
+ return 0;
+}
+
+static int img_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+#endif /* CONFIG_MUXERS */
+
+/* input */
+#ifdef CONFIG_IMAGE2_DEMUXER
+AVInputFormat image2_demuxer = {
+ "image2",
+ "image2 sequence",
+ sizeof(VideoData),
+ image_probe,
+ img_read_header,
+ img_read_packet,
+ img_read_close,
+ NULL,
+ NULL,
+ AVFMT_NOFILE,
+};
+#endif
+#ifdef CONFIG_IMAGE2PIPE_DEMUXER
+AVInputFormat image2pipe_demuxer = {
+ "image2pipe",
+ "piped image2 sequence",
+ sizeof(VideoData),
+ NULL, /* no probe */
+ img_read_header,
+ img_read_packet,
+ img_read_close,
+ NULL,
+};
+#endif
+
+/* output */
+#ifdef CONFIG_IMAGE2_MUXER
+AVOutputFormat image2_muxer = {
+ "image2",
+ "image2 sequence",
+ "",
+ "",
+ sizeof(VideoData),
+ CODEC_ID_NONE,
+ CODEC_ID_MJPEG,
+ img_write_header,
+ img_write_packet,
+ img_write_trailer,
+ AVFMT_NOFILE,
+};
+#endif
+#ifdef CONFIG_IMAGE2PIPE_MUXER
+AVOutputFormat image2pipe_muxer = {
+ "image2pipe",
+ "piped image2 sequence",
+ "",
+ "",
+ sizeof(VideoData),
+ CODEC_ID_NONE,
+ CODEC_ID_MJPEG,
+ img_write_header,
+ img_write_packet,
+ img_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/ipmovie.c b/contrib/ffmpeg/libavformat/ipmovie.c
new file mode 100644
index 000000000..3c0459938
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ipmovie.c
@@ -0,0 +1,625 @@
+/*
+ * Interplay MVE File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file ipmovie.c
+ * Interplay MVE file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * For more information regarding the Interplay MVE file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ * The aforementioned site also contains a command line utility for parsing
+ * IP MVE files so that you can get a good idea of the typical structure of
+ * such files. This demuxer is not the best example to use if you are trying
+ * to write your own as it uses a rather roundabout approach for splitting
+ * up and sending out the chunks.
+ */
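+/*
+ * On-disk layout, as handled below: after the 20-byte signature (plus 6
+ * bytes that are skipped) the file is a sequence of chunks, each headed
+ * by a 4-byte preamble (16-bit little-endian size, 16-bit type); a chunk
+ * in turn is a sequence of opcodes, each with a 4-byte preamble (16-bit
+ * size, 8-bit type, 8-bit version) followed by its payload.
+ */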
+
+#include "avformat.h"
+
+/* debugging support: #define DEBUG_IPMOVIE as non-zero to see extremely
+ * verbose information about the demux process */
+#define DEBUG_IPMOVIE 0
+
+#if DEBUG_IPMOVIE
+#define debug_ipmovie printf
+#else
+static inline void debug_ipmovie(const char *format, ...) { }
+#endif
+
+#define IPMOVIE_SIGNATURE "Interplay MVE File\x1A\0"
+#define IPMOVIE_SIGNATURE_SIZE 20
+#define CHUNK_PREAMBLE_SIZE 4
+#define OPCODE_PREAMBLE_SIZE 4
+
+#define CHUNK_INIT_AUDIO 0x0000
+#define CHUNK_AUDIO_ONLY 0x0001
+#define CHUNK_INIT_VIDEO 0x0002
+#define CHUNK_VIDEO 0x0003
+#define CHUNK_SHUTDOWN 0x0004
+#define CHUNK_END 0x0005
+/* these last types are used internally */
+#define CHUNK_DONE 0xFFFC
+#define CHUNK_NOMEM 0xFFFD
+#define CHUNK_EOF 0xFFFE
+#define CHUNK_BAD 0xFFFF
+
+#define OPCODE_END_OF_STREAM 0x00
+#define OPCODE_END_OF_CHUNK 0x01
+#define OPCODE_CREATE_TIMER 0x02
+#define OPCODE_INIT_AUDIO_BUFFERS 0x03
+#define OPCODE_START_STOP_AUDIO 0x04
+#define OPCODE_INIT_VIDEO_BUFFERS 0x05
+#define OPCODE_UNKNOWN_06 0x06
+#define OPCODE_SEND_BUFFER 0x07
+#define OPCODE_AUDIO_FRAME 0x08
+#define OPCODE_SILENCE_FRAME 0x09
+#define OPCODE_INIT_VIDEO_MODE 0x0A
+#define OPCODE_CREATE_GRADIENT 0x0B
+#define OPCODE_SET_PALETTE 0x0C
+#define OPCODE_SET_PALETTE_COMPRESSED 0x0D
+#define OPCODE_UNKNOWN_0E 0x0E
+#define OPCODE_SET_DECODING_MAP 0x0F
+#define OPCODE_UNKNOWN_10 0x10
+#define OPCODE_VIDEO_DATA 0x11
+#define OPCODE_UNKNOWN_12 0x12
+#define OPCODE_UNKNOWN_13 0x13
+#define OPCODE_UNKNOWN_14 0x14
+#define OPCODE_UNKNOWN_15 0x15
+
+#define PALETTE_COUNT 256
+
+typedef struct IPMVEContext {
+
+ unsigned char *buf;
+ int buf_size;
+
+ float fps;
+ int frame_pts_inc;
+
+ unsigned int video_width;
+ unsigned int video_height;
+ int64_t video_pts;
+
+ unsigned int audio_bits;
+ unsigned int audio_channels;
+ unsigned int audio_sample_rate;
+ unsigned int audio_type;
+ unsigned int audio_frame_count;
+
+ int video_stream_index;
+ int audio_stream_index;
+
+ offset_t audio_chunk_offset;
+ int audio_chunk_size;
+ offset_t video_chunk_offset;
+ int video_chunk_size;
+ offset_t decode_map_chunk_offset;
+ int decode_map_chunk_size;
+
+ offset_t next_chunk_offset;
+
+ AVPaletteControl palette_control;
+
+} IPMVEContext;
+
+static int load_ipmovie_packet(IPMVEContext *s, ByteIOContext *pb,
+ AVPacket *pkt) {
+
+ int chunk_type;
+ int64_t audio_pts = 0;
+
+ if (s->audio_chunk_offset) {
+
+ /* adjust for PCM audio by skipping chunk header */
+ if (s->audio_type != CODEC_ID_INTERPLAY_DPCM) {
+ s->audio_chunk_offset += 6;
+ s->audio_chunk_size -= 6;
+ }
+
+ url_fseek(pb, s->audio_chunk_offset, SEEK_SET);
+ s->audio_chunk_offset = 0;
+
+ /* figure out the audio pts */
+ audio_pts = 90000;
+ audio_pts *= s->audio_frame_count;
+ audio_pts /= s->audio_sample_rate;
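+ /* e.g. at 22050 Hz, 11025 audio frames counted so far give a pts
+ of 45000, i.e. half a second on the 90 kHz clock of this demuxer */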
+
+ if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size))
+ return CHUNK_EOF;
+
+ pkt->stream_index = s->audio_stream_index;
+ pkt->pts = audio_pts;
+
+ /* audio frame maintenance */
+ if (s->audio_type != CODEC_ID_INTERPLAY_DPCM)
+ s->audio_frame_count +=
+ (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8));
+ else
+ s->audio_frame_count +=
+ (s->audio_chunk_size - 6) / s->audio_channels;
+
+ debug_ipmovie("sending audio frame with pts %"PRId64" (%d audio frames)\n",
+ audio_pts, s->audio_frame_count);
+
+ chunk_type = CHUNK_VIDEO;
+
+ } else if (s->decode_map_chunk_offset) {
+
+ /* send both the decode map and the video data together */
+
+ if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
+ return CHUNK_NOMEM;
+
+ pkt->pos= s->decode_map_chunk_offset;
+ url_fseek(pb, s->decode_map_chunk_offset, SEEK_SET);
+ s->decode_map_chunk_offset = 0;
+
+ if (get_buffer(pb, pkt->data, s->decode_map_chunk_size) !=
+ s->decode_map_chunk_size) {
+ av_free_packet(pkt);
+ return CHUNK_EOF;
+ }
+
+ url_fseek(pb, s->video_chunk_offset, SEEK_SET);
+ s->video_chunk_offset = 0;
+
+ if (get_buffer(pb, pkt->data + s->decode_map_chunk_size,
+ s->video_chunk_size) != s->video_chunk_size) {
+ av_free_packet(pkt);
+ return CHUNK_EOF;
+ }
+
+ pkt->stream_index = s->video_stream_index;
+ pkt->pts = s->video_pts;
+
+ debug_ipmovie("sending video frame with pts %"PRId64"\n",
+ pkt->pts);
+
+ s->video_pts += s->frame_pts_inc;
+
+ chunk_type = CHUNK_VIDEO;
+
+ } else {
+
+ url_fseek(pb, s->next_chunk_offset, SEEK_SET);
+ chunk_type = CHUNK_DONE;
+
+ }
+
+ return chunk_type;
+}
+
+/* This function loads and processes a single chunk in an IP movie file.
+ * It returns the type of chunk that was processed. */
+static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
+ AVPacket *pkt)
+{
+ unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
+ int chunk_type;
+ int chunk_size;
+ unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE];
+ unsigned char opcode_type;
+ unsigned char opcode_version;
+ int opcode_size;
+ unsigned char scratch[1024];
+ int i, j;
+ int first_color, last_color;
+ int audio_flags;
+ unsigned char r, g, b;
+
+ /* see if there are any pending packets */
+ chunk_type = load_ipmovie_packet(s, pb, pkt);
+ if ((chunk_type == CHUNK_VIDEO) && (chunk_type != CHUNK_DONE))
+ return chunk_type;
+
+ /* read the next chunk, wherever the file happens to be pointing */
+ if (url_feof(pb))
+ return CHUNK_EOF;
+ if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
+ CHUNK_PREAMBLE_SIZE)
+ return CHUNK_BAD;
+ chunk_size = LE_16(&chunk_preamble[0]);
+ chunk_type = LE_16(&chunk_preamble[2]);
+
+ debug_ipmovie("chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);
+
+ switch (chunk_type) {
+
+ case CHUNK_INIT_AUDIO:
+ debug_ipmovie("initialize audio\n");
+ break;
+
+ case CHUNK_AUDIO_ONLY:
+ debug_ipmovie("audio only\n");
+ break;
+
+ case CHUNK_INIT_VIDEO:
+ debug_ipmovie("initialize video\n");
+ break;
+
+ case CHUNK_VIDEO:
+ debug_ipmovie("video (and audio)\n");
+ break;
+
+ case CHUNK_SHUTDOWN:
+ debug_ipmovie("shutdown\n");
+ break;
+
+ case CHUNK_END:
+ debug_ipmovie("end\n");
+ break;
+
+ default:
+ debug_ipmovie("invalid chunk\n");
+ chunk_type = CHUNK_BAD;
+ break;
+
+ }
+
+ while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) {
+
+ /* read the next chunk, wherever the file happens to be pointing */
+ if (url_feof(pb)) {
+ chunk_type = CHUNK_EOF;
+ break;
+ }
+ if (get_buffer(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) !=
+ CHUNK_PREAMBLE_SIZE) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+
+ opcode_size = LE_16(&opcode_preamble[0]);
+ opcode_type = opcode_preamble[2];
+ opcode_version = opcode_preamble[3];
+
+ chunk_size -= OPCODE_PREAMBLE_SIZE;
+ chunk_size -= opcode_size;
+ if (chunk_size < 0) {
+ debug_ipmovie("chunk_size countdown just went negative\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+
+ debug_ipmovie(" opcode type %02X, version %d, 0x%04X bytes: ",
+ opcode_type, opcode_version, opcode_size);
+ switch (opcode_type) {
+
+ case OPCODE_END_OF_STREAM:
+ debug_ipmovie("end of stream\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_END_OF_CHUNK:
+ debug_ipmovie("end of chunk\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_CREATE_TIMER:
+ debug_ipmovie("create timer\n");
+ if ((opcode_version > 0) || (opcode_size > 6)) {
+ debug_ipmovie("bad create_timer opcode\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ if (get_buffer(pb, scratch, opcode_size) !=
+ opcode_size) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ s->fps = 1000000.0 / (LE_32(&scratch[0]) * LE_16(&scratch[4]));
+ s->frame_pts_inc = 90000 / s->fps;
+ debug_ipmovie(" %.2f frames/second (timer div = %d, subdiv = %d)\n",
+ s->fps, LE_32(&scratch[0]), LE_16(&scratch[4]));
+ break;
+
+ case OPCODE_INIT_AUDIO_BUFFERS:
+ debug_ipmovie("initialize audio buffers\n");
+ if ((opcode_version > 1) || (opcode_size > 10)) {
+ debug_ipmovie("bad init_audio_buffers opcode\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ if (get_buffer(pb, scratch, opcode_size) !=
+ opcode_size) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ s->audio_sample_rate = LE_16(&scratch[4]);
+ audio_flags = LE_16(&scratch[2]);
+ /* bit 0 of the flags: 0 = mono, 1 = stereo */
+ s->audio_channels = (audio_flags & 1) + 1;
+ /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
+ s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8;
+ /* bit 2 indicates compressed audio in version 1 opcode */
+ if ((opcode_version == 1) && (audio_flags & 0x4))
+ s->audio_type = CODEC_ID_INTERPLAY_DPCM;
+ else if (s->audio_bits == 16)
+ s->audio_type = CODEC_ID_PCM_S16LE;
+ else
+ s->audio_type = CODEC_ID_PCM_U8;
+ debug_ipmovie("audio: %d bits, %d Hz, %s, %s format\n",
+ s->audio_bits,
+ s->audio_sample_rate,
+ (s->audio_channels == 2) ? "stereo" : "mono",
+ (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
+ "Interplay audio" : "PCM");
+ break;
+
+ case OPCODE_START_STOP_AUDIO:
+ debug_ipmovie("start/stop audio\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_INIT_VIDEO_BUFFERS:
+ debug_ipmovie("initialize video buffers\n");
+ if ((opcode_version > 2) || (opcode_size > 8)) {
+ debug_ipmovie("bad init_video_buffers opcode\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ if (get_buffer(pb, scratch, opcode_size) !=
+ opcode_size) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ s->video_width = LE_16(&scratch[0]) * 8;
+ s->video_height = LE_16(&scratch[2]) * 8;
+ debug_ipmovie("video resolution: %d x %d\n",
+ s->video_width, s->video_height);
+ break;
+
+ case OPCODE_UNKNOWN_06:
+ case OPCODE_UNKNOWN_0E:
+ case OPCODE_UNKNOWN_10:
+ case OPCODE_UNKNOWN_12:
+ case OPCODE_UNKNOWN_13:
+ case OPCODE_UNKNOWN_14:
+ case OPCODE_UNKNOWN_15:
+ debug_ipmovie("unknown (but documented) opcode %02X\n", opcode_type);
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_SEND_BUFFER:
+ debug_ipmovie("send buffer\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_AUDIO_FRAME:
+ debug_ipmovie("audio frame\n");
+
+ /* log position and move on for now */
+ s->audio_chunk_offset = url_ftell(pb);
+ s->audio_chunk_size = opcode_size;
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_SILENCE_FRAME:
+ debug_ipmovie("silence frame\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_INIT_VIDEO_MODE:
+ debug_ipmovie("initialize video mode\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_CREATE_GRADIENT:
+ debug_ipmovie("create gradient\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_SET_PALETTE:
+ debug_ipmovie("set palette\n");
+ /* check for the logical maximum palette size
+ * (3 * 256 + 4 bytes) */
+ if (opcode_size > 0x304) {
+ debug_ipmovie("demux_ipmovie: set_palette opcode too large\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ if (get_buffer(pb, scratch, opcode_size) != opcode_size) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+
+ /* load the palette into internal data structure */
+ first_color = LE_16(&scratch[0]);
+ last_color = first_color + LE_16(&scratch[2]) - 1;
+ /* sanity check (since they are 16 bit values) */
+ if ((first_color > 0xFF) || (last_color > 0xFF)) {
+ debug_ipmovie("demux_ipmovie: set_palette indices out of range (%d -> %d)\n",
+ first_color, last_color);
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ j = 4; /* offset of first palette data */
+ for (i = first_color; i <= last_color; i++) {
+ /* the palette is stored as a 6-bit VGA palette, thus each
+ * component is shifted up to an 8-bit range */
+ r = scratch[j++] * 4;
+ g = scratch[j++] * 4;
+ b = scratch[j++] * 4;
+ s->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
+ }
+ /* indicate a palette change */
+ s->palette_control.palette_changed = 1;
+ break;
+
+ case OPCODE_SET_PALETTE_COMPRESSED:
+ debug_ipmovie("set palette compressed\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_SET_DECODING_MAP:
+ debug_ipmovie("set decoding map\n");
+
+ /* log position and move on for now */
+ s->decode_map_chunk_offset = url_ftell(pb);
+ s->decode_map_chunk_size = opcode_size;
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_VIDEO_DATA:
+ debug_ipmovie("set video data\n");
+
+ /* log position and move on for now */
+ s->video_chunk_offset = url_ftell(pb);
+ s->video_chunk_size = opcode_size;
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ default:
+ debug_ipmovie("*** unknown opcode type\n");
+ chunk_type = CHUNK_BAD;
+ break;
+
+ }
+ }
+
+ /* make a note of where the stream is sitting */
+ s->next_chunk_offset = url_ftell(pb);
+
+ /* dispatch the first of any pending packets */
+ if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY))
+ chunk_type = load_ipmovie_packet(s, pb, pkt);
+
+ return chunk_type;
+}
+
+static int ipmovie_probe(AVProbeData *p)
+{
+ if (p->buf_size < IPMOVIE_SIGNATURE_SIZE)
+ return 0;
+ if (strncmp(p->buf, IPMOVIE_SIGNATURE, IPMOVIE_SIGNATURE_SIZE) != 0)
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int ipmovie_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ IPMVEContext *ipmovie = (IPMVEContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVPacket pkt;
+ AVStream *st;
+ unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
+ int chunk_type;
+
+ /* initialize private context members */
+ ipmovie->video_pts = ipmovie->audio_frame_count = 0;
+ ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
+ ipmovie->decode_map_chunk_offset = 0;
+
+ /* on the first read, this will position the stream at the first chunk */
+ ipmovie->next_chunk_offset = IPMOVIE_SIGNATURE_SIZE + 6;
+
+ /* process the first chunk which should be CHUNK_INIT_VIDEO */
+ if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
+ return AVERROR_INVALIDDATA;
+
+ /* peek ahead to the next chunk-- if it is an init audio chunk, process
+ * it; if it is the first video chunk, this is a silent file */
+ if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
+ CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ chunk_type = LE_16(&chunk_preamble[2]);
+ url_fseek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);
+
+ if (chunk_type == CHUNK_VIDEO)
+ ipmovie->audio_type = 0; /* no audio */
+ else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
+ return AVERROR_INVALIDDATA;
+
+ /* initialize the stream decoders */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ ipmovie->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_INTERPLAY_VIDEO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = ipmovie->video_width;
+ st->codec->height = ipmovie->video_height;
+
+ /* palette considerations */
+ st->codec->palctrl = &ipmovie->palette_control;
+
+ if (ipmovie->audio_type) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ ipmovie->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = ipmovie->audio_type;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = ipmovie->audio_channels;
+ st->codec->sample_rate = ipmovie->audio_sample_rate;
+ st->codec->bits_per_sample = ipmovie->audio_bits;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ if (st->codec->codec_id == CODEC_ID_INTERPLAY_DPCM)
+ st->codec->bit_rate /= 2;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+ }
+
+ return 0;
+}
+
+static int ipmovie_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ IPMVEContext *ipmovie = (IPMVEContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret;
+
+ ret = process_ipmovie_chunk(ipmovie, pb, pkt);
+ if (ret == CHUNK_BAD)
+ ret = AVERROR_INVALIDDATA;
+ else if (ret == CHUNK_EOF)
+ ret = AVERROR_IO;
+ else if (ret == CHUNK_NOMEM)
+ ret = AVERROR_NOMEM;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int ipmovie_read_close(AVFormatContext *s)
+{
+// IPMVEContext *ipmovie = (IPMVEContext *)s->priv_data;
+
+ return 0;
+}
+
+AVInputFormat ipmovie_demuxer = {
+ "ipmovie",
+ "Interplay MVE format",
+ sizeof(IPMVEContext),
+ ipmovie_probe,
+ ipmovie_read_header,
+ ipmovie_read_packet,
+ ipmovie_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/isom.c b/contrib/ffmpeg/libavformat/isom.c
new file mode 100644
index 000000000..d4e923853
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/isom.c
@@ -0,0 +1,131 @@
+/*
+ * ISO Media common code
+ * Copyright (c) 2001 Fabrice Bellard.
+ * Copyright (c) 2002 Francois Revol <revol@free.fr>
+ * Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "riff.h"
+#include "isom.h"
+
+/* http://gpac.sourceforge.net/tutorial/mediatypes.htm */
+const CodecTag ff_mov_obj_type[] = {
+ { CODEC_ID_MPEG4 , 32 },
+ { CODEC_ID_H264 , 33 },
+ { CODEC_ID_AAC , 64 },
+ { CODEC_ID_MPEG2VIDEO, 96 }, /* MPEG2 Simple */
+ { CODEC_ID_MPEG2VIDEO, 97 }, /* MPEG2 Main */
+ { CODEC_ID_MPEG2VIDEO, 98 }, /* MPEG2 SNR */
+ { CODEC_ID_MPEG2VIDEO, 99 }, /* MPEG2 Spatial */
+ { CODEC_ID_MPEG2VIDEO, 100 }, /* MPEG2 High */
+ { CODEC_ID_MPEG2VIDEO, 101 }, /* MPEG2 422 */
+ { CODEC_ID_AAC , 102 }, /* MPEG2 AAC Main */
+ { CODEC_ID_AAC , 103 }, /* MPEG2 AAC Low */
+ { CODEC_ID_AAC , 104 }, /* MPEG2 AAC SSR */
+ { CODEC_ID_MP3 , 105 },
+ { CODEC_ID_MPEG1VIDEO, 106 },
+ { CODEC_ID_MP2 , 107 },
+ { CODEC_ID_MJPEG , 108 },
+ { CODEC_ID_PCM_S16LE , 224 },
+ { CODEC_ID_VORBIS , 221 },
+ { CODEC_ID_QCELP , 225 },
+ { CODEC_ID_AC3 , 226 },
+ { CODEC_ID_PCM_ALAW , 227 },
+ { CODEC_ID_PCM_MULAW , 228 },
+ { CODEC_ID_PCM_S16BE , 230 },
+ { CODEC_ID_H263 , 242 },
+ { CODEC_ID_H261 , 243 },
+ { 0, 0 },
+};
+
+/* map numeric codes from mdhd atom to ISO 639 */
+/* cf. QTFileFormat.pdf p253, qtff.pdf p205 */
+/* http://developer.apple.com/documentation/mac/Text/Text-368.html */
+/* deprecated in favour of storing the code as 3*5-bit ASCII */
+static const char *mov_mdhd_language_map[] = {
+ /* 0-9 */
+ "eng", "fra", "ger", "ita", "dut", "sve", "spa", "dan", "por", "nor",
+ "heb", "jpn", "ara", "fin", "gre", "ice", "mlt", "tur", "hr "/*scr*/, "chi"/*ace?*/,
+ "urd", "hin", "tha", "kor", "lit", "pol", "hun", "est", "lav", NULL,
+ "fo ", NULL, "rus", "chi", NULL, "iri", "alb", "ron", "ces", "slk",
+ "slv", "yid", "sr ", "mac", "bul", "ukr", "bel", "uzb", "kaz", "aze",
+ /*?*/
+ "aze", "arm", "geo", "mol", "kir", "tgk", "tuk", "mon", NULL, "pus",
+ "kur", "kas", "snd", "tib", "nep", "san", "mar", "ben", "asm", "guj",
+ "pa ", "ori", "mal", "kan", "tam", "tel", NULL, "bur", "khm", "lao",
+ /* roman? arabic? */
+ "vie", "ind", "tgl", "may", "may", "amh", "tir", "orm", "som", "swa",
+ /*==rundi?*/
+ NULL, "run", NULL, "mlg", "epo", NULL, NULL, NULL, NULL, NULL,
+ /* 100 */
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "wel", "baq",
+ "cat", "lat", "que", "grn", "aym", "tat", "uig", "dzo", "jav"
+};
+
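+/* The mp4 form packs each letter as (c - 0x60) into 5 bits, first
+ * letter in the highest bits; e.g. "und" becomes
+ * (0x15 << 10) | (0x0E << 5) | 0x04 = 0x55C4.
+ * ff_mov_lang_to_iso639() below performs the reverse mapping. */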
+int ff_mov_iso639_to_lang(const char *lang, int mp4)
+{
+ int i, code = 0;
+
+ /* old way, only for QT? */
+ for (i = 0; !mp4 && (i < (sizeof(mov_mdhd_language_map)/sizeof(char *))); i++) {
+ if (mov_mdhd_language_map[i] && !strcmp(lang, mov_mdhd_language_map[i]))
+ return i;
+ }
+ /* XXX: can we do that in mov too? */
+ if (!mp4)
+ return 0;
+ /* handle undefined as such */
+ if (lang[0] == '\0')
+ lang = "und";
+ /* 5bit ascii */
+ for (i = 0; i < 3; i++) {
+ unsigned char c = (unsigned char)lang[i];
+ if (c < 0x60)
+ return 0;
+ if (c > 0x60 + 0x1f)
+ return 0;
+ code <<= 5;
+ code |= (c - 0x60);
+ }
+ return code;
+}
+
+int ff_mov_lang_to_iso639(int code, char *to)
+{
+ int i;
+ /* is it the mangled iso code? */
+ /* see http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt */
+ if (code > 138) {
+ for (i = 2; i >= 0; i--) {
+ to[i] = 0x60 + (code & 0x1f);
+ code >>= 5;
+ }
+ return 1;
+ }
+ /* old-fashioned Apple language code */
+ if (code >= (sizeof(mov_mdhd_language_map)/sizeof(char *)))
+ return 0;
+ if (!mov_mdhd_language_map[code])
+ return 0;
+ strncpy(to, mov_mdhd_language_map[code], 4);
+ return 1;
+}
diff --git a/contrib/ffmpeg/libavformat/isom.h b/contrib/ffmpeg/libavformat/isom.h
new file mode 100644
index 000000000..85cbbdc6c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/isom.h
@@ -0,0 +1,38 @@
+/*
+ * ISO Media common code
+ * copyright (c) 2001 Fabrice Bellard.
+ * copyright (c) 2002 Francois Revol <revol@free.fr>
+ * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FFMPEG_ISOM_H
+#define FFMPEG_ISOM_H
+
+/* isom.c */
+extern const CodecTag ff_mov_obj_type[];
+
+int ff_mov_iso639_to_lang(const char *lang, int mp4);
+int ff_mov_lang_to_iso639(int code, char *to);
+
+typedef struct Time2Sample{
+ int count;
+ int duration;
+}Time2Sample;
+
+#endif /* FFMPEG_ISOM_H */
diff --git a/contrib/ffmpeg/libavformat/jpeg.c b/contrib/ffmpeg/libavformat/jpeg.c
new file mode 100644
index 000000000..b5fc043c9
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/jpeg.c
@@ -0,0 +1,240 @@
+/*
+ * JPEG image format
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+static int jpeg_probe(AVProbeData *pd)
+{
+ if (pd->buf_size >= 64 &&
+ pd->buf[0] == 0xff && pd->buf[1] == 0xd8 && pd->buf[2] == 0xff)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+typedef struct JpegOpaque {
+ int (*alloc_cb)(void *opaque, AVImageInfo *info);
+ void *opaque;
+ int ret_code;
+} JpegOpaque;
+
+/* called by the codec to allocate the image */
+static int jpeg_get_buffer(AVCodecContext *c, AVFrame *picture)
+{
+ JpegOpaque *jctx = c->opaque;
+ AVImageInfo info1, *info = &info1;
+ int ret, i;
+
+ info->width = c->width;
+ info->height = c->height;
+ switch(c->pix_fmt) {
+ case PIX_FMT_YUV420P:
+ info->pix_fmt = PIX_FMT_YUVJ420P;
+ break;
+ case PIX_FMT_YUV422P:
+ info->pix_fmt = PIX_FMT_YUVJ422P;
+ break;
+ case PIX_FMT_YUV444P:
+ info->pix_fmt = PIX_FMT_YUVJ444P;
+ break;
+ default:
+ return -1;
+ }
+ ret = jctx->alloc_cb(jctx->opaque, info);
+ if (ret) {
+ jctx->ret_code = ret;
+ return -1;
+ } else {
+ for(i=0;i<3;i++) {
+ picture->data[i] = info->pict.data[i];
+ picture->linesize[i] = info->pict.linesize[i];
+ }
+ return 0;
+ }
+}
+
+static void jpeg_img_copy(uint8_t *dst, int dst_wrap,
+ uint8_t *src, int src_wrap,
+ int width, int height)
+{
+ for(;height > 0; height--) {
+ memcpy(dst, src, width);
+ dst += dst_wrap;
+ src += src_wrap;
+ }
+}
+
+/* XXX: libavcodec is broken for truncated jpegs! */
+#define IO_BUF_SIZE (1024*1024)
+
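+/* Decode a JPEG by repeatedly feeding up to IO_BUF_SIZE bytes to the
+ * MJPEG decoder (opened with CODEC_FLAG_TRUNCATED so partial buffers
+ * are accepted) until a complete picture is reported, then copy the
+ * planes into the image allocated through the caller's callback. */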
+static int jpeg_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ AVCodecContext *c;
+ AVFrame *picture, picture1;
+ int len, size, got_picture, i;
+ uint8_t *inbuf_ptr, inbuf[IO_BUF_SIZE];
+ JpegOpaque jctx;
+
+ jctx.alloc_cb = alloc_cb;
+ jctx.opaque = opaque;
+ jctx.ret_code = -1; /* default return code is error */
+
+ c = avcodec_alloc_context();
+ if (!c)
+ return -1;
+ picture= avcodec_alloc_frame();
+ if (!picture) {
+ av_free(c);
+ return -1;
+ }
+ c->opaque = &jctx;
+ c->get_buffer = jpeg_get_buffer;
+ c->flags |= CODEC_FLAG_TRUNCATED; /* we don't send complete frames */
+ if (avcodec_open(c, &mjpeg_decoder) < 0)
+ goto fail1;
+ for(;;) {
+ size = get_buffer(f, inbuf, sizeof(inbuf));
+ if (size == 0)
+ break;
+ inbuf_ptr = inbuf;
+ while (size > 0) {
+ len = avcodec_decode_video(c, &picture1, &got_picture,
+ inbuf_ptr, size);
+ if (len < 0)
+ goto fail;
+ if (got_picture)
+ goto the_end;
+ size -= len;
+ inbuf_ptr += len;
+ }
+ }
+ the_end:
+ /* XXX: currently, the mjpeg decoder does not use AVFrame, so we
+ must do it by hand */
+ if (jpeg_get_buffer(c, picture) < 0)
+ goto fail;
+ for(i=0;i<3;i++) {
+ int w, h;
+ w = c->width;
+ h = c->height;
+ if (i >= 1) {
+ switch(c->pix_fmt) {
+ default:
+ case PIX_FMT_YUV420P:
+ w = (w + 1) >> 1;
+ h = (h + 1) >> 1;
+ break;
+ case PIX_FMT_YUV422P:
+ w = (w + 1) >> 1;
+ break;
+ case PIX_FMT_YUV444P:
+ break;
+ }
+ }
+ jpeg_img_copy(picture->data[i], picture->linesize[i],
+ picture1.data[i], picture1.linesize[i],
+ w, h);
+ }
+ jctx.ret_code = 0;
+ fail:
+ avcodec_close(c);
+ fail1:
+ av_free(picture);
+ av_free(c);
+ return jctx.ret_code;
+}
+
+#if defined(CONFIG_MUXERS) && defined(CONFIG_MJPEG_ENCODER)
+static int jpeg_write(ByteIOContext *pb, AVImageInfo *info)
+{
+ AVCodecContext *c;
+ uint8_t *outbuf = NULL;
+ int outbuf_size, ret, size, i;
+ AVFrame *picture;
+
+ ret = -1;
+ c = avcodec_alloc_context();
+ if (!c)
+ return -1;
+ picture = avcodec_alloc_frame();
+ if (!picture)
+ goto fail2;
+ c->width = info->width;
+ c->height = info->height;
+ /* XXX: should this conversion be moved into the codec? */
+ switch(info->pix_fmt) {
+ case PIX_FMT_YUVJ420P:
+ c->pix_fmt = PIX_FMT_YUV420P;
+ break;
+ case PIX_FMT_YUVJ422P:
+ c->pix_fmt = PIX_FMT_YUV422P;
+ break;
+ case PIX_FMT_YUVJ444P:
+ c->pix_fmt = PIX_FMT_YUV444P;
+ break;
+ default:
+ goto fail1;
+ }
+ for(i=0;i<3;i++) {
+ picture->data[i] = info->pict.data[i];
+ picture->linesize[i] = info->pict.linesize[i];
+ }
+ /* set the quality */
+ picture->quality = 3; /* XXX: a parameter should be used */
+ c->flags |= CODEC_FLAG_QSCALE;
+
+ if (avcodec_open(c, &mjpeg_encoder) < 0)
+ goto fail1;
+
+ /* XXX: the output buffer size needs to be worked out properly */
+ outbuf_size = 1000000;
+ outbuf = av_malloc(outbuf_size);
+
+ size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
+ if (size < 0)
+ goto fail;
+ put_buffer(pb, outbuf, size);
+ put_flush_packet(pb);
+ ret = 0;
+
+ fail:
+ avcodec_close(c);
+ av_free(outbuf);
+ fail1:
+ av_free(picture);
+ fail2:
+ av_free(c);
+ return ret;
+}
+#endif //CONFIG_MUXERS
+
+AVImageFormat jpeg_image_format = {
+ "jpeg",
+ "jpg,jpeg",
+ jpeg_probe,
+ jpeg_read,
+ (1 << PIX_FMT_YUVJ420P) | (1 << PIX_FMT_YUVJ422P) | (1 << PIX_FMT_YUVJ444P),
+#if defined(CONFIG_MUXERS) && defined(CONFIG_MJPEG_ENCODER)
+ jpeg_write,
+#else
+ NULL,
+#endif //CONFIG_MUXERS
+};
diff --git a/contrib/ffmpeg/libavformat/libnut.c b/contrib/ffmpeg/libavformat/libnut.c
new file mode 100644
index 000000000..d4e7201ab
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/libnut.c
@@ -0,0 +1,283 @@
+#include "avformat.h"
+#include "riff.h"
+#include <libnut.h>
+
+#define ID_STRING "nut/multimedia container"
+#define ID_LENGTH (strlen(ID_STRING) + 1)
+
+typedef struct {
+ nut_context_t * nut;
+ nut_stream_header_t * s;
+} NUTContext;
+
+static const CodecTag nut_tags[] = {
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_MP3, MKTAG('m', 'p', '3', ' ') },
+ { CODEC_ID_VORBIS, MKTAG('v', 'r', 'b', 's') },
+ { 0, 0 },
+};
+
+#ifdef CONFIG_MUXERS
+static int av_write(void * h, size_t len, const uint8_t * buf) {
+ ByteIOContext * bc = h;
+ put_buffer(bc, buf, len);
+ //put_flush_packet(bc);
+ return len;
+}
+
+static int nut_write_header(AVFormatContext * avf) {
+ NUTContext * priv = avf->priv_data;
+ ByteIOContext * bc = &avf->pb;
+ nut_muxer_opts_t mopts = {
+ .output = {
+ .priv = bc,
+ .write = av_write,
+ },
+ .alloc = { av_malloc, av_realloc, av_free },
+ .write_index = 1,
+ .realtime_stream = 0,
+ .max_distance = 32768,
+ .fti = NULL,
+ };
+ nut_stream_header_t * s;
+ int i;
+
+ priv->s = s = av_mallocz((avf->nb_streams + 1) * sizeof*s);
+
+ for (i = 0; i < avf->nb_streams; i++) {
+ AVCodecContext * codec = avf->streams[i]->codec;
+ int j;
+ int fourcc = 0;
+ int nom, denom, ssize;
+
+ s[i].type = codec->codec_type == CODEC_TYPE_VIDEO ? NUT_VIDEO_CLASS : NUT_AUDIO_CLASS;
+
+ if (codec->codec_tag) fourcc = codec->codec_tag;
+ else fourcc = codec_get_tag(nut_tags, codec->codec_id);
+
+ if (!fourcc) {
+ if (codec->codec_type == CODEC_TYPE_VIDEO) fourcc = codec_get_bmp_tag(codec->codec_id);
+ if (codec->codec_type == CODEC_TYPE_AUDIO) fourcc = codec_get_wav_tag(codec->codec_id);
+ }
+
+ s[i].fourcc_len = 4;
+ s[i].fourcc = av_malloc(s[i].fourcc_len);
+ for (j = 0; j < s[i].fourcc_len; j++) s[i].fourcc[j] = (fourcc >> (j*8)) & 0xFF;
+
+ ff_parse_specific_params(codec, &nom, &ssize, &denom);
+ av_set_pts_info(avf->streams[i], 60, denom, nom);
+
+ s[i].time_base.nom = denom;
+ s[i].time_base.den = nom;
+
+ s[i].fixed_fps = 0;
+ s[i].decode_delay = codec->has_b_frames;
+ s[i].codec_specific_len = codec->extradata_size;
+ s[i].codec_specific = codec->extradata;
+
+ if (codec->codec_type == CODEC_TYPE_VIDEO) {
+ s[i].width = codec->width;
+ s[i].height = codec->height;
+ s[i].sample_width = 0;
+ s[i].sample_height = 0;
+ s[i].colorspace_type = 0;
+ } else {
+ s[i].samplerate_nom = codec->sample_rate;
+ s[i].samplerate_denom = 1;
+ s[i].channel_count = codec->channels;
+ }
+ }
+
+ s[avf->nb_streams].type = -1;
+ priv->nut = nut_muxer_init(&mopts, s, NULL);
+
+ return 0;
+}
+
+static int nut_write_packet(AVFormatContext * avf, AVPacket * pkt) {
+ NUTContext * priv = avf->priv_data;
+ nut_packet_t p;
+
+ p.len = pkt->size;
+ p.stream = pkt->stream_index;
+ p.pts = pkt->pts;
+ p.flags = pkt->flags & PKT_FLAG_KEY ? NUT_FLAG_KEY : 0;
+ p.next_pts = 0;
+
+ nut_write_frame_reorder(priv->nut, &p, pkt->data);
+
+ return 0;
+}
+
+static int nut_write_trailer(AVFormatContext * avf) {
+ ByteIOContext * bc = &avf->pb;
+ NUTContext * priv = avf->priv_data;
+ int i;
+
+ nut_muxer_uninit_reorder(priv->nut);
+ put_flush_packet(bc);
+
+ for(i = 0; priv->s[i].type != -1; i++ ) av_freep(&priv->s[i].fourcc);
+ av_freep(&priv->s);
+
+ return 0;
+}
+
+AVOutputFormat nut_muxer = {
+ "nut",
+ "nut format",
+ "video/x-nut",
+ "nut",
+ sizeof(NUTContext),
+ CODEC_ID_VORBIS,
+ CODEC_ID_MPEG4,
+ nut_write_header,
+ nut_write_packet,
+ nut_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif //CONFIG_MUXERS
+
+static int nut_probe(AVProbeData *p) {
+ if (p->buf_size >= ID_LENGTH && !memcmp(p->buf, ID_STRING, ID_LENGTH)) return AVPROBE_SCORE_MAX;
+
+ return 0;
+}
+
+static size_t av_read(void * h, size_t len, uint8_t * buf) {
+ ByteIOContext * bc = h;
+ return get_buffer(bc, buf, len);
+}
+
+static off_t av_seek(void * h, long long pos, int whence) {
+ ByteIOContext * bc = h;
+ if (whence == SEEK_END) {
+ pos = url_fsize(bc) + pos;
+ whence = SEEK_SET;
+ }
+ return url_fseek(bc, pos, whence);
+}
+
+static int nut_read_header(AVFormatContext * avf, AVFormatParameters * ap) {
+ NUTContext * priv = avf->priv_data;
+ ByteIOContext * bc = &avf->pb;
+ nut_demuxer_opts_t dopts = {
+ .input = {
+ .priv = bc,
+ .seek = av_seek,
+ .read = av_read,
+ .eof = NULL,
+ .file_pos = 0,
+ },
+ .alloc = { av_malloc, av_realloc, av_free },
+ .read_index = 1,
+ .cache_syncpoints = 1,
+ };
+ nut_context_t * nut = priv->nut = nut_demuxer_init(&dopts);
+ nut_stream_header_t * s;
+ int ret, i;
+
+ if ((ret = nut_read_headers(nut, &s, NULL))) {
+ av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret));
+ nut_demuxer_uninit(nut);
+ return -1;
+ }
+
+ priv->s = s;
+
+ for (i = 0; s[i].type != -1 && i < 2; i++) {
+ AVStream * st = av_new_stream(avf, i);
+ int j;
+
+ for (j = 0; j < s[i].fourcc_len && j < 8; j++) st->codec->codec_tag |= s[i].fourcc[j]<<(j*8);
+
+ st->codec->has_b_frames = s[i].decode_delay;
+
+ st->codec->extradata_size = s[i].codec_specific_len;
+ if (st->codec->extradata_size) {
+ st->codec->extradata = av_mallocz(st->codec->extradata_size);
+ memcpy(st->codec->extradata, s[i].codec_specific, st->codec->extradata_size);
+ }
+
+ av_set_pts_info(avf->streams[i], 60, s[i].time_base.nom, s[i].time_base.den);
+ st->start_time = 0;
+ st->duration = s[i].max_pts;
+
+ st->codec->codec_id = codec_get_id(nut_tags, st->codec->codec_tag);
+
+ switch(s[i].type) {
+ case NUT_AUDIO_CLASS:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_wav_id(st->codec->codec_tag);
+
+ st->codec->channels = s[i].channel_count;
+ st->codec->sample_rate = s[i].samplerate_nom / s[i].samplerate_denom;
+ break;
+ case NUT_VIDEO_CLASS:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_bmp_id(st->codec->codec_tag);
+
+ st->codec->width = s[i].width;
+ st->codec->height = s[i].height;
+ st->codec->sample_aspect_ratio.num = s[i].sample_width;
+ st->codec->sample_aspect_ratio.den = s[i].sample_height;
+ break;
+ }
+ if (st->codec->codec_id == CODEC_ID_NONE) av_log(avf, AV_LOG_ERROR, "Unknown codec?!\n");
+ }
+
+ return 0;
+}
+
+static int nut_read_packet(AVFormatContext * avf, AVPacket * pkt) {
+ NUTContext * priv = avf->priv_data;
+ nut_packet_t pd;
+ int ret;
+
+ ret = nut_read_next_packet(priv->nut, &pd);
+
+ if (ret || av_new_packet(pkt, pd.len) < 0) {
+ if (ret != NUT_ERR_EOF)
+ av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret));
+ return -1;
+ }
+
+ if (pd.flags & NUT_FLAG_KEY) pkt->flags |= PKT_FLAG_KEY;
+ pkt->pts = pd.pts;
+ pkt->stream_index = pd.stream;
+ pkt->pos = url_ftell(&avf->pb);
+
+ ret = nut_read_frame(priv->nut, &pd.len, pkt->data);
+
+ return ret;
+}
+
+static int nut_read_seek(AVFormatContext * avf, int stream_index, int64_t target_ts, int flags) {
+ NUTContext * priv = avf->priv_data;
+ int active_streams[] = { stream_index, -1 };
+ double time_pos = target_ts * priv->s[stream_index].time_base.nom / (double)priv->s[stream_index].time_base.den;
+
+ if (nut_seek(priv->nut, time_pos, 2*!(flags & AVSEEK_FLAG_BACKWARD), active_streams)) return -1;
+
+ return 0;
+}
+
+static int nut_read_close(AVFormatContext *s) {
+ NUTContext * priv = s->priv_data;
+
+ nut_demuxer_uninit(priv->nut);
+
+ return 0;
+}
+
+AVInputFormat nut_demuxer = {
+ "nut",
+ "nut format",
+ sizeof(NUTContext),
+ nut_probe,
+ nut_read_header,
+ nut_read_packet,
+ nut_read_close,
+ nut_read_seek,
+ .extensions = "nut",
+};
diff --git a/contrib/ffmpeg/libavformat/matroska.c b/contrib/ffmpeg/libavformat/matroska.c
new file mode 100644
index 000000000..0cd119e71
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/matroska.c
@@ -0,0 +1,2767 @@
+/*
+ * Matroska file demuxer (no muxer yet)
+ * Copyright (c) 2003-2004 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file matroska.c
+ * Matroska file demuxer
+ * by Ronald Bultje <rbultje@ronald.bitfreak.net>
+ * with a little help from Moritz Bunkus <moritz@bunkus.org>
+ * Specs available on the matroska project page:
+ * http://www.matroska.org/.
+ */
+
+#include "avformat.h"
+/* For codec_get_bmp_id and codec_get_wav_id. */
+#include "riff.h"
+#include "intfloat_readwrite.h"
+
+/* EBML version supported */
+#define EBML_VERSION 1
+
+/* top-level master-IDs */
+#define EBML_ID_HEADER 0x1A45DFA3
+
+/* IDs in the HEADER master */
+#define EBML_ID_EBMLVERSION 0x4286
+#define EBML_ID_EBMLREADVERSION 0x42F7
+#define EBML_ID_EBMLMAXIDLENGTH 0x42F2
+#define EBML_ID_EBMLMAXSIZELENGTH 0x42F3
+#define EBML_ID_DOCTYPE 0x4282
+#define EBML_ID_DOCTYPEVERSION 0x4287
+#define EBML_ID_DOCTYPEREADVERSION 0x4285
+
+/* general EBML types */
+#define EBML_ID_VOID 0xEC
+
+/*
+ * Matroska element IDs. max. 32-bit.
+ */
+
+/* toplevel segment */
+#define MATROSKA_ID_SEGMENT 0x18538067
+
+/* matroska top-level master IDs */
+#define MATROSKA_ID_INFO 0x1549A966
+#define MATROSKA_ID_TRACKS 0x1654AE6B
+#define MATROSKA_ID_CUES 0x1C53BB6B
+#define MATROSKA_ID_TAGS 0x1254C367
+#define MATROSKA_ID_SEEKHEAD 0x114D9B74
+#define MATROSKA_ID_CLUSTER 0x1F43B675
+
+/* IDs in the info master */
+#define MATROSKA_ID_TIMECODESCALE 0x2AD7B1
+#define MATROSKA_ID_DURATION 0x4489
+#define MATROSKA_ID_TITLE 0x7BA9
+#define MATROSKA_ID_WRITINGAPP 0x5741
+#define MATROSKA_ID_MUXINGAPP 0x4D80
+#define MATROSKA_ID_DATEUTC 0x4461
+
+/* ID in the tracks master */
+#define MATROSKA_ID_TRACKENTRY 0xAE
+
+/* IDs in the trackentry master */
+#define MATROSKA_ID_TRACKNUMBER 0xD7
+#define MATROSKA_ID_TRACKUID 0x73C5
+#define MATROSKA_ID_TRACKTYPE 0x83
+#define MATROSKA_ID_TRACKAUDIO 0xE1
+#define MATROSKA_ID_TRACKVIDEO 0xE0
+#define MATROSKA_ID_CODECID 0x86
+#define MATROSKA_ID_CODECPRIVATE 0x63A2
+#define MATROSKA_ID_CODECNAME 0x258688
+#define MATROSKA_ID_CODECINFOURL 0x3B4040
+#define MATROSKA_ID_CODECDOWNLOADURL 0x26B240
+#define MATROSKA_ID_TRACKNAME 0x536E
+#define MATROSKA_ID_TRACKLANGUAGE 0x22B59C
+#define MATROSKA_ID_TRACKFLAGENABLED 0xB9
+#define MATROSKA_ID_TRACKFLAGDEFAULT 0x88
+#define MATROSKA_ID_TRACKFLAGLACING 0x9C
+#define MATROSKA_ID_TRACKMINCACHE 0x6DE7
+#define MATROSKA_ID_TRACKMAXCACHE 0x6DF8
+#define MATROSKA_ID_TRACKDEFAULTDURATION 0x23E383
+
+/* IDs in the trackvideo master */
+#define MATROSKA_ID_VIDEOFRAMERATE 0x2383E3
+#define MATROSKA_ID_VIDEODISPLAYWIDTH 0x54B0
+#define MATROSKA_ID_VIDEODISPLAYHEIGHT 0x54BA
+#define MATROSKA_ID_VIDEOPIXELWIDTH 0xB0
+#define MATROSKA_ID_VIDEOPIXELHEIGHT 0xBA
+#define MATROSKA_ID_VIDEOFLAGINTERLACED 0x9A
+#define MATROSKA_ID_VIDEOSTEREOMODE 0x53B9
+#define MATROSKA_ID_VIDEOASPECTRATIO 0x54B3
+#define MATROSKA_ID_VIDEOCOLOURSPACE 0x2EB524
+
+/* IDs in the trackaudio master */
+#define MATROSKA_ID_AUDIOSAMPLINGFREQ 0xB5
+#define MATROSKA_ID_AUDIOOUTSAMPLINGFREQ 0x78B5
+
+#define MATROSKA_ID_AUDIOBITDEPTH 0x6264
+#define MATROSKA_ID_AUDIOCHANNELS 0x9F
+
+/* ID in the cues master */
+#define MATROSKA_ID_POINTENTRY 0xBB
+
+/* IDs in the pointentry master */
+#define MATROSKA_ID_CUETIME 0xB3
+#define MATROSKA_ID_CUETRACKPOSITION 0xB7
+
+/* IDs in the cuetrackposition master */
+#define MATROSKA_ID_CUETRACK 0xF7
+#define MATROSKA_ID_CUECLUSTERPOSITION 0xF1
+
+/* IDs in the tags master */
+/* TODO */
+
+/* IDs in the seekhead master */
+#define MATROSKA_ID_SEEKENTRY 0x4DBB
+
+/* IDs in the seekpoint master */
+#define MATROSKA_ID_SEEKID 0x53AB
+#define MATROSKA_ID_SEEKPOSITION 0x53AC
+
+/* IDs in the cluster master */
+#define MATROSKA_ID_CLUSTERTIMECODE 0xE7
+#define MATROSKA_ID_BLOCKGROUP 0xA0
+
+/* IDs in the blockgroup master */
+#define MATROSKA_ID_BLOCK 0xA1
+#define MATROSKA_ID_BLOCKDURATION 0x9B
+#define MATROSKA_ID_BLOCKREFERENCE 0xFB
+
+typedef enum {
+ MATROSKA_TRACK_TYPE_VIDEO = 0x1,
+ MATROSKA_TRACK_TYPE_AUDIO = 0x2,
+ MATROSKA_TRACK_TYPE_COMPLEX = 0x3,
+ MATROSKA_TRACK_TYPE_LOGO = 0x10,
+ MATROSKA_TRACK_TYPE_SUBTITLE = 0x11,
+ MATROSKA_TRACK_TYPE_CONTROL = 0x20,
+} MatroskaTrackType;
+
+typedef enum {
+ MATROSKA_EYE_MODE_MONO = 0x0,
+ MATROSKA_EYE_MODE_RIGHT = 0x1,
+ MATROSKA_EYE_MODE_LEFT = 0x2,
+ MATROSKA_EYE_MODE_BOTH = 0x3,
+} MatroskaEyeMode;
+
+typedef enum {
+ MATROSKA_ASPECT_RATIO_MODE_FREE = 0x0,
+ MATROSKA_ASPECT_RATIO_MODE_KEEP = 0x1,
+ MATROSKA_ASPECT_RATIO_MODE_FIXED = 0x2,
+} MatroskaAspectRatioMode;
+
+/*
+ * These aren't in any way "matroska-form" things;
+ * they are just flags used internally by the muxer/demuxer.
+ */
+
+typedef enum {
+ MATROSKA_TRACK_ENABLED = (1<<0),
+ MATROSKA_TRACK_DEFAULT = (1<<1),
+ MATROSKA_TRACK_LACING = (1<<2),
+ MATROSKA_TRACK_REAL_V = (1<<4),
+ MATROSKA_TRACK_SHIFT = (1<<16)
+} MatroskaTrackFlags;
+
+typedef enum {
+ MATROSKA_VIDEOTRACK_INTERLACED = (MATROSKA_TRACK_SHIFT<<0)
+} MatroskaVideoTrackFlags;
+
+/*
+ * Matroska Codec IDs. Strings.
+ */
+
+typedef struct CodecTags{
+ const char *str;
+ enum CodecID id;
+}CodecTags;
+
+#define MATROSKA_CODEC_ID_VIDEO_VFW_FOURCC "V_MS/VFW/FOURCC"
+#define MATROSKA_CODEC_ID_AUDIO_ACM "A_MS/ACM"
+
+static CodecTags codec_tags[]={
+// {"V_MS/VFW/FOURCC" , CODEC_ID_NONE},
+ {"V_UNCOMPRESSED" , CODEC_ID_RAWVIDEO},
+ {"V_MPEG4/ISO/SP" , CODEC_ID_MPEG4},
+ {"V_MPEG4/ISO/ASP" , CODEC_ID_MPEG4},
+ {"V_MPEG4/ISO/AP" , CODEC_ID_MPEG4},
+ {"V_MPEG4/ISO/AVC" , CODEC_ID_H264},
+ {"V_MPEG4/MS/V3" , CODEC_ID_MSMPEG4V3},
+ {"V_MPEG1" , CODEC_ID_MPEG1VIDEO},
+ {"V_MPEG2" , CODEC_ID_MPEG2VIDEO},
+ {"V_MJPEG" , CODEC_ID_MJPEG},
+ {"V_REAL/RV10" , CODEC_ID_RV10},
+ {"V_REAL/RV20" , CODEC_ID_RV20},
+ {"V_REAL/RV30" , CODEC_ID_RV30},
+ {"V_REAL/RV40" , CODEC_ID_RV40},
+/* TODO: Real/Quicktime */
+
+// {"A_MS/ACM" , CODEC_ID_NONE},
+ {"A_MPEG/L1" , CODEC_ID_MP3},
+ {"A_MPEG/L2" , CODEC_ID_MP3},
+ {"A_MPEG/L3" , CODEC_ID_MP3},
+ {"A_PCM/INT/BIG" , CODEC_ID_PCM_U16BE},
+ {"A_PCM/INT/LIT" , CODEC_ID_PCM_U16LE},
+// {"A_PCM/FLOAT/IEEE" , CODEC_ID_NONE},
+ {"A_AC3" , CODEC_ID_AC3},
+ {"A_DTS" , CODEC_ID_DTS},
+ {"A_VORBIS" , CODEC_ID_VORBIS},
+ {"A_AAC" , CODEC_ID_AAC},
+ {"A_FLAC" , CODEC_ID_FLAC},
+ {"A_WAVPACK4" , CODEC_ID_WAVPACK},
+ {"A_TTA1" , CODEC_ID_TTA},
+ {NULL , CODEC_ID_NONE}
+/* TODO: AC3-9/10 (?), Real, Musepack, Quicktime */
+};
+
+/* max. depth in the EBML tree structure */
+#define EBML_MAX_DEPTH 16
+
+typedef struct Track {
+ MatroskaTrackType type;
+
+ /* Unique track number and track ID. stream_index is the index that
+ * the calling app uses for this track. */
+ uint32_t num,
+ uid,
+ stream_index;
+
+ char *name,
+ *language;
+
+ char *codec_id,
+ *codec_name;
+
+ unsigned char *codec_priv;
+ int codec_priv_size;
+
+ int64_t default_duration;
+ MatroskaTrackFlags flags;
+} MatroskaTrack;
+
+typedef struct MatroskaVideoTrack {
+ MatroskaTrack track;
+
+ int pixel_width,
+ pixel_height,
+ display_width,
+ display_height;
+
+ uint32_t fourcc;
+
+ MatroskaAspectRatioMode ar_mode;
+ MatroskaEyeMode eye_mode;
+
+ //..
+} MatroskaVideoTrack;
+
+typedef struct MatroskaAudioTrack {
+ MatroskaTrack track;
+
+ int channels,
+ bitdepth,
+ internal_samplerate,
+ samplerate;
+ //..
+} MatroskaAudioTrack;
+
+typedef struct MatroskaSubtitleTrack {
+ MatroskaTrack track;
+
+ //..
+} MatroskaSubtitleTrack;
+
+typedef struct MatroskaLevel {
+ uint64_t start, length;
+} MatroskaLevel;
+
+typedef struct MatroskaDemuxIndex {
+ uint64_t pos; /* of the corresponding *cluster*! */
+ uint16_t track; /* reference to 'num' */
+ uint64_t time; /* in nanoseconds */
+} MatroskaDemuxIndex;
+
+typedef struct MatroskaDemuxContext {
+ AVFormatContext *ctx;
+
+ /* ebml stuff */
+ int num_levels;
+ MatroskaLevel levels[EBML_MAX_DEPTH];
+ int level_up;
+
+ /* matroska stuff */
+ char *writing_app,
+ *muxing_app;
+ int64_t created;
+
+ /* timescale in the file */
+ int64_t time_scale;
+
+ /* position (time, ns) */
+ int64_t pos;
+
+ /* num_streams is the number of streams for which av_new_stream() was
+ * called (i.e. that are available to the calling program). */
+ int num_tracks, num_streams;
+ MatroskaTrack *tracks[MAX_STREAMS];
+
+ /* cache for ID peeking */
+ uint32_t peek_id;
+
+ /* byte position of the segment inside the stream */
+ offset_t segment_start;
+
+ /* The packet queue. */
+ AVPacket **packets;
+ int num_packets;
+
+ /* have we already parsed metadata/cues/clusters? */
+ int metadata_parsed,
+ index_parsed,
+ done;
+
+ /* The index for seeking. */
+ int num_indexes;
+ MatroskaDemuxIndex *index;
+} MatroskaDemuxContext;
+
+/*
+ * The first few functions handle EBML file parsing; the rest
+ * interpret the document. Matroska really is just an EBML file.
+ */
+
+/*
+ * Return: the number of levels in the hierarchy that the
+ * current element lies higher than the previous one.
+ * The opposite (descending) is not handled here - that happens
+ * automatically through master element reading.
+ */
+
+static int
+ebml_read_element_level_up (MatroskaDemuxContext *matroska)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ offset_t pos = url_ftell(pb);
+ int num = 0;
+
+ while (matroska->num_levels > 0) {
+ MatroskaLevel *level = &matroska->levels[matroska->num_levels - 1];
+
+ if (pos >= level->start + level->length) {
+ matroska->num_levels--;
+ num++;
+ } else {
+ break;
+ }
+ }
+
+ return num;
+}
+
+/*
+ * Read: an "EBML number", which is stored as a variable-length
+ * array of bytes. The number of leading 0-bits before the first
+ * "one" bit in the first byte encodes the total length of the
+ * number in bytes.
+ * Returns: number of bytes read, < 0 on error.
+ */
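+/*
+ * Example (per the EBML spec): the single byte 0x81 encodes the value 1
+ * (length descriptor "1", data bits 0000001), while the two-byte sequence
+ * 0x40 0x02 encodes the value 2 with a two-byte length descriptor.
+ */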
+
+static int
+ebml_read_num (MatroskaDemuxContext *matroska,
+ int max_size,
+ uint64_t *number)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int len_mask = 0x80, read = 1, n = 1;
+ int64_t total = 0;
+
+ /* the first byte tells us the length in bytes - get_byte() can normally
+ * return 0, but since that's not a valid first ebmlID byte, we can
+ * use it safely here to catch EOS. */
+ if (!(total = get_byte(pb))) {
+ /* we might encounter EOS here */
+ if (!url_feof(pb)) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Read error at pos. %"PRIu64" (0x%"PRIx64")\n",
+ pos, pos);
+ }
+ return AVERROR_IO; /* EOS or actual I/O error */
+ }
+
+ /* get the length of the EBML number */
+ while (read <= max_size && !(total & len_mask)) {
+ read++;
+ len_mask >>= 1;
+ }
+ if (read > max_size) {
+ offset_t pos = url_ftell(pb) - 1;
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Invalid EBML number size tag 0x%02x at pos %"PRIu64" (0x%"PRIx64")\n",
+ (uint8_t) total, pos, pos);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* read out length */
+ total &= ~len_mask;
+ while (n++ < read)
+ total = (total << 8) | get_byte(pb);
+
+ *number = total;
+
+ return read;
+}
+
+/*
+ * Read: the element content data ID.
+ * Return: the number of bytes read or < 0 on error.
+ */
+
+static int
+ebml_read_element_id (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ int *level_up)
+{
+ int read;
+ uint64_t total;
+
+ /* if we re-call this, use our cached ID */
+ if (matroska->peek_id != 0) {
+ if (level_up)
+ *level_up = 0;
+ *id = matroska->peek_id;
+ return 0;
+ }
+
+ /* read out the "EBML number", include tag in ID */
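+ /* (EBML IDs are stored including their length-descriptor bit, so the
+ * marker bit that ebml_read_num() strips is OR'ed back in below; e.g.
+ * the EBML header ID reads back as the full 0x1A45DFA3.) */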
+ if ((read = ebml_read_num(matroska, 4, &total)) < 0)
+ return read;
+ *id = matroska->peek_id = total | (1 << (read * 7));
+
+ /* level tracking */
+ if (level_up)
+ *level_up = ebml_read_element_level_up(matroska);
+
+ return read;
+}
+
+/*
+ * Read: element content length.
+ * Return: the number of bytes read or < 0 on error.
+ */
+
+static int
+ebml_read_element_length (MatroskaDemuxContext *matroska,
+ uint64_t *length)
+{
+ /* clear cache since we're now beyond that data point */
+ matroska->peek_id = 0;
+
+ /* read out the "EBML number", include tag in ID */
+ return ebml_read_num(matroska, 8, length);
+}
+
+/*
+ * Return: the ID of the next element, or 0 on error.
+ * Level_up contains the amount of levels that this
+ * next element lies higher than the previous one.
+ */
+
+static uint32_t
+ebml_peek_id (MatroskaDemuxContext *matroska,
+ int *level_up)
+{
+ uint32_t id;
+
+ assert(level_up != NULL);
+
+ if (ebml_read_element_id(matroska, &id, level_up) < 0)
+ return 0;
+
+ return id;
+}
+
+/*
+ * Seek to a given offset.
+ * 0 is success, -1 is failure.
+ */
+
+static int
+ebml_read_seek (MatroskaDemuxContext *matroska,
+ offset_t offset)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+
+ /* clear ID cache, if any */
+ matroska->peek_id = 0;
+
+ return (url_fseek(pb, offset, SEEK_SET) == offset) ? 0 : -1;
+}
+
+/*
+ * Skip the next element.
+ * 0 is success, -1 is failure.
+ */
+
+static int
+ebml_read_skip (MatroskaDemuxContext *matroska)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ uint32_t id;
+ uint64_t length;
+ int res;
+
+ if ((res = ebml_read_element_id(matroska, &id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &length)) < 0)
+ return res;
+
+ url_fskip(pb, length);
+
+ return 0;
+}
+
+/*
+ * Read the next element as an unsigned int.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_uint (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ uint64_t *num)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int n = 0, size, res;
+ uint64_t rlength;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ size = rlength;
+ if (size < 1 || size > 8) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Invalid uint element size %d at position %"PRId64" (0x%"PRIx64")\n",
+ size, pos, pos);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* big-endian ordering; build up the number */
+ *num = 0;
+ while (n++ < size)
+ *num = (*num << 8) | get_byte(pb);
+
+ return 0;
+}
+
+/*
+ * Read the next element as a signed int.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_sint (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ int64_t *num)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int size, n = 1, negative = 0, res;
+ uint64_t rlength;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ size = rlength;
+ if (size < 1 || size > 8) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Invalid sint element size %d at position %"PRId64" (0x%"PRIx64")\n",
+ size, pos, pos);
+ return AVERROR_INVALIDDATA;
+ }
+ if ((*num = get_byte(pb)) & 0x80) {
+ negative = 1;
+ *num &= ~0x80;
+ }
+ while (n++ < size)
+ *num = (*num << 8) | get_byte(pb);
+
+ /* make signed */
+ if (negative)
+ *num = *num - (1LL << ((8 * size) - 1));
+
+ return 0;
+}
+
+/*
+ * Read the next element as a float.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_float (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ double *num)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int size, res;
+ uint64_t rlength;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ size = rlength;
+
+ if (size == 4) {
+ *num = av_int2flt(get_be32(pb));
+ } else if (size == 8) {
+ *num = av_int2dbl(get_be64(pb));
+ } else {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Invalid float element size %d at position %"PRIu64" (0x%"PRIx64")\n",
+ size, pos, pos);
+ return AVERROR_INVALIDDATA;
+ }
+
+ return 0;
+}
+
+/*
+ * Read the next element as an ASCII string.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_ascii (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ char **str)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int size, res;
+ uint64_t rlength;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ size = rlength;
+
+ /* ebml strings are usually not 0-terminated, so we allocate one
+ * byte more, read the string and NULL-terminate it ourselves. */
+ if (size < 0 || !(*str = av_malloc(size + 1))) {
+ av_log(matroska->ctx, AV_LOG_ERROR, "Memory allocation failed\n");
+ return AVERROR_NOMEM;
+ }
+ if (get_buffer(pb, (uint8_t *) *str, size) != size) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Read error at pos. %"PRIu64" (0x%"PRIx64")\n", pos, pos);
+ return AVERROR_IO;
+ }
+ (*str)[size] = '\0';
+
+ return 0;
+}
+
+/*
+ * Read the next element as a UTF-8 string.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_utf8 (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ char **str)
+{
+ return ebml_read_ascii(matroska, id, str);
+}
+
+/*
+ * Read the next element as a date (nanoseconds since 1/1/2000).
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_date (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ int64_t *date)
+{
+ return ebml_read_sint(matroska, id, date);
+}
+
+/*
+ * Read the next element, but only the header. The contents
+ * are supposed to be sub-elements which can be read separately.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_master (MatroskaDemuxContext *matroska,
+ uint32_t *id)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ uint64_t length;
+ MatroskaLevel *level;
+ int res;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &length)) < 0)
+ return res;
+
+ /* protect... (Heaven forbid that the '>' is ever true) */
+ if (matroska->num_levels >= EBML_MAX_DEPTH) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "File moves beyond max. allowed depth (%d)\n", EBML_MAX_DEPTH);
+ return AVERROR_NOTSUPP;
+ }
+
+ /* remember level */
+ level = &matroska->levels[matroska->num_levels++];
+ level->start = url_ftell(pb);
+ level->length = length;
+
+ return 0;
+}
+
+/*
+ * Read the next element as binary data.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_binary (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ uint8_t **binary,
+ int *size)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ uint64_t rlength;
+ int res;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ *size = rlength;
+
+ if (!(*binary = av_malloc(*size))) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Memory allocation error\n");
+ return AVERROR_NOMEM;
+ }
+
+ if (get_buffer(pb, *binary, *size) != *size) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Read error at pos. %"PRIu64" (0x%"PRIx64")\n", pos, pos);
+ return AVERROR_IO;
+ }
+
+ return 0;
+}
+
+/*
+ * Read signed/unsigned "EBML" numbers.
+ * Return: number of bytes processed, < 0 on error.
+ * XXX: use ebml_read_num().
+ */
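+/*
+ * A number whose data bits are all ones is the EBML "unknown size"
+ * value; the num_ffs counting below maps it to (uint64_t)-1.
+ */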
+
+static int
+matroska_ebmlnum_uint (uint8_t *data,
+ uint32_t size,
+ uint64_t *num)
+{
+ int len_mask = 0x80, read = 1, n = 1, num_ffs = 0;
+ uint64_t total;
+
+ if (size <= 0)
+ return AVERROR_INVALIDDATA;
+
+ total = data[0];
+ while (read <= 8 && !(total & len_mask)) {
+ read++;
+ len_mask >>= 1;
+ }
+ if (read > 8)
+ return AVERROR_INVALIDDATA;
+
+ if ((total &= (len_mask - 1)) == len_mask - 1)
+ num_ffs++;
+ if (size < read)
+ return AVERROR_INVALIDDATA;
+ while (n < read) {
+ if (data[n] == 0xff)
+ num_ffs++;
+ total = (total << 8) | data[n];
+ n++;
+ }
+
+ if (read == num_ffs)
+ *num = (uint64_t)-1;
+ else
+ *num = total;
+
+ return read;
+}
+
+/*
+ * Same as above, but signed.
+ */
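+/*
+ * Signed EBML numbers are stored with a bias: an n-byte value v decodes
+ * to v - (2^(7n-1) - 1). The all-ones "unknown" pattern maps to INT64_MAX.
+ */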
+
+static int
+matroska_ebmlnum_sint (uint8_t *data,
+ uint32_t size,
+ int64_t *num)
+{
+ uint64_t unum;
+ int res;
+
+ /* read as unsigned number first */
+ if ((res = matroska_ebmlnum_uint(data, size, &unum)) < 0)
+ return res;
+
+ /* make signed (weird way) */
+ if (unum == (uint64_t)-1)
+ *num = INT64_MAX;
+ else
+ *num = unum - ((1LL << ((7 * res) - 1)) - 1);
+
+ return res;
+}
+
+/*
+ * Read an EBML header.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_header (MatroskaDemuxContext *matroska,
+ char **doctype,
+ int *version)
+{
+ uint32_t id;
+ int level_up, res = 0;
+
+ /* default init */
+ if (doctype)
+ *doctype = NULL;
+ if (version)
+ *version = 1;
+
+ if (!(id = ebml_peek_id(matroska, &level_up)) ||
+ level_up != 0 || id != EBML_ID_HEADER) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "This is not an EBML file (id=0x%x/0x%x)\n", id, EBML_ID_HEADER);
+ return AVERROR_INVALIDDATA;
+ }
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ return res;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &level_up)))
+ return AVERROR_IO;
+
+ /* end-of-header */
+ if (level_up)
+ break;
+
+ switch (id) {
+ /* is our read version up to date? */
+ case EBML_ID_EBMLREADVERSION: {
+ uint64_t num;
+
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ return res;
+ if (num > EBML_VERSION) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "EBML version %"PRIu64" (> %d) is not supported\n",
+ num, EBML_VERSION);
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
+
+ /* we only handle 8 byte lengths at max */
+ case EBML_ID_EBMLMAXSIZELENGTH: {
+ uint64_t num;
+
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ return res;
+ if (num > sizeof(uint64_t)) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Integers of size %"PRIu64" (> %zd) not supported\n",
+ num, sizeof(uint64_t));
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
+
+ /* we handle 4 byte IDs at max */
+ case EBML_ID_EBMLMAXIDLENGTH: {
+ uint64_t num;
+
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ return res;
+ if (num > sizeof(uint32_t)) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "IDs of size %"PRIu64" (> %zu) not supported\n",
+ num, sizeof(uint32_t));
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
+
+ case EBML_ID_DOCTYPE: {
+ char *text;
+
+ if ((res = ebml_read_ascii(matroska, &id, &text)) < 0)
+ return res;
+ if (doctype) {
+ if (*doctype)
+ av_free(*doctype);
+ *doctype = text;
+ } else
+ av_free(text);
+ break;
+ }
+
+ case EBML_ID_DOCTYPEREADVERSION: {
+ uint64_t num;
+
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ return res;
+ if (version)
+ *version = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown data type 0x%x in EBML header", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ /* we ignore these two, as they don't tell us anything we
+ * care about */
+ case EBML_ID_EBMLVERSION:
+ case EBML_ID_DOCTYPEVERSION:
+ res = ebml_read_skip (matroska);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Put one packet in an application-supplied AVPacket struct.
+ * Returns 0 on success or -1 on failure.
+ */
+
+static int
+matroska_deliver_packet (MatroskaDemuxContext *matroska,
+ AVPacket *pkt)
+{
+ if (matroska->num_packets > 0) {
+ memcpy(pkt, matroska->packets[0], sizeof(AVPacket));
+ av_free(matroska->packets[0]);
+ if (matroska->num_packets > 1) {
+ memmove(&matroska->packets[0], &matroska->packets[1],
+ (matroska->num_packets - 1) * sizeof(AVPacket *));
+ matroska->packets =
+ av_realloc(matroska->packets, (matroska->num_packets - 1) *
+ sizeof(AVPacket *));
+ } else {
+ av_freep(&matroska->packets);
+ }
+ matroska->num_packets--;
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ * Put a packet into our internal queue. Will be delivered to the
+ * user/application during the next get_packet() call.
+ */
+
+static void
+matroska_queue_packet (MatroskaDemuxContext *matroska,
+ AVPacket *pkt)
+{
+ matroska->packets =
+ av_realloc(matroska->packets, (matroska->num_packets + 1) *
+ sizeof(AVPacket *));
+ matroska->packets[matroska->num_packets] = pkt;
+ matroska->num_packets++;
+}
+
+/*
+ * Autodetecting...
+ */
+
+static int
+matroska_probe (AVProbeData *p)
+{
+ uint64_t total = 0;
+ int len_mask = 0x80, size = 1, n = 1;
+ uint8_t probe_data[] = { 'm', 'a', 't', 'r', 'o', 's', 'k', 'a' };
+
+ if (p->buf_size < 5)
+ return 0;
+
+ /* ebml header? */
+ if ((p->buf[0] << 24 | p->buf[1] << 16 |
+ p->buf[2] << 8 | p->buf[3]) != EBML_ID_HEADER)
+ return 0;
+
+ /* length of header */
+ total = p->buf[4];
+ while (size <= 8 && !(total & len_mask)) {
+ size++;
+ len_mask >>= 1;
+ }
+ if (size > 8)
+ return 0;
+ total &= (len_mask - 1);
+ while (n < size)
+ total = (total << 8) | p->buf[4 + n++];
+
+ /* does the probe data contain the whole header? */
+ if (p->buf_size < 4 + size + total)
+ return 0;
+
+ /* the header must contain the document type 'matroska'. For now,
+ * we don't parse the whole header but simply check for the
+ * availability of that array of characters inside the header.
+ * Not fully fool-proof, but good enough. */
+ for (n = 4 + size; n < 4 + size + total - sizeof(probe_data); n++)
+ if (!memcmp (&p->buf[n], probe_data, sizeof(probe_data)))
+ return AVPROBE_SCORE_MAX;
+
+ return 0;
+}
+
+/*
+ * From here on, it's all XML-style DTD stuff... Needs no comments.
+ */
+
+static int
+matroska_parse_info (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "Parsing info...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* segment timecode scale */
+ case MATROSKA_ID_TIMECODESCALE: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ matroska->time_scale = num;
+ break;
+ }
+
+ case MATROSKA_ID_DURATION: {
+ double num;
+ if ((res = ebml_read_float(matroska, &id, &num)) < 0)
+ break;
+ matroska->ctx->duration = num * matroska->time_scale * 1000 / AV_TIME_BASE;
+ break;
+ }
+
+ case MATROSKA_ID_TITLE: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ strncpy(matroska->ctx->title, text,
+ sizeof(matroska->ctx->title)-1);
+ av_free(text);
+ break;
+ }
+
+ case MATROSKA_ID_WRITINGAPP: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ matroska->writing_app = text;
+ break;
+ }
+
+ case MATROSKA_ID_MUXINGAPP: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ matroska->muxing_app = text;
+ break;
+ }
+
+ case MATROSKA_ID_DATEUTC: {
+ int64_t time;
+ if ((res = ebml_read_date(matroska, &id, &time)) < 0)
+ break;
+ matroska->created = time;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in info header\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_add_stream (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+ MatroskaTrack *track;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing track, adding stream..,\n");
+
+ /* Allocate a generic track. As soon as we know its type we'll realloc. */
+ track = av_mallocz(sizeof(MatroskaTrack));
+ matroska->num_tracks++;
+
+ /* start with the master */
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ return res;
+
+ /* try reading the trackentry headers */
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up > 0) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* track number (unique stream ID) */
+ case MATROSKA_ID_TRACKNUMBER: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ track->num = num;
+ break;
+ }
+
+ /* track UID (unique identifier) */
+ case MATROSKA_ID_TRACKUID: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ track->uid = num;
+ break;
+ }
+
+ /* track type (video, audio, combined, subtitle, etc.) */
+ case MATROSKA_ID_TRACKTYPE: {
+ uint64_t num;
+ if (track->type != 0) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "More than one tracktype in an entry - skip\n");
+ break;
+ }
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ track->type = num;
+
+ /* ok, so we're actually going to reallocate this thing */
+ switch (track->type) {
+ case MATROSKA_TRACK_TYPE_VIDEO:
+ track = (MatroskaTrack *)
+ av_realloc(track, sizeof(MatroskaVideoTrack));
+ break;
+ case MATROSKA_TRACK_TYPE_AUDIO:
+ track = (MatroskaTrack *)
+ av_realloc(track, sizeof(MatroskaAudioTrack));
+ ((MatroskaAudioTrack *)track)->channels = 1;
+ ((MatroskaAudioTrack *)track)->samplerate = 8000;
+ break;
+ case MATROSKA_TRACK_TYPE_SUBTITLE:
+ track = (MatroskaTrack *)
+ av_realloc(track, sizeof(MatroskaSubtitleTrack));
+ break;
+ case MATROSKA_TRACK_TYPE_COMPLEX:
+ case MATROSKA_TRACK_TYPE_LOGO:
+ case MATROSKA_TRACK_TYPE_CONTROL:
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown or unsupported track type 0x%x\n",
+ track->type);
+ track->type = 0;
+ break;
+ }
+ matroska->tracks[matroska->num_tracks - 1] = track;
+ break;
+ }
+
+ /* tracktype specific stuff for video */
+ case MATROSKA_ID_TRACKVIDEO: {
+ MatroskaVideoTrack *videotrack;
+ if (track->type != MATROSKA_TRACK_TYPE_VIDEO) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "video data in non-video track - ignoring\n");
+ res = AVERROR_INVALIDDATA;
+ break;
+ } else if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ videotrack = (MatroskaVideoTrack *)track;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up > 0) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* FIXME: this belongs one level up, but some files store it here */
+ case MATROSKA_ID_TRACKDEFAULTDURATION: {
+ uint64_t num;
+ if ((res = ebml_read_uint (matroska, &id,
+ &num)) < 0)
+ break;
+ track->default_duration = num;
+ break;
+ }
+
+ /* video framerate */
+ case MATROSKA_ID_VIDEOFRAMERATE: {
+ double num;
+ if ((res = ebml_read_float(matroska, &id,
+ &num)) < 0)
+ break;
+ track->default_duration = 1000000000 * (1. / num);
+ break;
+ }
+
+ /* width of the size to display the video at */
+ case MATROSKA_ID_VIDEODISPLAYWIDTH: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->display_width = num;
+ break;
+ }
+
+ /* height of the size to display the video at */
+ case MATROSKA_ID_VIDEODISPLAYHEIGHT: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->display_height = num;
+ break;
+ }
+
+ /* width of the video in the file */
+ case MATROSKA_ID_VIDEOPIXELWIDTH: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->pixel_width = num;
+ break;
+ }
+
+ /* height of the video in the file */
+ case MATROSKA_ID_VIDEOPIXELHEIGHT: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->pixel_height = num;
+ break;
+ }
+
+ /* whether the video is interlaced */
+ case MATROSKA_ID_VIDEOFLAGINTERLACED: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ if (num)
+ track->flags |=
+ MATROSKA_VIDEOTRACK_INTERLACED;
+ else
+ track->flags &=
+ ~MATROSKA_VIDEOTRACK_INTERLACED;
+ break;
+ }
+
+ /* stereo mode (whether the video has two streams,
+ * where one is for the left eye and the other for
+ * the right eye, which creates a 3D-like
+ * effect) */
+ case MATROSKA_ID_VIDEOSTEREOMODE: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ if (num != MATROSKA_EYE_MODE_MONO &&
+ num != MATROSKA_EYE_MODE_LEFT &&
+ num != MATROSKA_EYE_MODE_RIGHT &&
+ num != MATROSKA_EYE_MODE_BOTH) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Ignoring unknown eye mode 0x%x\n",
+ (uint32_t) num);
+ break;
+ }
+ videotrack->eye_mode = num;
+ break;
+ }
+
+ /* aspect ratio behaviour */
+ case MATROSKA_ID_VIDEOASPECTRATIO: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ if (num != MATROSKA_ASPECT_RATIO_MODE_FREE &&
+ num != MATROSKA_ASPECT_RATIO_MODE_KEEP &&
+ num != MATROSKA_ASPECT_RATIO_MODE_FIXED) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Ignoring unknown aspect ratio 0x%x\n",
+ (uint32_t) num);
+ break;
+ }
+ videotrack->ar_mode = num;
+ break;
+ }
+
+ /* colourspace (only matters for raw video)
+ * fourcc */
+ case MATROSKA_ID_VIDEOCOLOURSPACE: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->fourcc = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown video track header entry "
+ "0x%x - ignoring\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+ break;
+ }
+
+ /* tracktype specific stuff for audio */
+ case MATROSKA_ID_TRACKAUDIO: {
+ MatroskaAudioTrack *audiotrack;
+ if (track->type != MATROSKA_TRACK_TYPE_AUDIO) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "audio data in non-audio track - ignoring\n");
+ res = AVERROR_INVALIDDATA;
+ break;
+ } else if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ audiotrack = (MatroskaAudioTrack *)track;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up > 0) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* samplerate */
+ case MATROSKA_ID_AUDIOSAMPLINGFREQ: {
+ double num;
+ if ((res = ebml_read_float(matroska, &id,
+ &num)) < 0)
+ break;
+ audiotrack->internal_samplerate =
+ audiotrack->samplerate = num;
+ break;
+ }
+
+ case MATROSKA_ID_AUDIOOUTSAMPLINGFREQ: {
+ double num;
+ if ((res = ebml_read_float(matroska, &id,
+ &num)) < 0)
+ break;
+ audiotrack->samplerate = num;
+ break;
+ }
+
+ /* bitdepth */
+ case MATROSKA_ID_AUDIOBITDEPTH: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ audiotrack->bitdepth = num;
+ break;
+ }
+
+ /* channels */
+ case MATROSKA_ID_AUDIOCHANNELS: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ audiotrack->channels = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown audio track header entry "
+ "0x%x - ignoring\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+ break;
+ }
+
+ /* codec identifier */
+ case MATROSKA_ID_CODECID: {
+ char *text;
+ if ((res = ebml_read_ascii(matroska, &id, &text)) < 0)
+ break;
+ track->codec_id = text;
+ break;
+ }
+
+ /* codec private data */
+ case MATROSKA_ID_CODECPRIVATE: {
+ uint8_t *data;
+ int size;
+ if ((res = ebml_read_binary(matroska, &id, &data, &size)) < 0)
+ break;
+ track->codec_priv = data;
+ track->codec_priv_size = size;
+ break;
+ }
+
+ /* name of the codec */
+ case MATROSKA_ID_CODECNAME: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ track->codec_name = text;
+ break;
+ }
+
+ /* name of this track */
+ case MATROSKA_ID_TRACKNAME: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ track->name = text;
+ break;
+ }
+
+ /* language (matters for audio/subtitles, mostly) */
+ case MATROSKA_ID_TRACKLANGUAGE: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ track->language = text;
+ break;
+ }
+
+ /* whether this is actually used */
+ case MATROSKA_ID_TRACKFLAGENABLED: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ if (num)
+ track->flags |= MATROSKA_TRACK_ENABLED;
+ else
+ track->flags &= ~MATROSKA_TRACK_ENABLED;
+ break;
+ }
+
+ /* whether it's the default for this track type */
+ case MATROSKA_ID_TRACKFLAGDEFAULT: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ if (num)
+ track->flags |= MATROSKA_TRACK_DEFAULT;
+ else
+ track->flags &= ~MATROSKA_TRACK_DEFAULT;
+ break;
+ }
+
+ /* lacing (like MPEG, where blocks don't end/start on frame
+ * boundaries) */
+ case MATROSKA_ID_TRACKFLAGLACING: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ if (num)
+ track->flags |= MATROSKA_TRACK_LACING;
+ else
+ track->flags &= ~MATROSKA_TRACK_LACING;
+ break;
+ }
+
+ /* default length (in time) of one data block in this track */
+ case MATROSKA_ID_TRACKDEFAULTDURATION: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ track->default_duration = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown track header entry 0x%x - ignoring\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ /* we ignore these because they contain nothing useful to us. */
+ case MATROSKA_ID_CODECINFOURL:
+ case MATROSKA_ID_CODECDOWNLOADURL:
+ case MATROSKA_ID_TRACKMINCACHE:
+ case MATROSKA_ID_TRACKMAXCACHE:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_tracks (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing tracks...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* one track within the "all-tracks" header */
+ case MATROSKA_ID_TRACKENTRY:
+ res = matroska_add_stream(matroska);
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in track header\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_index (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+ MatroskaDemuxIndex idx;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing index...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* one single index entry ('point') */
+ case MATROSKA_ID_POINTENTRY:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+
+ /* in the end, we hope to fill one entry with a
+ * timestamp, a file position and a tracknum */
+ idx.pos = (uint64_t) -1;
+ idx.time = (uint64_t) -1;
+ idx.track = (uint16_t) -1;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* absolute timestamp of this cue point */
+ case MATROSKA_ID_CUETIME: {
+ uint64_t time;
+ if ((res = ebml_read_uint(matroska, &id,
+ &time)) < 0)
+ break;
+ idx.time = time * matroska->time_scale;
+ break;
+ }
+
+ /* position in the file + track to which it
+ * belongs */
+ case MATROSKA_ID_CUETRACKPOSITION:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id (matroska,
+ &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* track number */
+ case MATROSKA_ID_CUETRACK: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska,
+ &id, &num)) < 0)
+ break;
+ idx.track = num;
+ break;
+ }
+
+ /* position in file */
+ case MATROSKA_ID_CUECLUSTERPOSITION: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska,
+ &id, &num)) < 0)
+ break;
+ idx.pos = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in "
+ "CuesTrackPositions\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in cuespoint "
+ "index\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ /* so let's see if we got what we wanted */
+ if (idx.pos != (uint64_t) -1 &&
+ idx.time != (uint64_t) -1 &&
+ idx.track != (uint16_t) -1) {
+ if (matroska->num_indexes % 32 == 0) {
+ /* re-allocate bigger index */
+ matroska->index =
+ av_realloc(matroska->index,
+ (matroska->num_indexes + 32) *
+ sizeof(MatroskaDemuxIndex));
+ }
+ matroska->index[matroska->num_indexes] = idx;
+ matroska->num_indexes++;
+ }
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in cues header\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_metadata (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* Hm, this is unsupported... */
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in metadata header\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_seekhead (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing seekhead...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ case MATROSKA_ID_SEEKENTRY: {
+ uint32_t seek_id = 0, peek_id_cache = 0;
+ uint64_t seek_pos = (uint64_t) -1, t;
+
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ case MATROSKA_ID_SEEKID:
+ res = ebml_read_uint(matroska, &id, &t);
+ seek_id = t;
+ break;
+
+ case MATROSKA_ID_SEEKPOSITION:
+ res = ebml_read_uint(matroska, &id, &seek_pos);
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown seekhead ID 0x%x\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ if (!seek_id || seek_pos == (uint64_t) -1) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Incomplete seekhead entry (0x%x/%"PRIu64")\n",
+ seek_id, seek_pos);
+ break;
+ }
+
+ switch (seek_id) {
+ case MATROSKA_ID_CUES:
+ case MATROSKA_ID_TAGS: {
+ uint32_t level_up = matroska->level_up;
+ offset_t before_pos;
+ uint64_t length;
+ MatroskaLevel level;
+
+ /* remember the peeked ID and the current position */
+ peek_id_cache = matroska->peek_id;
+ before_pos = url_ftell(&matroska->ctx->pb);
+
+ /* seek */
+ if ((res = ebml_read_seek(matroska, seek_pos +
+ matroska->segment_start)) < 0)
+ return res;
+
+ /* we don't want to lose our seekhead level, so we add
+ * a dummy. This is a crude hack. */
+ if (matroska->num_levels == EBML_MAX_DEPTH) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Max EBML element depth (%d) reached, "
+ "cannot parse further.\n", EBML_MAX_DEPTH);
+ return AVERROR_UNKNOWN;
+ }
+
+ level.start = 0;
+ level.length = (uint64_t)-1;
+ matroska->levels[matroska->num_levels] = level;
+ matroska->num_levels++;
+
+ /* check ID */
+ if (!(id = ebml_peek_id (matroska,
+ &matroska->level_up)))
+ goto finish;
+ if (id != seek_id) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "We looked for ID=0x%x but got "
+ "ID=0x%x (pos=%"PRIu64")",
+ seek_id, id, seek_pos +
+ matroska->segment_start);
+ goto finish;
+ }
+
+ /* read master + parse */
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ goto finish;
+ switch (id) {
+ case MATROSKA_ID_CUES:
+ if (!(res = matroska_parse_index(matroska)) ||
+ url_feof(&matroska->ctx->pb)) {
+ matroska->index_parsed = 1;
+ res = 0;
+ }
+ break;
+ case MATROSKA_ID_TAGS:
+ if (!(res = matroska_parse_metadata(matroska)) ||
+ url_feof(&matroska->ctx->pb)) {
+ matroska->metadata_parsed = 1;
+ res = 0;
+ }
+ break;
+ }
+
+ finish:
+ /* remove dummy level */
+ while (matroska->num_levels) {
+ matroska->num_levels--;
+ length =
+ matroska->levels[matroska->num_levels].length;
+ if (length == (uint64_t)-1)
+ break;
+ }
+
+ /* seek back */
+ if ((res = ebml_read_seek(matroska, before_pos)) < 0)
+ return res;
+ matroska->peek_id = peek_id_cache;
+ matroska->level_up = level_up;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Ignoring seekhead entry for ID=0x%x\n",
+ seek_id);
+ break;
+ }
+
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown seekhead ID 0x%x\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof(*x))
+
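+/*
+ * Matroska A_AAC codec IDs carry the profile in the string (e.g.
+ * "A_AAC/MPEG4/LC/SBR"); map it to the MPEG-4 audio object type
+ * (1 = Main, 2 = LC, 3 = SSR).
+ */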
+static int
+matroska_aac_profile (char *codec_id)
+{
+ static const char *aac_profiles[] = {
+ "MAIN", "LC", "SSR"
+ };
+ int profile;
+
+ for (profile=0; profile<ARRAY_SIZE(aac_profiles); profile++)
+ if (strstr(codec_id, aac_profiles[profile]))
+ break;
+ return profile + 1;
+}
+
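+/*
+ * Map a sample rate to its MPEG-4 samplingFrequencyIndex, as used in the
+ * AudioSpecificConfig synthesized in matroska_read_header().
+ */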
+static int
+matroska_aac_sri (int samplerate)
+{
+ static const int aac_sample_rates[] = {
+ 96000, 88200, 64000, 48000, 44100, 32000,
+ 24000, 22050, 16000, 12000, 11025, 8000,
+ };
+ int sri;
+
+ for (sri=0; sri<ARRAY_SIZE(aac_sample_rates); sri++)
+ if (aac_sample_rates[sri] == samplerate)
+ break;
+ return sri;
+}
+
+static int
+matroska_read_header (AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MatroskaDemuxContext *matroska = s->priv_data;
+ char *doctype;
+ int version, last_level, res = 0;
+ uint32_t id;
+
+ matroska->ctx = s;
+
+ /* First read the EBML header. */
+ doctype = NULL;
+ if ((res = ebml_read_header(matroska, &doctype, &version)) < 0)
+ return res;
+ if ((doctype == NULL) || strcmp(doctype, "matroska")) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Wrong EBML doctype ('%s' != 'matroska').\n",
+ doctype ? doctype : "(none)");
+ if (doctype)
+ av_free(doctype);
+ return AVERROR_NOFMT;
+ }
+ av_free(doctype);
+ if (version != 1) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Matroska demuxer version 1 too old for file version %d\n",
+ version);
+ return AVERROR_NOFMT;
+ }
+
+ /* The next thing is a segment. */
+ while (1) {
+ if (!(id = ebml_peek_id(matroska, &last_level)))
+ return AVERROR_IO;
+ if (id == MATROSKA_ID_SEGMENT)
+ break;
+
+ /* oi! */
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Expected a Segment ID (0x%x), but received 0x%x!\n",
+ MATROSKA_ID_SEGMENT, id);
+ if ((res = ebml_read_skip(matroska)) < 0)
+ return res;
+ }
+
+ /* We now have a Matroska segment.
+ * Seeks are from the beginning of the segment,
+ * after the segment ID/length. */
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ return res;
+ matroska->segment_start = url_ftell(&s->pb);
+
+ matroska->time_scale = 1000000;
+ /* we've found our segment, start reading the different contents in here */
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* stream info */
+ case MATROSKA_ID_INFO: {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_info(matroska);
+ break;
+ }
+
+ /* track info headers */
+ case MATROSKA_ID_TRACKS: {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_tracks(matroska);
+ break;
+ }
+
+ /* stream index */
+ case MATROSKA_ID_CUES: {
+ if (!matroska->index_parsed) {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_index(matroska);
+ } else
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ /* metadata */
+ case MATROSKA_ID_TAGS: {
+ if (!matroska->metadata_parsed) {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_metadata(matroska);
+ } else
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ /* file index (if seekable, seek to Cues/Tags to parse it) */
+ case MATROSKA_ID_SEEKHEAD: {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_seekhead(matroska);
+ break;
+ }
+
+ case MATROSKA_ID_CLUSTER: {
+ /* Do not read the master - this will be done in the next
+ * call to matroska_read_packet. */
+ res = 1;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown matroska file header ID 0x%x\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ /* Have we found a cluster? */
+ if (ebml_peek_id(matroska, NULL) == MATROSKA_ID_CLUSTER) {
+ int i, j;
+ MatroskaTrack *track;
+ AVStream *st;
+
+ for (i = 0; i < matroska->num_tracks; i++) {
+ enum CodecID codec_id = CODEC_ID_NONE;
+ uint8_t *extradata = NULL;
+ int extradata_size = 0;
+ int extradata_offset = 0;
+ track = matroska->tracks[i];
+
+ /* libavformat does not really support subtitles.
+ * Also apply some sanity checks. */
+ if ((track->type == MATROSKA_TRACK_TYPE_SUBTITLE) ||
+ (track->codec_id == NULL))
+ continue;
+
+ for(j=0; codec_tags[j].str; j++){
+ if(!strncmp(codec_tags[j].str, track->codec_id,
+ strlen(codec_tags[j].str))){
+ codec_id= codec_tags[j].id;
+ break;
+ }
+ }
+
+ /* Set the FourCC from the CodecID. */
+ /* This is the MS compatibility mode which stores a
+ * BITMAPINFOHEADER in the CodecPrivate. */
+ if (!strcmp(track->codec_id,
+ MATROSKA_CODEC_ID_VIDEO_VFW_FOURCC) &&
+ (track->codec_priv_size >= 40) &&
+ (track->codec_priv != NULL)) {
+ unsigned char *p;
+
+ /* Offset of biCompression. Stored in LE. */
+ p = (unsigned char *)track->codec_priv + 16;
+ ((MatroskaVideoTrack *)track)->fourcc = (p[3] << 24) |
+ (p[2] << 16) | (p[1] << 8) | p[0];
+ codec_id = codec_get_bmp_id(((MatroskaVideoTrack *)track)->fourcc);
+
+ }
+
+ /* This is the MS compatibility mode which stores a
+ * WAVEFORMATEX in the CodecPrivate. */
+ else if (!strcmp(track->codec_id,
+ MATROSKA_CODEC_ID_AUDIO_ACM) &&
+ (track->codec_priv_size >= 18) &&
+ (track->codec_priv != NULL)) {
+ unsigned char *p;
+ uint16_t tag;
+
+ /* Offset of wFormatTag. Stored in LE. */
+ p = (unsigned char *)track->codec_priv;
+ tag = (p[1] << 8) | p[0];
+ codec_id = codec_get_wav_id(tag);
+
+ }
+
+ else if (codec_id == CODEC_ID_AAC && !track->codec_priv_size) {
+ MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *) track;
+ int profile = matroska_aac_profile(track->codec_id);
+ int sri = matroska_aac_sri(audiotrack->internal_samplerate);
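+ /* Synthesize a minimal MPEG-4 AudioSpecificConfig: 5 bits of
+ * object type (profile), 4 bits of sampling-frequency index and
+ * 4 bits of channel configuration; for SBR codec IDs an explicit
+ * extension carrying the output sample rate is appended. */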
+ extradata = av_malloc(5);
+ if (extradata == NULL)
+ return AVERROR_NOMEM;
+ extradata[0] = (profile << 3) | ((sri&0x0E) >> 1);
+ extradata[1] = ((sri&0x01) << 7) | (audiotrack->channels<<3);
+ if (strstr(track->codec_id, "SBR")) {
+ sri = matroska_aac_sri(audiotrack->samplerate);
+ extradata[2] = 0x56;
+ extradata[3] = 0xE5;
+ extradata[4] = 0x80 | (sri<<3);
+ extradata_size = 5;
+ } else {
+ extradata_size = 2;
+ }
+ }
+
+ else if (codec_id == CODEC_ID_TTA) {
+ MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *) track;
+ ByteIOContext b;
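+ /* Reconstruct a TTA1 stream header (magic, format, channel count,
+ * bit depth, sample rate, total sample count) in extradata so the
+ * decoder can be initialized. */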
+ extradata_size = 30;
+ extradata = av_mallocz(extradata_size);
+ if (extradata == NULL)
+ return AVERROR_NOMEM;
+ init_put_byte(&b, extradata, extradata_size, 1,
+ NULL, NULL, NULL, NULL);
+ put_buffer(&b, (uint8_t *) "TTA1", 4);
+ put_le16(&b, 1);
+ put_le16(&b, audiotrack->channels);
+ put_le16(&b, audiotrack->bitdepth);
+ put_le32(&b, audiotrack->samplerate);
+ put_le32(&b, matroska->ctx->duration * audiotrack->samplerate);
+ }
+
+ else if (codec_id == CODEC_ID_RV10 || codec_id == CODEC_ID_RV20 ||
+ codec_id == CODEC_ID_RV30 || codec_id == CODEC_ID_RV40) {
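+ /* RealVideo: pass only the codec-specific part of CodecPrivate
+ * (skipping the first 26 bytes) to the decoder, and remember that
+ * each frame carries a slice table (see rv_offset() below). */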
+ extradata_offset = 26;
+ track->codec_priv_size -= extradata_offset;
+ track->flags |= MATROSKA_TRACK_REAL_V;
+ }
+
+ if (codec_id == CODEC_ID_NONE) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown/unsupported CodecID %s.\n",
+ track->codec_id);
+ }
+
+ track->stream_index = matroska->num_streams;
+
+ matroska->num_streams++;
+ st = av_new_stream(s, track->stream_index);
+ if (st == NULL)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 64, matroska->time_scale, 1000*1000*1000); /* 64 bit pts in ns */
+
+ st->codec->codec_id = codec_id;
+
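+ /* DefaultDuration is the duration of one frame in nanoseconds;
+ * expose it as the codec time base. */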
+ if (track->default_duration)
+ av_reduce(&st->codec->time_base.num, &st->codec->time_base.den,
+ track->default_duration, 1000000000, 30000);
+
+ if(extradata){
+ st->codec->extradata = extradata;
+ st->codec->extradata_size = extradata_size;
+ } else if(track->codec_priv && track->codec_priv_size > 0){
+ st->codec->extradata = av_malloc(track->codec_priv_size);
+ if(st->codec->extradata == NULL)
+ return AVERROR_NOMEM;
+ st->codec->extradata_size = track->codec_priv_size;
+ memcpy(st->codec->extradata,track->codec_priv+extradata_offset,
+ track->codec_priv_size);
+ }
+
+ if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
+ MatroskaVideoTrack *videotrack = (MatroskaVideoTrack *)track;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_tag = videotrack->fourcc;
+ st->codec->width = videotrack->pixel_width;
+ st->codec->height = videotrack->pixel_height;
+ if (videotrack->display_width == 0)
+ videotrack->display_width= videotrack->pixel_width;
+ if (videotrack->display_height == 0)
+ videotrack->display_height= videotrack->pixel_height;
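+ /* Derive the sample (pixel) aspect ratio from the pixel and
+ * display dimensions: SAR = (display_width * pixel_height) /
+ * (display_height * pixel_width). */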
+ av_reduce(&st->codec->sample_aspect_ratio.num,
+ &st->codec->sample_aspect_ratio.den,
+ st->codec->height * videotrack->display_width,
+ st->codec-> width * videotrack->display_height,
+ 255);
+ } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
+ MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *)track;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->sample_rate = audiotrack->samplerate;
+ st->codec->channels = audiotrack->channels;
+ } else if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE) {
+ st->codec->codec_type = CODEC_TYPE_SUBTITLE;
+ }
+
+ /* What do we do with private data? E.g. for Vorbis. */
+ }
+ res = 0;
+ }
+
+ return res;
+}
+
+static int
+matroska_find_track_by_num (MatroskaDemuxContext *matroska,
+ int num)
+{
+ int i;
+
+ for (i = 0; i < matroska->num_tracks; i++)
+ if (matroska->tracks[i]->num == num)
+ return i;
+
+ return -1;
+}
+
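+/*
+ * Each RealVideo frame in a block is prefixed with a slice table: one
+ * byte holding (number of slices - 1), then 8 bytes per slice of which
+ * the last four (little-endian) give that slice's offset into the frame
+ * data following the table.
+ */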
+static inline int
+rv_offset(uint8_t *data, int slice, int slices)
+{
+ return LE_32(data+8*slice+4) + 8*slices;
+}
+
+static int
+matroska_parse_blockgroup (MatroskaDemuxContext *matroska,
+ uint64_t cluster_time)
+{
+ int res = 0;
+ uint32_t id;
+ AVPacket *pkt = NULL;
+ int is_keyframe = PKT_FLAG_KEY, last_num_packets = matroska->num_packets;
+ uint64_t duration = AV_NOPTS_VALUE;
+ int track = -1;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing blockgroup...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* one block inside the group. Note, block parsing is one
+ * of the harder things, so this code is a bit complicated.
+ * See http://www.matroska.org/ for documentation. */
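+ /* Layout of a Block: an EBML-coded track number, a 16-bit signed
+ * timecode relative to the cluster, one flags byte (bits 1-2 select
+ * the lacing mode), an optional lacing header, then the frame data. */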
+ case MATROSKA_ID_BLOCK: {
+ uint8_t *data, *origdata;
+ int size;
+ int16_t block_time;
+ uint32_t *lace_size = NULL;
+ int n, flags, laces = 0;
+ uint64_t num;
+ int64_t pos= url_ftell(&matroska->ctx->pb);
+
+ if ((res = ebml_read_binary(matroska, &id, &data, &size)) < 0)
+ break;
+ origdata = data;
+
+ /* first byte(s): tracknum */
+ if ((n = matroska_ebmlnum_uint(data, size, &num)) < 0) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "EBML block data error\n");
+ av_free(origdata);
+ break;
+ }
+ data += n;
+ size -= n;
+
+ /* fetch track from num */
+ track = matroska_find_track_by_num(matroska, num);
+ if (size <= 3 || track < 0 || track >= matroska->num_tracks) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Invalid stream %d or size %u\n", track, size);
+ av_free(origdata);
+ break;
+ }
+ if(matroska->ctx->streams[ matroska->tracks[track]->stream_index ]->discard >= AVDISCARD_ALL){
+ av_free(origdata);
+ break;
+ }
+
+ /* block_time (relative to cluster time) */
+ block_time = (data[0] << 8) | data[1];
+ data += 2;
+ size -= 2;
+ flags = *data;
+ data += 1;
+ size -= 1;
+ switch ((flags & 0x06) >> 1) {
+ case 0x0: /* no lacing */
+ laces = 1;
+ lace_size = av_mallocz(sizeof(int));
+ lace_size[0] = size;
+ break;
+
+ case 0x1: /* xiph lacing */
+ case 0x2: /* fixed-size lacing */
+ case 0x3: /* EBML lacing */
+ if (size == 0) {
+ res = -1;
+ break;
+ }
+ laces = (*data) + 1;
+ data += 1;
+ size -= 1;
+ lace_size = av_mallocz(laces * sizeof(int));
+
+ switch ((flags & 0x06) >> 1) {
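+ /* Xiph-style lacing: each lace size is stored as a run of 0xff
+ * bytes plus a terminating byte < 0xff (as in Ogg); the last
+ * lace takes whatever remains of the block. */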
+ case 0x1: /* xiph lacing */ {
+ uint8_t temp;
+ uint32_t total = 0;
+ for (n = 0; res == 0 && n < laces - 1; n++) {
+ while (1) {
+ if (size == 0) {
+ res = -1;
+ break;
+ }
+ temp = *data;
+ lace_size[n] += temp;
+ data += 1;
+ size -= 1;
+ if (temp != 0xff)
+ break;
+ }
+ total += lace_size[n];
+ }
+ lace_size[n] = size - total;
+ break;
+ }
+
+ case 0x2: /* fixed-size lacing */
+ for (n = 0; n < laces; n++)
+ lace_size[n] = size / laces;
+ break;
+
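+ /* EBML lacing: the first lace size is an EBML-coded unsigned
+ * number; each following size is stored as a signed EBML-coded
+ * difference from the previous one, and the last lace again
+ * takes the remaining bytes. */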
+ case 0x3: /* EBML lacing */ {
+ uint32_t total;
+ n = matroska_ebmlnum_uint(data, size, &num);
+ if (n < 0) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "EBML block data error\n");
+ break;
+ }
+ data += n;
+ size -= n;
+ total = lace_size[0] = num;
+ for (n = 1; res == 0 && n < laces - 1; n++) {
+ int64_t snum;
+ int r;
+ r = matroska_ebmlnum_sint (data, size,
+ &snum);
+ if (r < 0) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "EBML block data error\n");
+ break;
+ }
+ data += r;
+ size -= r;
+ lace_size[n] = lace_size[n - 1] + snum;
+ total += lace_size[n];
+ }
+ lace_size[n] = size - total;
+ break;
+ }
+ }
+ break;
+ }
+
+ if (res == 0) {
+ int real_v = matroska->tracks[track]->flags & MATROSKA_TRACK_REAL_V;
+ for (n = 0; n < laces; n++) {
+ uint64_t timecode = AV_NOPTS_VALUE;
+ int slice, slices = 1;
+
+ if (real_v) {
+ slices = *data++ + 1;
+ lace_size[n]--;
+ }
+ if (cluster_time != (uint64_t)-1 && n == 0) {
+ if (cluster_time + block_time >= 0)
+ timecode = (cluster_time + block_time) * matroska->time_scale;
+ }
+ /* FIXME: duration */
+
+ for (slice=0; slice<slices; slice++) {
+ int slice_size, slice_offset = 0;
+ if (real_v)
+ slice_offset = rv_offset(data, slice, slices);
+ if (slice+1 == slices)
+ slice_size = lace_size[n] - slice_offset;
+ else
+ slice_size = rv_offset(data, slice+1, slices) - slice_offset;
+ pkt = av_mallocz(sizeof(AVPacket));
+ /* XXX: prevent data copy... */
+ if (av_new_packet(pkt, slice_size) < 0) {
+ res = AVERROR_NOMEM;
+ n = laces-1;
+ break;
+ }
+ memcpy (pkt->data, data+slice_offset, slice_size);
+
+ if (n == 0)
+ pkt->flags = is_keyframe;
+ pkt->stream_index =
+ matroska->tracks[track]->stream_index;
+
+ pkt->pts = timecode;
+ pkt->pos = pos;
+
+ matroska_queue_packet(matroska, pkt);
+ }
+ data += lace_size[n];
+ }
+ }
+
+ av_free(lace_size);
+ av_free(origdata);
+ break;
+ }
+
+ case MATROSKA_ID_BLOCKDURATION: {
+ if ((res = ebml_read_uint(matroska, &id, &duration)) < 0)
+ break;
+ break;
+ }
+
+ case MATROSKA_ID_BLOCKREFERENCE:
+ /* We've found a reference, so not even the first frame in
+ * the lace is a key frame. */
+ is_keyframe = 0;
+ if (last_num_packets != matroska->num_packets)
+ matroska->packets[last_num_packets]->flags = 0;
+ res = ebml_read_skip(matroska);
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in blockgroup data\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ if (pkt)
+ {
+ if (duration != AV_NOPTS_VALUE)
+ pkt->duration = duration;
+ else if (track >= 0 && track < matroska->num_tracks)
+ pkt->duration = matroska->tracks[track]->default_duration / matroska->time_scale;
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_cluster (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+ uint64_t cluster_time = 0;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG,
+ "parsing cluster at %"PRId64"\n", url_ftell(&matroska->ctx->pb));
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* cluster timecode */
+ case MATROSKA_ID_CLUSTERTIMECODE: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ cluster_time = num;
+ break;
+ }
+
+ /* a group of blocks inside a cluster */
+ case MATROSKA_ID_BLOCKGROUP:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_blockgroup(matroska, cluster_time);
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in cluster data\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_read_packet (AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MatroskaDemuxContext *matroska = s->priv_data;
+ int res = 0;
+ uint32_t id;
+
+ /* Do we still have a packet queued? */
+ if (matroska_deliver_packet(matroska, pkt) == 0)
+ return 0;
+
+ /* Have we already reached the end? */
+ if (matroska->done)
+ return AVERROR_IO;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ case MATROSKA_ID_CLUSTER:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ if ((res = matroska_parse_cluster(matroska)) == 0)
+ res = 1; /* Parsed one cluster, let's get out. */
+ break;
+
+ default:
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ if (res == -1)
+ matroska->done = 1;
+
+ return matroska_deliver_packet(matroska, pkt);
+}
+
+static int
+matroska_read_close (AVFormatContext *s)
+{
+ MatroskaDemuxContext *matroska = s->priv_data;
+ int n = 0;
+
+ av_free(matroska->writing_app);
+ av_free(matroska->muxing_app);
+ av_free(matroska->index);
+
+ if (matroska->packets != NULL) {
+ for (n = 0; n < matroska->num_packets; n++) {
+ av_free_packet(matroska->packets[n]);
+ av_free(matroska->packets[n]);
+ }
+ av_free(matroska->packets);
+ }
+
+ for (n = 0; n < matroska->num_tracks; n++) {
+ MatroskaTrack *track = matroska->tracks[n];
+ av_free(track->codec_id);
+ av_free(track->codec_name);
+ av_free(track->codec_priv);
+ av_free(track->name);
+ av_free(track->language);
+
+ av_free(track);
+ }
+
+ return 0;
+}
+
+AVInputFormat matroska_demuxer = {
+ "matroska",
+ "Matroska file format",
+ sizeof(MatroskaDemuxContext),
+ matroska_probe,
+ matroska_read_header,
+ matroska_read_packet,
+ matroska_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/mm.c b/contrib/ffmpeg/libavformat/mm.c
new file mode 100644
index 000000000..a3c637fb2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mm.c
@@ -0,0 +1,212 @@
+/*
+ * American Laser Games MM Format Demuxer
+ * Copyright (c) 2006 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mm.c
+ * American Laser Games MM Format Demuxer
+ * by Peter Ross (suxen_drol at hotmail dot com)
+ *
+ * The MM format was used by IBM-PC ports of ALG's "arcade shooter" games,
+ * including Mad Dog McCree and Crime Patrol.
+ *
+ * Technical details here:
+ * http://wiki.multimedia.cx/index.php?title=American_Laser_Games_MM
+ */
+
+#include "avformat.h"
+
+#define MM_PREAMBLE_SIZE 6
+
+#define MM_TYPE_HEADER 0x0
+#define MM_TYPE_INTER 0x5
+#define MM_TYPE_INTRA 0x8
+#define MM_TYPE_INTRA_HH 0xc
+#define MM_TYPE_INTER_HH 0xd
+#define MM_TYPE_INTRA_HHV 0xe
+#define MM_TYPE_INTER_HHV 0xf
+#define MM_TYPE_AUDIO 0x15
+#define MM_TYPE_PALETTE 0x31
+
+#define MM_HEADER_LEN_V 0x16 /* video only */
+#define MM_HEADER_LEN_AV 0x18 /* video + audio */
+
+#define MM_PALETTE_COUNT 128
+#define MM_PALETTE_SIZE (MM_PALETTE_COUNT*3)
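+
+/* Every chunk starts with an MM_PREAMBLE_SIZE preamble carrying a little-endian
+ * chunk type and length. Video chunks are forwarded with their preamble
+ * attached (stream 0), audio chunks carry raw unsigned 8-bit PCM (stream 1),
+ * and palette chunks carry MM_PALETTE_COUNT RGB triplets. */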
+
+typedef struct {
+ AVPaletteControl palette_control;
+ unsigned int audio_pts, video_pts;
+} MmDemuxContext;
+
+static int mm_probe(AVProbeData *p)
+{
+ /* the first chunk is always the header */
+ if (p->buf_size < MM_PREAMBLE_SIZE)
+ return 0;
+ if (LE_16(&p->buf[0]) != MM_TYPE_HEADER)
+ return 0;
+ if (LE_32(&p->buf[2]) != MM_HEADER_LEN_V && LE_32(&p->buf[2]) != MM_HEADER_LEN_AV)
+ return 0;
+
+ /* only return half certainty since this check is a bit sketchy */
+ return AVPROBE_SCORE_MAX / 2;
+}
+
+static int mm_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MmDemuxContext *mm = (MmDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+
+ unsigned int type, length;
+ unsigned int frame_rate, width, height;
+
+ type = get_le16(pb);
+ length = get_le32(pb);
+
+ if (type != MM_TYPE_HEADER)
+ return AVERROR_INVALIDDATA;
+
+ /* read header */
+ get_le16(pb); /* total number of chunks */
+ frame_rate = get_le16(pb);
+ get_le16(pb); /* ibm-pc video bios mode */
+ width = get_le16(pb);
+ height = get_le16(pb);
+ url_fseek(pb, length - 10, SEEK_CUR); /* unknown data */
+
+ /* video stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MMVIDEO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = width;
+ st->codec->height = height;
+ st->codec->palctrl = &mm->palette_control;
+ av_set_pts_info(st, 64, 1, frame_rate);
+
+ /* audio stream */
+ if (length == MM_HEADER_LEN_AV) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->codec_id = CODEC_ID_PCM_U8;
+ st->codec->channels = 1;
+ st->codec->sample_rate = 8000;
+ av_set_pts_info(st, 64, 1, 8000); /* 8000 hz */
+ }
+
+ mm->palette_control.palette_changed = 0;
+ mm->audio_pts = 0;
+ mm->video_pts = 0;
+ return 0;
+}
+
+static int mm_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MmDemuxContext *mm = (MmDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned char preamble[MM_PREAMBLE_SIZE];
+ unsigned char pal[MM_PALETTE_SIZE];
+ unsigned int type, length;
+ int i;
+
+ while(1) {
+
+ if (get_buffer(pb, preamble, MM_PREAMBLE_SIZE) != MM_PREAMBLE_SIZE) {
+ return AVERROR_IO;
+ }
+
+ type = LE_16(&preamble[0]);
+ length = LE_16(&preamble[2]);
+
+ switch(type) {
+ case MM_TYPE_PALETTE :
+ url_fseek(pb, 4, SEEK_CUR); /* unknown data */
+ if (get_buffer(pb, pal, MM_PALETTE_SIZE) != MM_PALETTE_SIZE)
+ return AVERROR_IO;
+ url_fseek(pb, length - (4 + MM_PALETTE_SIZE), SEEK_CUR);
+
+ for (i=0; i<MM_PALETTE_COUNT; i++) {
+ int r = pal[i*3 + 0];
+ int g = pal[i*3 + 1];
+ int b = pal[i*3 + 2];
+ mm->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
+                /* repeat the palette, with each component multiplied by four */
+ mm->palette_control.palette[i+128] = (r << 18) | (g << 10) | (b<<2);
+ }
+ mm->palette_control.palette_changed = 1;
+ break;
+
+ case MM_TYPE_INTER :
+ case MM_TYPE_INTRA :
+ case MM_TYPE_INTRA_HH :
+ case MM_TYPE_INTER_HH :
+ case MM_TYPE_INTRA_HHV :
+ case MM_TYPE_INTER_HHV :
+ /* output preamble + data */
+ if (av_new_packet(pkt, length + MM_PREAMBLE_SIZE))
+ return AVERROR_NOMEM;
+ memcpy(pkt->data, preamble, MM_PREAMBLE_SIZE);
+ if (get_buffer(pb, pkt->data + MM_PREAMBLE_SIZE, length) != length)
+ return AVERROR_IO;
+ pkt->size = length + MM_PREAMBLE_SIZE;
+ pkt->stream_index = 0;
+ pkt->pts = mm->video_pts++;
+ return 0;
+
+ case MM_TYPE_AUDIO :
+ if (av_get_packet(&s->pb, pkt, length)<0)
+ return AVERROR_NOMEM;
+ pkt->size = length;
+ pkt->stream_index = 1;
+ pkt->pts = mm->audio_pts++;
+ return 0;
+
+ default :
+ av_log(NULL, AV_LOG_INFO, "mm: unknown chunk type 0x%x\n", type);
+ url_fseek(pb, length, SEEK_CUR);
+ }
+ }
+
+ return 0;
+}
+
+static int mm_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVInputFormat mm_demuxer = {
+ "mm",
+ "American Laser Games MM format",
+ sizeof(MmDemuxContext),
+ mm_probe,
+ mm_read_header,
+ mm_read_packet,
+ mm_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/mmf.c b/contrib/ffmpeg/libavformat/mmf.c
new file mode 100644
index 000000000..40b1a497c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mmf.c
@@ -0,0 +1,331 @@
+/*
+ * Yamaha SMAF format
+ * Copyright (c) 2005 Vidar Madsen
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+
+typedef struct {
+ offset_t atrpos, atsqpos, awapos;
+ offset_t data_size;
+} MMFContext;
+
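+/* Sample rates handled here; the rate code stored in the low nibble of the
+ * ATRx "(channel << 7) | (format << 4) | rate" byte indexes this table. */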
+static int mmf_rates[] = { 4000, 8000, 11025, 22050, 44100 };
+
+static int mmf_rate_code(int rate)
+{
+ int i;
+ for(i = 0; i < 5; i++)
+ if(mmf_rates[i] == rate)
+ return i;
+ return -1;
+}
+
+static int mmf_rate(int code)
+{
+ if((code < 0) || (code > 4))
+ return -1;
+ return mmf_rates[code];
+}
+
+#ifdef CONFIG_MUXERS
+/* Copy of end_tag() from avienc.c, but for big-endian chunk size */
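+/* Seeks back to the 32-bit size field written just before 'start', fills in the
+ * number of bytes emitted since then, and returns to the previous position. */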
+static void end_tag_be(ByteIOContext *pb, offset_t start)
+{
+ offset_t pos;
+
+ pos = url_ftell(pb);
+ url_fseek(pb, start - 4, SEEK_SET);
+ put_be32(pb, (uint32_t)(pos - start));
+ url_fseek(pb, pos, SEEK_SET);
+}
+
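+/* Layout of the file produced below; the MMMD, ATR and Awa chunk sizes are
+ * patched in mmf_write_trailer() once the amount of wave data is known:
+ *   "MMMD" file chunk
+ *     "CNTI"   content info
+ *     "ATR\0"  audio track chunk
+ *       "Atsq" sequence data (filled on close)
+ *       "Awa\1" wave data
+ */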
+static int mmf_write_header(AVFormatContext *s)
+{
+ MMFContext *mmf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t pos;
+ int rate;
+
+ rate = mmf_rate_code(s->streams[0]->codec->sample_rate);
+ if(rate < 0) {
+ av_log(s, AV_LOG_ERROR, "Unsupported sample rate %d\n", s->streams[0]->codec->sample_rate);
+ return -1;
+ }
+
+ put_tag(pb, "MMMD");
+ put_be32(pb, 0);
+ pos = start_tag(pb, "CNTI");
+ put_byte(pb, 0); /* class */
+ put_byte(pb, 0); /* type */
+ put_byte(pb, 0); /* code type */
+ put_byte(pb, 0); /* status */
+ put_byte(pb, 0); /* counts */
+ put_tag(pb, "VN:libavcodec,"); /* metadata ("ST:songtitle,VN:version,...") */
+ end_tag_be(pb, pos);
+
+ put_buffer(pb, "ATR\x00", 4);
+ put_be32(pb, 0);
+ mmf->atrpos = url_ftell(pb);
+ put_byte(pb, 0); /* format type */
+ put_byte(pb, 0); /* sequence type */
+ put_byte(pb, (0 << 7) | (1 << 4) | rate); /* (channel << 7) | (format << 4) | rate */
+ put_byte(pb, 0); /* wave base bit */
+ put_byte(pb, 2); /* time base d */
+ put_byte(pb, 2); /* time base g */
+
+ put_tag(pb, "Atsq");
+ put_be32(pb, 16);
+ mmf->atsqpos = url_ftell(pb);
+ /* Will be filled on close */
+ put_buffer(pb, "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 16);
+
+ mmf->awapos = start_tag(pb, "Awa\x01");
+
+ av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int mmf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+/* Write a variable-length symbol */
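+/* Values below 128 are written as a single byte; larger values are reduced by
+ * 128 and split across two bytes, with the high bit set on the first byte. */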
+static void put_varlength(ByteIOContext *pb, int val)
+{
+ if(val < 128)
+ put_byte(pb, val);
+ else {
+ val -= 128;
+ put_byte(pb, 0x80 | val >> 7);
+ put_byte(pb, 0x7f & val);
+ }
+}
+
+static int mmf_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ MMFContext *mmf = s->priv_data;
+ offset_t pos, size;
+ int gatetime;
+
+ if (!url_is_streamed(&s->pb)) {
+ /* Fill in length fields */
+ end_tag_be(pb, mmf->awapos);
+ end_tag_be(pb, mmf->atrpos);
+ end_tag_be(pb, 8);
+
+ pos = url_ftell(pb);
+ size = pos - mmf->awapos;
+
+ /* Fill Atsq chunk */
+ url_fseek(pb, mmf->atsqpos, SEEK_SET);
+
+ /* "play wav" */
+ put_byte(pb, 0); /* start time */
+ put_byte(pb, 1); /* (channel << 6) | wavenum */
+ gatetime = size * 500 / s->streams[0]->codec->sample_rate;
+ put_varlength(pb, gatetime); /* duration */
+
+ /* "nop" */
+ put_varlength(pb, gatetime); /* start time */
+ put_buffer(pb, "\xff\x00", 2); /* nop */
+
+ /* "end of sequence" */
+ put_buffer(pb, "\x00\x00\x00\x00", 4);
+
+ url_fseek(pb, pos, SEEK_SET);
+
+ put_flush_packet(pb);
+ }
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+static int mmf_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'M' && p->buf[1] == 'M' &&
+ p->buf[2] == 'M' && p->buf[3] == 'D' &&
+ p->buf[8] == 'C' && p->buf[9] == 'N' &&
+ p->buf[10] == 'T' && p->buf[11] == 'I')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* mmf input */
+static int mmf_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MMFContext *mmf = s->priv_data;
+ unsigned int tag;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ offset_t file_size, size;
+ int rate, params;
+
+ tag = get_le32(pb);
+ if (tag != MKTAG('M', 'M', 'M', 'D'))
+ return -1;
+ file_size = get_be32(pb);
+
+ /* Skip some unused chunks that may or may not be present */
+ for(;; url_fseek(pb, size, SEEK_CUR)) {
+ tag = get_le32(pb);
+ size = get_be32(pb);
+ if(tag == MKTAG('C','N','T','I')) continue;
+ if(tag == MKTAG('O','P','D','A')) continue;
+ break;
+ }
+
+ /* Tag = "ATRx", where "x" = track number */
+ if ((tag & 0xffffff) == MKTAG('M', 'T', 'R', 0)) {
+ av_log(s, AV_LOG_ERROR, "MIDI like format found, unsupported\n");
+ return -1;
+ }
+ if ((tag & 0xffffff) != MKTAG('A', 'T', 'R', 0)) {
+ av_log(s, AV_LOG_ERROR, "Unsupported SMAF chunk %08x\n", tag);
+ return -1;
+ }
+
+ get_byte(pb); /* format type */
+ get_byte(pb); /* sequence type */
+ params = get_byte(pb); /* (channel << 7) | (format << 4) | rate */
+ rate = mmf_rate(params & 0x0f);
+ if(rate < 0) {
+ av_log(s, AV_LOG_ERROR, "Invalid sample rate\n");
+ return -1;
+ }
+ get_byte(pb); /* wave base bit */
+ get_byte(pb); /* time base d */
+ get_byte(pb); /* time base g */
+
+ /* Skip some unused chunks that may or may not be present */
+ for(;; url_fseek(pb, size, SEEK_CUR)) {
+ tag = get_le32(pb);
+ size = get_be32(pb);
+ if(tag == MKTAG('A','t','s','q')) continue;
+ if(tag == MKTAG('A','s','p','I')) continue;
+ break;
+ }
+
+ /* Make sure it's followed by an Awa chunk, aka wave data */
+ if ((tag & 0xffffff) != MKTAG('A', 'w', 'a', 0)) {
+ av_log(s, AV_LOG_ERROR, "Unexpected SMAF chunk %08x\n", tag);
+ return -1;
+ }
+ mmf->data_size = size;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_ADPCM_YAMAHA;
+ st->codec->sample_rate = rate;
+ st->codec->channels = 1;
+ st->codec->bits_per_sample = 4;
+ st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_sample;
+
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int mmf_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MMFContext *mmf = s->priv_data;
+ AVStream *st;
+ int ret, size;
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+ st = s->streams[0];
+
+ size = MAX_SIZE;
+ if(size > mmf->data_size)
+ size = mmf->data_size;
+
+ if(!size)
+ return AVERROR_IO;
+
+ if (av_new_packet(pkt, size))
+ return AVERROR_IO;
+ pkt->stream_index = 0;
+
+ ret = get_buffer(&s->pb, pkt->data, pkt->size);
+ if (ret < 0)
+ av_free_packet(pkt);
+
+ mmf->data_size -= ret;
+
+ pkt->size = ret;
+ return ret;
+}
+
+static int mmf_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int mmf_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ return pcm_read_seek(s, stream_index, timestamp, flags);
+}
+
+#ifdef CONFIG_MMF_DEMUXER
+AVInputFormat mmf_demuxer = {
+ "mmf",
+ "mmf format",
+ sizeof(MMFContext),
+ mmf_probe,
+ mmf_read_header,
+ mmf_read_packet,
+ mmf_read_close,
+ mmf_read_seek,
+};
+#endif
+#ifdef CONFIG_MMF_MUXER
+AVOutputFormat mmf_muxer = {
+ "mmf",
+ "mmf format",
+ "application/vnd.smaf",
+ "mmf",
+ sizeof(MMFContext),
+ CODEC_ID_ADPCM_YAMAHA,
+ CODEC_ID_NONE,
+ mmf_write_header,
+ mmf_write_packet,
+ mmf_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/mov.c b/contrib/ffmpeg/libavformat/mov.c
new file mode 100644
index 000000000..3ceac64b1
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mov.c
@@ -0,0 +1,1798 @@
+/*
+ * MOV demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <limits.h>
+
+//#define DEBUG
+
+#include "avformat.h"
+#include "riff.h"
+#include "isom.h"
+#include "dv.h"
+
+#ifdef CONFIG_ZLIB
+#include <zlib.h>
+#endif
+
+/*
+ * First version by Francois Revol revol@free.fr
+ * Seek function by Gael Chardon gael.dev@4now.net
+ *
+ * Features and limitations:
+ * - reads most of the QT files I have (at least the structure),
+ * the exceptions are .mov with zlib compressed headers ('cmov' section). It shouldn't be hard to implement.
+ * FIXED, Francois Revol, 07/17/2002
+ * - ffmpeg has nearly none of the usual QuickTime codecs,
+ *   although I successfully dumped raw and mp3 audio tracks off .mov files.
+ * Sample QuickTime files with mp3 audio can be found at: http://www.3ivx.com/showcase.html
+ * - .mp4 parsing is still hazardous, although the format really is QuickTime with some minor changes
+ * (to make .mov parser crash maybe ?), despite what they say in the MPEG FAQ at
+ * http://mpeg.telecomitalialab.com/faq.htm
+ * - the code is quite ugly... maybe I won't do it recursive next time :-)
+ * - seek is not supported with files that contain edit list
+ *
+ * Funny I didn't know about http://sourceforge.net/projects/qt-ffmpeg/
+ * when coding this :) (it's a writer anyway)
+ *
+ * Reference documents:
+ * http://www.geocities.com/xhelmboyx/quicktime/formats/qtm-layout.txt
+ * Apple:
+ * http://developer.apple.com/documentation/QuickTime/QTFF/
+ * http://developer.apple.com/documentation/QuickTime/QTFF/qtff.pdf
+ * QuickTime is a trademark of Apple (AFAIK :))
+ */
+
+#include "qtpalette.h"
+
+
+#undef NDEBUG
+#include <assert.h>
+
+static const CodecTag mov_video_tags[] = {
+/* { CODEC_ID_, MKTAG('c', 'v', 'i', 'd') }, *//* Cinepak */
+/* { CODEC_ID_H263, MKTAG('r', 'a', 'w', ' ') }, *//* Uncompressed RGB */
+/* { CODEC_ID_H263, MKTAG('Y', 'u', 'v', '2') }, *//* Uncompressed YUV422 */
+/* { CODEC_ID_RAWVIDEO, MKTAG('A', 'V', 'U', 'I') }, *//* YUV with alpha-channel (AVID Uncompressed) */
+/* Graphics */
+/* Animation */
+/* Apple video */
+/* Kodak Photo CD */
+ { CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') }, /* PhotoJPEG */
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'e', 'g') }, /* MPEG */
+ { CODEC_ID_MJPEG, MKTAG('m', 'j', 'p', 'a') }, /* Motion-JPEG (format A) */
+ { CODEC_ID_MJPEGB, MKTAG('m', 'j', 'p', 'b') }, /* Motion-JPEG (format B) */
+ { CODEC_ID_MJPEG, MKTAG('A', 'V', 'D', 'J') }, /* MJPEG with alpha-channel (AVID JFIF meridien compressed) */
+/* { CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') }, *//* MJPEG with alpha-channel (AVID ABVB/Truevision NuVista) */
+ { CODEC_ID_GIF, MKTAG('g', 'i', 'f', ' ') }, /* embedded gif files as frames (usually one "click to play movie" frame) */
+/* Sorenson video */
+ { CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') }, /* Sorenson Video v1 */
+ { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') }, /* Sorenson Video v1 */
+ { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', 'i') }, /* Sorenson Video v1 (from QT specs)*/
+ { CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') }, /* Sorenson Video v3 */
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') }, /* OpenDiVX *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
+ { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
+ { CODEC_ID_MPEG4, MKTAG('3', 'I', 'V', '2') }, /* experimental: 3IVX files before ivx D4 4.5.1 */
+/* { CODEC_ID_, MKTAG('I', 'V', '5', '0') }, *//* Indeo 5.0 */
+ { CODEC_ID_H263, MKTAG('h', '2', '6', '3') }, /* H263 */
+ { CODEC_ID_H263, MKTAG('s', '2', '6', '3') }, /* H263 ?? works */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, /* On2 VP3 */
+ { CODEC_ID_RPZA, MKTAG('r', 'p', 'z', 'a') }, /* Apple Video (RPZA) */
+ { CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') }, /* Cinepak */
+ { CODEC_ID_8BPS, MKTAG('8', 'B', 'P', 'S') }, /* Planar RGB (8BPS) */
+ { CODEC_ID_SMC, MKTAG('s', 'm', 'c', ' ') }, /* Apple Graphics (SMC) */
+ { CODEC_ID_QTRLE, MKTAG('r', 'l', 'e', ' ') }, /* Apple Animation (RLE) */
+ { CODEC_ID_QDRAW, MKTAG('q', 'd', 'r', 'w') }, /* QuickDraw */
+ { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') }, /* AVC-1/H.264 */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '2') }, /* MPEG2 produced by Sony HD camera */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '3') }, /* HDV produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'n') }, /* MPEG2 IMX NTSC 525/60 50mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'p') }, /* MPEG2 IMX PAL 625/50 50mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'n') }, /* MPEG2 IMX NTSC 525/60 30mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'p') }, /* MPEG2 IMX PAL 625/50 30mb/s produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL produced by FCP */
+ //{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '5') }, /* DVCPRO HD 50i produced by FCP */
+ //{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '6') }, /* DVCPRO HD 60i produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('A', 'V', 'd', 'v') }, /* AVID DV */
+ //{ CODEC_ID_JPEG2000, MKTAG('m', 'j', 'p', '2') }, /* JPEG 2000 produced by FCP */
+ { CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') }, /* Truevision Targa */
+ { CODEC_ID_TIFF, MKTAG('t', 'i', 'f', 'f') }, /* TIFF embedded in MOV */
+ { CODEC_ID_RAWVIDEO, MKTAG('2', 'v', 'u', 'y') }, /* UNCOMPRESSED 8BIT 4:2:2 */
+ { CODEC_ID_NONE, 0 },
+};
+
+static const CodecTag mov_audio_tags[] = {
+ { CODEC_ID_PCM_S32BE, MKTAG('i', 'n', '3', '2') },
+ { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
+ { CODEC_ID_PCM_S16BE, MKTAG('N', 'O', 'N', 'E') }, /* uncompressed */
+ { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') }, /* 16 bits */
+ { CODEC_ID_PCM_U8, MKTAG('r', 'a', 'w', ' ') }, /* 8 bits unsigned */
+ { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') }, /* */
+ { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') }, /* */
+ { CODEC_ID_PCM_ALAW, MKTAG('a', 'l', 'a', 'w') }, /* */
+ { CODEC_ID_ADPCM_IMA_QT, MKTAG('i', 'm', 'a', '4') }, /* IMA-4 ADPCM */
+ { CODEC_ID_ADPCM_MS, MKTAG('m', 's', 0x00, 0x02) }, /* MS ADPCM */
+ { CODEC_ID_MACE3, MKTAG('M', 'A', 'C', '3') }, /* Macintosh Audio Compression and Expansion 3:1 */
+ { CODEC_ID_MACE6, MKTAG('M', 'A', 'C', '6') }, /* Macintosh Audio Compression and Expansion 6:1 */
+
+ { CODEC_ID_MP3, MKTAG('.', 'm', 'p', '3') }, /* MPEG layer 3 */ /* sample files at http://www.3ivx.com/showcase.html use this tag */
+ { CODEC_ID_MP2, 0x6D730055 }, /* MPEG layer 3 */
+ { CODEC_ID_MP2, 0x5500736D }, /* MPEG layer 3 *//* XXX: check endianness */
+/* { CODEC_ID_OGG_VORBIS, MKTAG('O', 'g', 'g', 'S') }, *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
+/* MP4 tags */
+ { CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') }, /* MPEG-4 AAC */
+ /* The standard for mpeg4 audio is still not normalised AFAIK anyway */
+ { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') }, /* AMR-NB 3gp */
+ { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') }, /* AMR-WB 3gp */
+ { CODEC_ID_AC3, MKTAG('m', 's', 0x20, 0x00) }, /* Dolby AC-3 */
+ { CODEC_ID_ALAC,MKTAG('a', 'l', 'a', 'c') }, /* Apple Lossless */
+ { CODEC_ID_QDM2,MKTAG('Q', 'D', 'M', '2') }, /* QDM2 */
+ { CODEC_ID_DVAUDIO, MKTAG('v', 'd', 'v', 'a') },
+ { CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'c', 'a') },
+ { CODEC_ID_PCM_S16LE, MKTAG('l', 'p', 'c', 'm') },
+ { CODEC_ID_NONE, 0 },
+};
+
+/* the QuickTime file format is quite convoluted...
+ * it has lots of index tables, each indexing something in another one...
+ * Here we just use what is needed to read the chunks
+ */
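+/* The tables used below: stco/co64 hold chunk offsets, stsc maps samples to
+ * chunks, stsz holds sample sizes, stts/ctts hold timing, and stss marks the
+ * keyframes; mov_build_index() later flattens them into a per-stream index. */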
+
+typedef struct MOV_sample_to_chunk_tbl {
+ long first;
+ long count;
+ long id;
+} MOV_sample_to_chunk_tbl;
+
+typedef struct {
+ uint32_t type;
+ int64_t offset;
+ int64_t size; /* total size (excluding the size and type fields) */
+} MOV_atom_t;
+
+typedef struct {
+ int seed;
+ int flags;
+ int size;
+ void* clrs;
+} MOV_ctab_t;
+
+typedef struct MOV_mdat_atom_s {
+ offset_t offset;
+ int64_t size;
+} MOV_mdat_atom_t;
+
+typedef struct {
+ uint8_t version;
+ uint32_t flags; // 24bit
+
+ /* 0x03 ESDescrTag */
+ uint16_t es_id;
+#define MP4ODescrTag 0x01
+#define MP4IODescrTag 0x02
+#define MP4ESDescrTag 0x03
+#define MP4DecConfigDescrTag 0x04
+#define MP4DecSpecificDescrTag 0x05
+#define MP4SLConfigDescrTag 0x06
+#define MP4ContentIdDescrTag 0x07
+#define MP4SupplContentIdDescrTag 0x08
+#define MP4IPIPtrDescrTag 0x09
+#define MP4IPMPPtrDescrTag 0x0A
+#define MP4IPMPDescrTag 0x0B
+#define MP4RegistrationDescrTag 0x0D
+#define MP4ESIDIncDescrTag 0x0E
+#define MP4ESIDRefDescrTag 0x0F
+#define MP4FileIODescrTag 0x10
+#define MP4FileODescrTag 0x11
+#define MP4ExtProfileLevelDescrTag 0x13
+#define MP4ExtDescrTagsStart 0x80
+#define MP4ExtDescrTagsEnd 0xFE
+ uint8_t stream_priority;
+
+ /* 0x04 DecConfigDescrTag */
+ uint8_t object_type_id;
+ uint8_t stream_type;
+ /* XXX: really streamType is
+ * only 6bit, followed by:
+ * 1bit upStream
+ * 1bit reserved
+ */
+ uint32_t buffer_size_db; // 24
+ uint32_t max_bitrate;
+ uint32_t avg_bitrate;
+
+ /* 0x05 DecSpecificDescrTag */
+ uint8_t decoder_cfg_len;
+ uint8_t *decoder_cfg;
+
+ /* 0x06 SLConfigDescrTag */
+ uint8_t sl_config_len;
+ uint8_t *sl_config;
+} MOV_esds_t;
+
+struct MOVParseTableEntry;
+
+typedef struct MOVStreamContext {
+ int ffindex; /* the ffmpeg stream id */
+ long next_chunk;
+ long chunk_count;
+ int64_t *chunk_offsets;
+ int stts_count;
+ Time2Sample *stts_data;
+ int ctts_count;
+ Time2Sample *ctts_data;
+ int edit_count; /* number of 'edit' (elst atom) */
+ long sample_to_chunk_sz;
+ MOV_sample_to_chunk_tbl *sample_to_chunk;
+ int sample_to_ctime_index;
+ int sample_to_ctime_sample;
+ long sample_size;
+ long sample_count;
+ long *sample_sizes;
+ long keyframe_count;
+ long *keyframes;
+ int time_scale;
+ int time_rate;
+ long current_sample;
+ MOV_esds_t esds;
+ AVRational sample_size_v1;
+ int dv_audio_container;
+} MOVStreamContext;
+
+typedef struct MOVContext {
+ AVFormatContext *fc;
+ int time_scale;
+ int64_t duration; /* duration of the longest track */
+    int found_moov; /* when both 'moov' and 'mdat' sections have been found */
+ int found_mdat; /* we suppose we have enough data to read the file */
+ int64_t mdat_size;
+ int64_t mdat_offset;
+ int total_streams;
+    /* some streams listed here aren't presented to the ffmpeg API, since they are neither video nor audio
+ * but we need the info to be able to skip data from those streams in the 'mdat' section
+ */
+ MOVStreamContext *streams[MAX_STREAMS];
+
+ int ctab_size;
+ MOV_ctab_t **ctab; /* color tables */
+    const struct MOVParseTableEntry *parse_table; /* could eventually be used to change the table */
+    /* NOTE: for recursion, save to / restore from a local variable! */
+
+ AVPaletteControl palette_control;
+ MOV_mdat_atom_t *mdat_list;
+ int mdat_count;
+ DVDemuxContext *dv_demux;
+ AVFormatContext *dv_fctx;
+ int isom; /* 1 if file is ISO Media (mp4/3gp) */
+} MOVContext;
+
+
+/* XXX: it's the first time I make a recursive parser I think... sorry if it's ugly :P */
+
+/* those functions parse an atom */
+/* return code:
+ 1: found what I wanted, exit
+ 0: continue to parse next atom
+ -1: error occurred, exit
+ */
+typedef int (*mov_parse_function)(MOVContext *ctx, ByteIOContext *pb, MOV_atom_t atom);
+
+/* links atom IDs to parse functions */
+typedef struct MOVParseTableEntry {
+ uint32_t type;
+ mov_parse_function func;
+} MOVParseTableEntry;
+
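+/* Walks the children of 'atom': reads each child's size and type, dispatches
+ * known types to their handler via the parse table, and skips the payload of
+ * unknown leaf atoms; 64-bit extended sizes and zero sizes are handled too. */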
+static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int64_t total_size = 0;
+ MOV_atom_t a;
+ int i;
+ int err = 0;
+
+ a.offset = atom.offset;
+
+ if (atom.size < 0)
+ atom.size = 0x7fffffffffffffffLL;
+ while(((total_size + 8) < atom.size) && !url_feof(pb) && !err) {
+ a.size = atom.size;
+ a.type=0L;
+ if(atom.size >= 8) {
+ a.size = get_be32(pb);
+ a.type = get_le32(pb);
+ }
+ total_size += 8;
+ a.offset += 8;
+ dprintf("type: %08x %.4s sz: %"PRIx64" %"PRIx64" %"PRIx64"\n", a.type, (char*)&a.type, a.size, atom.size, total_size);
+ if (a.size == 1) { /* 64 bit extended size */
+ a.size = get_be64(pb) - 8;
+ a.offset += 8;
+ total_size += 8;
+ }
+ if (a.size == 0) {
+ a.size = atom.size - total_size;
+ if (a.size <= 8)
+ break;
+ }
+ for (i = 0; c->parse_table[i].type != 0L
+ && c->parse_table[i].type != a.type; i++)
+ /* empty */;
+
+ a.size -= 8;
+
+ if(a.size < 0)
+ break;
+
+ if (c->parse_table[i].type == 0) { /* skip leaf atoms data */
+ url_fskip(pb, a.size);
+ } else {
+ offset_t start_pos = url_ftell(pb);
+ int64_t left;
+ err = (c->parse_table[i].func)(c, pb, a);
+ left = a.size - url_ftell(pb) + start_pos;
+ if (left > 0) /* skip garbage at atom end */
+ url_fskip(pb, left);
+ }
+
+ a.offset += a.size;
+ total_size += a.size;
+ }
+
+ if (!err && total_size < atom.size && atom.size < 0x7ffff) {
+ url_fskip(pb, atom.size - total_size);
+ }
+
+ return err;
+}
+
+static int mov_read_ctab(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+#if 1
+ url_fskip(pb, atom.size); // for now
+#else
+ VERY VERY BROKEN, NEVER execute this, needs rewrite
+ unsigned int len;
+ MOV_ctab_t *t;
+ c->ctab = av_realloc(c->ctab, ++c->ctab_size);
+ t = c->ctab[c->ctab_size];
+ t->seed = get_be32(pb);
+ t->flags = get_be16(pb);
+ t->size = get_be16(pb) + 1;
+ len = 2 * t->size * 4;
+ if (len > 0) {
+ t->clrs = av_malloc(len); // 16bit A R G B
+ if (t->clrs)
+ get_buffer(pb, t->clrs, len);
+ }
+#endif
+
+ return 0;
+}
+
+static int mov_read_hdlr(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ uint32_t type;
+ uint32_t ctype;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ /* component type */
+ ctype = get_le32(pb);
+ type = get_le32(pb); /* component subtype */
+
+ dprintf("ctype= %c%c%c%c (0x%08lx)\n", *((char *)&ctype), ((char *)&ctype)[1], ((char *)&ctype)[2], ((char *)&ctype)[3], (long) ctype);
+ dprintf("stype= %c%c%c%c\n", *((char *)&type), ((char *)&type)[1], ((char *)&type)[2], ((char *)&type)[3]);
+ if(!ctype)
+ c->isom = 1;
+ if(type == MKTAG('v', 'i', 'd', 'e'))
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ else if(type == MKTAG('s', 'o', 'u', 'n'))
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ else if(type == MKTAG('m', '1', 'a', ' '))
+ st->codec->codec_id = CODEC_ID_MP2;
+ else if(type == MKTAG('s', 'u', 'b', 'p')) {
+ st->codec->codec_type = CODEC_TYPE_SUBTITLE;
+ st->codec->codec_id = CODEC_ID_DVD_SUBTITLE;
+ }
+    get_be32(pb); /* component manufacturer */
+ get_be32(pb); /* component flags */
+ get_be32(pb); /* component flags mask */
+
+ if(atom.size <= 24)
+ return 0; /* nothing left to read */
+
+ url_fskip(pb, atom.size - (url_ftell(pb) - atom.offset));
+ return 0;
+}
+
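+/* MPEG-4 descriptor lengths are stored base-128: up to four bytes of seven
+ * payload bits each, where a set high bit means another length byte follows. */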
+static int mov_mp4_read_descr_len(ByteIOContext *pb)
+{
+ int len = 0;
+ int count = 4;
+ while (count--) {
+ int c = get_byte(pb);
+ len = (len << 7) | (c & 0x7f);
+ if (!(c & 0x80))
+ break;
+ }
+ return len;
+}
+
+static int mov_mp4_read_descr(ByteIOContext *pb, int *tag)
+{
+ int len;
+ *tag = get_byte(pb);
+ len = mov_mp4_read_descr_len(pb);
+ dprintf("MPEG4 description: tag=0x%02x len=%d\n", *tag, len);
+ return len;
+}
+
+static int mov_read_esds(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ int tag, len;
+
+    /* Well, broken but sufficient for some MP4 streams */
+ get_be32(pb); /* version + flags */
+ len = mov_mp4_read_descr(pb, &tag);
+ if (tag == MP4ESDescrTag) {
+ get_be16(pb); /* ID */
+ get_byte(pb); /* priority */
+ } else
+ get_be16(pb); /* ID */
+
+ len = mov_mp4_read_descr(pb, &tag);
+ if (tag == MP4DecConfigDescrTag) {
+ sc->esds.object_type_id = get_byte(pb);
+ sc->esds.stream_type = get_byte(pb);
+ sc->esds.buffer_size_db = get_be24(pb);
+ sc->esds.max_bitrate = get_be32(pb);
+ sc->esds.avg_bitrate = get_be32(pb);
+
+ st->codec->codec_id= codec_get_id(ff_mov_obj_type, sc->esds.object_type_id);
+ dprintf("esds object type id %d\n", sc->esds.object_type_id);
+ len = mov_mp4_read_descr(pb, &tag);
+ if (tag == MP4DecSpecificDescrTag) {
+ dprintf("Specific MPEG4 header len=%d\n", len);
+ st->codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (st->codec->extradata) {
+ get_buffer(pb, st->codec->extradata, len);
+ st->codec->extradata_size = len;
+ /* from mplayer */
+ if ((*st->codec->extradata >> 3) == 29) {
+ st->codec->codec_id = CODEC_ID_MP3ON4;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/* this atom contains actual media data */
+static int mov_read_mdat(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ if(atom.size == 0) /* wrong one (MP4) */
+ return 0;
+ c->mdat_list = av_realloc(c->mdat_list, (c->mdat_count + 1) * sizeof(*c->mdat_list));
+ c->mdat_list[c->mdat_count].offset = atom.offset;
+ c->mdat_list[c->mdat_count].size = atom.size;
+ c->mdat_count++;
+ c->found_mdat=1;
+ c->mdat_offset = atom.offset;
+ c->mdat_size = atom.size;
+ if(c->found_moov)
+ return 1; /* found both, just go */
+ url_fskip(pb, atom.size);
+ return 0; /* now go for moov */
+}
+
+static int mov_read_ftyp(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ uint32_t type = get_le32(pb);
+
+ if (type != MKTAG('q','t',' ',' '))
+ c->isom = 1;
+ av_log(c->fc, AV_LOG_DEBUG, "ISO: File Type Major Brand: %.4s\n",(char *)&type);
+ get_be32(pb); /* minor version */
+ url_fskip(pb, atom.size - 8);
+ return 0;
+}
+
+/* this atom should contain all header atoms */
+static int mov_read_moov(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int err;
+
+ err = mov_read_default(c, pb, atom);
+ /* we parsed the 'moov' atom, we can terminate the parsing as soon as we find the 'mdat' */
+ /* so we don't parse the whole file if over a network */
+ c->found_moov=1;
+ if(c->found_mdat)
+ return 1; /* found both, just go */
+ return 0; /* now go for mdat */
+}
+
+
+static int mov_read_mdhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ int version = get_byte(pb);
+ int lang;
+
+ if (version > 1)
+ return 1; /* unsupported */
+
+ get_byte(pb); get_byte(pb);
+ get_byte(pb); /* flags */
+
+ if (version == 1) {
+ get_be64(pb);
+ get_be64(pb);
+ } else {
+ get_be32(pb); /* creation time */
+ get_be32(pb); /* modification time */
+ }
+
+ sc->time_scale = get_be32(pb);
+ st->duration = (version == 1) ? get_be64(pb) : get_be32(pb); /* duration */
+
+ lang = get_be16(pb); /* language */
+ ff_mov_lang_to_iso639(lang, st->language);
+ get_be16(pb); /* quality */
+
+ return 0;
+}
+
+static int mov_read_mvhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int version = get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ if (version == 1) {
+ get_be64(pb);
+ get_be64(pb);
+ } else {
+ get_be32(pb); /* creation time */
+ get_be32(pb); /* modification time */
+ }
+ c->time_scale = get_be32(pb); /* time scale */
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "time scale = %i\n", c->time_scale);
+#endif
+ c->duration = (version == 1) ? get_be64(pb) : get_be32(pb); /* duration */
+ get_be32(pb); /* preferred scale */
+
+ get_be16(pb); /* preferred volume */
+
+ url_fskip(pb, 10); /* reserved */
+
+ url_fskip(pb, 36); /* display matrix */
+
+ get_be32(pb); /* preview time */
+ get_be32(pb); /* preview duration */
+ get_be32(pb); /* poster time */
+ get_be32(pb); /* selection time */
+ get_be32(pb); /* selection duration */
+ get_be32(pb); /* current time */
+ get_be32(pb); /* next track ID */
+
+ return 0;
+}
+
+static int mov_read_smi(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
+
+    // currently the SVQ3 decoder expects the full STSD header - so let's fake it
+    // this should be fixed so that just the SMI header is passed
+ av_free(st->codec->extradata);
+ st->codec->extradata_size = 0x5a + atom.size;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (st->codec->extradata) {
+ strcpy(st->codec->extradata, "SVQ3"); // fake
+ get_buffer(pb, st->codec->extradata + 0x5a, atom.size);
+ dprintf("Reading SMI %"PRId64" %s\n", atom.size, st->codec->extradata + 0x5a);
+ } else
+ url_fskip(pb, atom.size);
+
+ return 0;
+}
+
+static int mov_read_enda(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ int little_endian = get_be16(pb);
+
+ if (little_endian) {
+ switch (st->codec->codec_id) {
+ case CODEC_ID_PCM_S24BE:
+ st->codec->codec_id = CODEC_ID_PCM_S24LE;
+ break;
+ case CODEC_ID_PCM_S32BE:
+ st->codec->codec_id = CODEC_ID_PCM_S32LE;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int mov_read_alac(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+    // currently the ALAC decoder expects the full atom header - so let's fake it
+    // this should be fixed so that just the ALAC header is passed
+
+ av_free(st->codec->extradata);
+ st->codec->extradata_size = 36;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (st->codec->extradata) {
+ strcpy(st->codec->extradata + 4, "alac"); // fake
+ get_buffer(pb, st->codec->extradata + 8, 36 - 8);
+ dprintf("Reading alac %d %s\n", st->codec->extradata_size, st->codec->extradata);
+ } else
+ url_fskip(pb, atom.size);
+ return 0;
+}
+
+static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
+
+ if (st->codec->codec_id == CODEC_ID_QDM2) {
+        // pass the whole frma atom to the codec, needed at least for QDM2
+ av_free(st->codec->extradata);
+ st->codec->extradata_size = atom.size;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (st->codec->extradata) {
+ get_buffer(pb, st->codec->extradata, atom.size);
+ } else
+ url_fskip(pb, atom.size);
+ } else if (atom.size > 8) { /* to read frma, esds atoms */
+ mov_read_default(c, pb, atom);
+ } else
+ url_fskip(pb, atom.size);
+ return 0;
+}
+
+static int mov_read_jp2h(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
+
+ av_free(st->codec->extradata);
+
+ st->codec->extradata_size = atom.size + 8;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+    /* pass the whole jp2h atom to the codec */
+ if (st->codec->extradata) {
+ strcpy(st->codec->extradata + 4, "jp2h");
+ get_buffer(pb, st->codec->extradata + 8, atom.size);
+ } else
+ url_fskip(pb, atom.size);
+ return 0;
+}
+
+static int mov_read_avcC(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
+
+ av_free(st->codec->extradata);
+
+ st->codec->extradata_size = atom.size;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (st->codec->extradata) {
+ get_buffer(pb, st->codec->extradata, atom.size);
+ } else
+ url_fskip(pb, atom.size);
+
+ return 0;
+}
+
+static int mov_read_stco(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ entries = get_be32(pb);
+
+ if(entries >= UINT_MAX/sizeof(int64_t))
+ return -1;
+
+ sc->chunk_count = entries;
+ sc->chunk_offsets = av_malloc(entries * sizeof(int64_t));
+ if (!sc->chunk_offsets)
+ return -1;
+ if (atom.type == MKTAG('s', 't', 'c', 'o')) {
+ for(i=0; i<entries; i++) {
+ sc->chunk_offsets[i] = get_be32(pb);
+ }
+ } else if (atom.type == MKTAG('c', 'o', '6', '4')) {
+ for(i=0; i<entries; i++) {
+ sc->chunk_offsets[i] = get_be64(pb);
+ }
+ } else
+ return -1;
+
+ return 0;
+}
+
+static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ int entries, frames_per_sample;
+ uint32_t format;
+ uint8_t codec_name[32];
+
+ /* for palette traversal */
+ int color_depth;
+ int color_start;
+ int color_count;
+ int color_end;
+ int color_index;
+ int color_dec;
+ int color_greyscale;
+ unsigned char *color_table;
+ int j;
+ unsigned char r, g, b;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ entries = get_be32(pb);
+
+ while(entries--) { //Parsing Sample description table
+ enum CodecID id;
+ MOV_atom_t a = { 0, 0, 0 };
+ offset_t start_pos = url_ftell(pb);
+ int size = get_be32(pb); /* size */
+ format = get_le32(pb); /* data format */
+
+ get_be32(pb); /* reserved */
+ get_be16(pb); /* reserved */
+ get_be16(pb); /* index */
+
+ if (st->codec->codec_tag) {
+ /* multiple fourcc, just skip for now */
+ url_fskip(pb, size - (url_ftell(pb) - start_pos));
+ continue;
+ }
+
+ st->codec->codec_tag = format;
+ id = codec_get_id(mov_audio_tags, format);
+ if (st->codec->codec_type != CODEC_TYPE_VIDEO && id > 0) {
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ } else if (st->codec->codec_type != CODEC_TYPE_AUDIO && /* do not overwrite codec type */
+ format && format != MKTAG('m', 'p', '4', 's')) { /* skip old asf mpeg4 tag */
+ id = codec_get_id(mov_video_tags, format);
+ if (id <= 0)
+ id = codec_get_id(codec_bmp_tags, format);
+ if (id > 0)
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ }
+
+ dprintf("size=%d 4CC= %c%c%c%c codec_type=%d\n",
+ size,
+ (format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff, (format >> 24) & 0xff,
+ st->codec->codec_type);
+
+ if(st->codec->codec_type==CODEC_TYPE_VIDEO) {
+ st->codec->codec_id = id;
+ get_be16(pb); /* version */
+ get_be16(pb); /* revision level */
+ get_be32(pb); /* vendor */
+ get_be32(pb); /* temporal quality */
+            get_be32(pb); /* spatial quality */
+
+ st->codec->width = get_be16(pb); /* width */
+ st->codec->height = get_be16(pb); /* height */
+
+ get_be32(pb); /* horiz resolution */
+ get_be32(pb); /* vert resolution */
+ get_be32(pb); /* data size, always 0 */
+            frames_per_sample = get_be16(pb); /* frames per sample */
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "frames/samples = %d\n", frames_per_sample);
+#endif
+ get_buffer(pb, codec_name, 32); /* codec name, pascal string (FIXME: true for mp4?) */
+ if (codec_name[0] <= 31) {
+ memcpy(st->codec->codec_name, &codec_name[1],codec_name[0]);
+ st->codec->codec_name[codec_name[0]] = 0;
+ }
+
+ st->codec->bits_per_sample = get_be16(pb); /* depth */
+ st->codec->color_table_id = get_be16(pb); /* colortable id */
+
+ /* figure out the palette situation */
+ color_depth = st->codec->bits_per_sample & 0x1F;
+ color_greyscale = st->codec->bits_per_sample & 0x20;
+
+ /* if the depth is 2, 4, or 8 bpp, file is palettized */
+ if ((color_depth == 2) || (color_depth == 4) ||
+ (color_depth == 8)) {
+
+ if (color_greyscale) {
+
+ /* compute the greyscale palette */
+ color_count = 1 << color_depth;
+ color_index = 255;
+ color_dec = 256 / (color_count - 1);
+ for (j = 0; j < color_count; j++) {
+ r = g = b = color_index;
+ c->palette_control.palette[j] =
+ (r << 16) | (g << 8) | (b);
+ color_index -= color_dec;
+ if (color_index < 0)
+ color_index = 0;
+ }
+
+ } else if (st->codec->color_table_id & 0x08) {
+
+ /* if flag bit 3 is set, use the default palette */
+ color_count = 1 << color_depth;
+ if (color_depth == 2)
+ color_table = ff_qt_default_palette_4;
+ else if (color_depth == 4)
+ color_table = ff_qt_default_palette_16;
+ else
+ color_table = ff_qt_default_palette_256;
+
+ for (j = 0; j < color_count; j++) {
+ r = color_table[j * 4 + 0];
+ g = color_table[j * 4 + 1];
+ b = color_table[j * 4 + 2];
+ c->palette_control.palette[j] =
+ (r << 16) | (g << 8) | (b);
+ }
+
+ } else {
+
+ /* load the palette from the file */
+ color_start = get_be32(pb);
+ color_count = get_be16(pb);
+ color_end = get_be16(pb);
+ for (j = color_start; j <= color_end; j++) {
+ /* each R, G, or B component is 16 bits;
+ * only use the top 8 bits; skip alpha bytes
+ * up front */
+ get_byte(pb);
+ get_byte(pb);
+ r = get_byte(pb);
+ get_byte(pb);
+ g = get_byte(pb);
+ get_byte(pb);
+ b = get_byte(pb);
+ get_byte(pb);
+ c->palette_control.palette[j] =
+ (r << 16) | (g << 8) | (b);
+ }
+ }
+
+ st->codec->palctrl = &c->palette_control;
+ st->codec->palctrl->palette_changed = 1;
+ } else
+ st->codec->palctrl = NULL;
+ } else if(st->codec->codec_type==CODEC_TYPE_AUDIO) {
+ int bits_per_sample;
+ uint16_t version = get_be16(pb);
+
+ st->codec->codec_id = id;
+ get_be16(pb); /* revision level */
+ get_be32(pb); /* vendor */
+
+ st->codec->channels = get_be16(pb); /* channel count */
+ dprintf("audio channels %d\n", st->codec->channels);
+ st->codec->bits_per_sample = get_be16(pb); /* sample size */
+ /* do we need to force to 16 for AMR ? */
+
+ /* handle specific s8 codec */
+ get_be16(pb); /* compression id = 0*/
+ get_be16(pb); /* packet size = 0 */
+
+ st->codec->sample_rate = ((get_be32(pb) >> 16));
+
+ switch (st->codec->codec_id) {
+ case CODEC_ID_PCM_S8:
+ case CODEC_ID_PCM_U8:
+ if (st->codec->bits_per_sample == 16)
+ st->codec->codec_id = CODEC_ID_PCM_S16BE;
+ break;
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S16BE:
+ if (st->codec->bits_per_sample == 8)
+ st->codec->codec_id = CODEC_ID_PCM_S8;
+ else if (st->codec->bits_per_sample == 24)
+ st->codec->codec_id = CODEC_ID_PCM_S24BE;
+ break;
+ default:
+ break;
+ }
+
+            // Read QT version 1 fields. In version 0 these don't exist.
+ dprintf("version =%d, isom =%d\n",version,c->isom);
+ if(!c->isom) {
+ if(version==1) {
+ sc->sample_size_v1.den = get_be32(pb); /* samples per packet */
+ get_be32(pb); /* bytes per packet */
+ sc->sample_size_v1.num = get_be32(pb); /* bytes per frame */
+ get_be32(pb); /* bytes per sample */
+ } else if(version==2) {
+ get_be32(pb); /* sizeof struct only */
+ st->codec->sample_rate = av_int2dbl(get_be64(pb)); /* float 64 */
+ st->codec->channels = get_be32(pb);
+ get_be32(pb); /* always 0x7F000000 */
+ get_be32(pb); /* bits per channel if sound is uncompressed */
+                    get_be32(pb); /* lpcm format specific flag */
+ get_be32(pb); /* bytes per audio packet if constant */
+ get_be32(pb); /* lpcm frames per audio packet if constant */
+ }
+ }
+
+ bits_per_sample = av_get_bits_per_sample(st->codec->codec_id);
+ if (bits_per_sample) {
+ st->codec->bits_per_sample = bits_per_sample;
+ sc->sample_size = (bits_per_sample >> 3) * st->codec->channels;
+ }
+ } else {
+ /* other codec type, just skip (rtp, mp4s, tmcd ...) */
+ url_fskip(pb, size - (url_ftell(pb) - start_pos));
+ }
+ /* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...) */
+ a.size = size - (url_ftell(pb) - start_pos);
+ if (a.size > 8)
+ mov_read_default(c, pb, a);
+ else if (a.size > 0)
+ url_fskip(pb, a.size);
+ }
+
+ if(st->codec->codec_type==CODEC_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1) {
+ st->codec->sample_rate= sc->time_scale;
+ }
+
+ /* special codec parameters handling */
+ switch (st->codec->codec_id) {
+#ifdef CONFIG_H261_DECODER
+ case CODEC_ID_H261:
+#endif
+#ifdef CONFIG_H263_DECODER
+ case CODEC_ID_H263:
+#endif
+#ifdef CONFIG_MPEG4_DECODER
+ case CODEC_ID_MPEG4:
+#endif
+ st->codec->width= 0; /* let decoder init width/height */
+ st->codec->height= 0;
+ break;
+#ifdef CONFIG_FAAD
+ case CODEC_ID_AAC:
+#endif
+#ifdef CONFIG_VORBIS_DECODER
+ case CODEC_ID_VORBIS:
+#endif
+ case CODEC_ID_MP3ON4:
+ st->codec->sample_rate= 0; /* let decoder init parameters properly */
+ break;
+#ifdef CONFIG_DV_DEMUXER
+ case CODEC_ID_DVAUDIO:
+ c->dv_fctx = av_alloc_format_context();
+ c->dv_demux = dv_init_demux(c->dv_fctx);
+ if (!c->dv_demux) {
+ av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
+ return -1;
+ }
+ sc->dv_audio_container = 1;
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ break;
+#endif
+    /* no ifdef since these parameters are always the same */
+ case CODEC_ID_AMR_WB:
+ st->codec->sample_rate= 16000;
+ st->codec->channels= 1; /* really needed */
+ break;
+ case CODEC_ID_AMR_NB:
+ st->codec->sample_rate= 8000;
+ st->codec->channels= 1; /* really needed */
+ break;
+ case CODEC_ID_MP2:
+ st->codec->codec_type = CODEC_TYPE_AUDIO; /* force type after stsd for m1a hdlr */
+ st->need_parsing = 1;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mov_read_stsc(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ entries = get_be32(pb);
+
+ if(entries >= UINT_MAX / sizeof(MOV_sample_to_chunk_tbl))
+ return -1;
+
+#ifdef DEBUG
+av_log(NULL, AV_LOG_DEBUG, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries);
+#endif
+ sc->sample_to_chunk_sz = entries;
+ sc->sample_to_chunk = av_malloc(entries * sizeof(MOV_sample_to_chunk_tbl));
+ if (!sc->sample_to_chunk)
+ return -1;
+ for(i=0; i<entries; i++) {
+ sc->sample_to_chunk[i].first = get_be32(pb);
+ sc->sample_to_chunk[i].count = get_be32(pb);
+ sc->sample_to_chunk[i].id = get_be32(pb);
+ }
+ return 0;
+}
+
+static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ entries = get_be32(pb);
+
+ if(entries >= UINT_MAX / sizeof(long))
+ return -1;
+
+ sc->keyframe_count = entries;
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "keyframe_count = %ld\n", sc->keyframe_count);
+#endif
+ sc->keyframes = av_malloc(entries * sizeof(long));
+ if (!sc->keyframes)
+ return -1;
+ for(i=0; i<entries; i++) {
+ sc->keyframes[i] = get_be32(pb);
+#ifdef DEBUG
+/* av_log(NULL, AV_LOG_DEBUG, "keyframes[]=%ld\n", sc->keyframes[i]); */
+#endif
+ }
+ return 0;
+}
+
+static int mov_read_stsz(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries, sample_size;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ sample_size = get_be32(pb);
+ if (!sc->sample_size) /* do not overwrite value computed in stsd */
+ sc->sample_size = sample_size;
+ entries = get_be32(pb);
+ if(entries >= UINT_MAX / sizeof(long))
+ return -1;
+
+ sc->sample_count = entries;
+ if (sample_size)
+ return 0;
+
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "sample_size = %ld sample_count = %ld\n", sc->sample_size, sc->sample_count);
+#endif
+ sc->sample_sizes = av_malloc(entries * sizeof(long));
+ if (!sc->sample_sizes)
+ return -1;
+ for(i=0; i<entries; i++) {
+ sc->sample_sizes[i] = get_be32(pb);
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "sample_sizes[]=%ld\n", sc->sample_sizes[i]);
+#endif
+ }
+ return 0;
+}
+
+static int mov_read_stts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+ int64_t duration=0;
+ int64_t total_sample_count=0;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+ entries = get_be32(pb);
+ if(entries >= UINT_MAX / sizeof(Time2Sample))
+ return -1;
+
+ sc->stts_count = entries;
+ sc->stts_data = av_malloc(entries * sizeof(Time2Sample));
+
+#ifdef DEBUG
+av_log(NULL, AV_LOG_DEBUG, "track[%i].stts.entries = %i\n", c->fc->nb_streams-1, entries);
+#endif
+
+ sc->time_rate=0;
+
+ for(i=0; i<entries; i++) {
+ int sample_duration;
+ int sample_count;
+
+ sample_count=get_be32(pb);
+ sample_duration = get_be32(pb);
+ sc->stts_data[i].count= sample_count;
+ sc->stts_data[i].duration= sample_duration;
+
+ sc->time_rate= ff_gcd(sc->time_rate, sample_duration);
+
+ dprintf("sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);
+
+ duration+=(int64_t)sample_duration*sample_count;
+ total_sample_count+=sample_count;
+ }
+
+ st->nb_frames= total_sample_count;
+ if(duration)
+ st->duration= duration;
+ return 0;
+}
+
+static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+ entries = get_be32(pb);
+ if(entries >= UINT_MAX / sizeof(Time2Sample))
+ return -1;
+
+ sc->ctts_count = entries;
+ sc->ctts_data = av_malloc(entries * sizeof(Time2Sample));
+
+ dprintf("track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);
+
+ for(i=0; i<entries; i++) {
+ int count =get_be32(pb);
+ int duration =get_be32(pb);
+
+ if (duration < 0) {
+ av_log(c->fc, AV_LOG_ERROR, "negative ctts, ignoring\n");
+ sc->ctts_count = 0;
+ url_fskip(pb, 8 * (entries - i - 1));
+ break;
+ }
+ sc->ctts_data[i].count = count;
+ sc->ctts_data[i].duration= duration;
+
+ sc->time_rate= ff_gcd(sc->time_rate, duration);
+ }
+ return 0;
+}
+
+static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st;
+ MOVStreamContext *sc;
+
+ st = av_new_stream(c->fc, c->fc->nb_streams);
+ if (!st) return -2;
+ sc = av_mallocz(sizeof(MOVStreamContext));
+ if (!sc) {
+ av_free(st);
+ return -1;
+ }
+
+ st->priv_data = sc;
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ st->start_time = 0; /* XXX: check */
+ c->streams[c->fc->nb_streams-1] = sc;
+
+ return mov_read_default(c, pb, atom);
+}
+
+static int mov_read_tkhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ int version = get_byte(pb);
+
+ get_byte(pb); get_byte(pb);
+ get_byte(pb); /* flags */
+ /*
+ MOV_TRACK_ENABLED 0x0001
+ MOV_TRACK_IN_MOVIE 0x0002
+ MOV_TRACK_IN_PREVIEW 0x0004
+ MOV_TRACK_IN_POSTER 0x0008
+ */
+
+ if (version == 1) {
+ get_be64(pb);
+ get_be64(pb);
+ } else {
+ get_be32(pb); /* creation time */
+ get_be32(pb); /* modification time */
+ }
+ st->id = (int)get_be32(pb); /* track id (NOT 0 !)*/
+ get_be32(pb); /* reserved */
+ st->start_time = 0; /* check */
+ (version == 1) ? get_be64(pb) : get_be32(pb); /* highlevel (considering edits) duration in movie timebase */
+ get_be32(pb); /* reserved */
+ get_be32(pb); /* reserved */
+
+ get_be16(pb); /* layer */
+ get_be16(pb); /* alternate group */
+ get_be16(pb); /* volume */
+ get_be16(pb); /* reserved */
+
+ url_fskip(pb, 36); /* display matrix */
+
+ /* those are fixed-point */
+ get_be32(pb); /* track width */
+ get_be32(pb); /* track height */
+
+ return 0;
+}
+
+/* this atom should be null (from specs), but some buggy files put the 'moov' atom inside it... */
+/* like the files created with Adobe Premiere 5.0, for samples see */
+/* http://graphics.tudelft.nl/~wouter/publications/soundtests/ */
+static int mov_read_wide(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int err;
+
+ if (atom.size < 8)
+ return 0; /* continue */
+ if (get_be32(pb) != 0) { /* 0 sized mdat atom... use the 'wide' atom size */
+ url_fskip(pb, atom.size - 4);
+ return 0;
+ }
+ atom.type = get_le32(pb);
+ atom.offset += 8;
+ atom.size -= 8;
+ if (atom.type != MKTAG('m', 'd', 'a', 't')) {
+ url_fskip(pb, atom.size);
+ return 0;
+ }
+ err = mov_read_mdat(c, pb, atom);
+ return err;
+}
+
+static int mov_read_cmov(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+#ifdef CONFIG_ZLIB
+ ByteIOContext ctx;
+ uint8_t *cmov_data;
+ uint8_t *moov_data; /* uncompressed data */
+ long cmov_len, moov_len;
+ int ret;
+
+ get_be32(pb); /* dcom atom */
+ if (get_le32(pb) != MKTAG( 'd', 'c', 'o', 'm' ))
+ return -1;
+ if (get_le32(pb) != MKTAG( 'z', 'l', 'i', 'b' )) {
+ av_log(NULL, AV_LOG_ERROR, "unknown compression for cmov atom !");
+ return -1;
+ }
+ get_be32(pb); /* cmvd atom */
+ if (get_le32(pb) != MKTAG( 'c', 'm', 'v', 'd' ))
+ return -1;
+ moov_len = get_be32(pb); /* uncompressed size */
+ cmov_len = atom.size - 6 * 4;
+
+ cmov_data = av_malloc(cmov_len);
+ if (!cmov_data)
+ return -1;
+ moov_data = av_malloc(moov_len);
+ if (!moov_data) {
+ av_free(cmov_data);
+ return -1;
+ }
+ get_buffer(pb, cmov_data, cmov_len);
+ if(uncompress (moov_data, (uLongf *) &moov_len, (const Bytef *)cmov_data, cmov_len) != Z_OK)
+ return -1;
+ if(init_put_byte(&ctx, moov_data, moov_len, 0, NULL, NULL, NULL, NULL) != 0)
+ return -1;
+ atom.type = MKTAG( 'm', 'o', 'o', 'v' );
+ atom.offset = 0;
+ atom.size = moov_len;
+#ifdef DEBUG
+// { int fd = open("/tmp/uncompheader.mov", O_WRONLY | O_CREAT); write(fd, moov_data, moov_len); close(fd); }
+#endif
+ ret = mov_read_default(c, &ctx, atom);
+ av_free(moov_data);
+ av_free(cmov_data);
+ return ret;
+#else
+ av_log(c->fc, AV_LOG_ERROR, "this file requires zlib support compiled in\n");
+ return -1;
+#endif
+}
+
+/* edit list atom */
+static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int i, edit_count;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+ edit_count= c->streams[c->fc->nb_streams-1]->edit_count = get_be32(pb); /* entries */
+
+ for(i=0; i<edit_count; i++){
+ get_be32(pb); /* Track duration */
+ get_be32(pb); /* Media time */
+ get_be32(pb); /* Media rate */
+ }
+ dprintf("track[%i].edit_count = %i\n", c->fc->nb_streams-1, c->streams[c->fc->nb_streams-1]->edit_count);
+ return 0;
+}
+
+static const MOVParseTableEntry mov_default_parse_table[] = {
+/* mp4 atoms */
+{ MKTAG( 'c', 'o', '6', '4' ), mov_read_stco },
+{ MKTAG( 'c', 't', 't', 's' ), mov_read_ctts }, /* composition time to sample */
+{ MKTAG( 'e', 'd', 't', 's' ), mov_read_default },
+{ MKTAG( 'e', 'l', 's', 't' ), mov_read_elst },
+{ MKTAG( 'e', 'n', 'd', 'a' ), mov_read_enda },
+{ MKTAG( 'f', 't', 'y', 'p' ), mov_read_ftyp },
+{ MKTAG( 'h', 'd', 'l', 'r' ), mov_read_hdlr },
+{ MKTAG( 'j', 'p', '2', 'h' ), mov_read_jp2h },
+{ MKTAG( 'm', 'd', 'a', 't' ), mov_read_mdat },
+{ MKTAG( 'm', 'd', 'h', 'd' ), mov_read_mdhd },
+{ MKTAG( 'm', 'd', 'i', 'a' ), mov_read_default },
+{ MKTAG( 'm', 'i', 'n', 'f' ), mov_read_default },
+{ MKTAG( 'm', 'o', 'o', 'v' ), mov_read_moov },
+{ MKTAG( 'm', 'v', 'h', 'd' ), mov_read_mvhd },
+{ MKTAG( 'S', 'M', 'I', ' ' ), mov_read_smi }, /* Sorenson extension ??? */
+{ MKTAG( 'a', 'l', 'a', 'c' ), mov_read_alac }, /* alac specific atom */
+{ MKTAG( 'a', 'v', 'c', 'C' ), mov_read_avcC },
+{ MKTAG( 's', 't', 'b', 'l' ), mov_read_default },
+{ MKTAG( 's', 't', 'c', 'o' ), mov_read_stco },
+{ MKTAG( 's', 't', 's', 'c' ), mov_read_stsc },
+{ MKTAG( 's', 't', 's', 'd' ), mov_read_stsd }, /* sample description */
+{ MKTAG( 's', 't', 's', 's' ), mov_read_stss }, /* sync sample */
+{ MKTAG( 's', 't', 's', 'z' ), mov_read_stsz }, /* sample size */
+{ MKTAG( 's', 't', 't', 's' ), mov_read_stts },
+{ MKTAG( 't', 'k', 'h', 'd' ), mov_read_tkhd }, /* track header */
+{ MKTAG( 't', 'r', 'a', 'k' ), mov_read_trak },
+{ MKTAG( 'w', 'a', 'v', 'e' ), mov_read_wave },
+{ MKTAG( 'c', 't', 'a', 'b' ), mov_read_ctab },
+{ MKTAG( 'e', 's', 'd', 's' ), mov_read_esds },
+{ MKTAG( 'w', 'i', 'd', 'e' ), mov_read_wide }, /* placeholder */
+{ MKTAG( 'c', 'm', 'o', 'v' ), mov_read_cmov },
+{ 0L, NULL }
+};
+
+static void mov_free_stream_context(MOVStreamContext *sc)
+{
+ if(sc) {
+ av_freep(&sc->ctts_data);
+ av_freep(&sc);
+ }
+}
+
+/* XXX: is it sufficient ? */
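+/* Every MOV/MP4 atom starts with an 8 byte header: a 32 bit big-endian size
+ * (which includes the header itself) followed by a four character type code.
+ * The probe walks these headers and scores the type codes it recognizes;
+ * an illustrative sketch of how the header is read here:
+ *
+ *   size = BE_32(buf);       length of the whole atom in bytes
+ *   type = LE_32(buf + 4);   fourcc, compared against MKTAG(...)
+ */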
+static int mov_probe(AVProbeData *p)
+{
+ unsigned int offset;
+ uint32_t tag;
+ int score = 0;
+
+ /* check file header */
+ if (p->buf_size <= 12)
+ return 0;
+ offset = 0;
+ for(;;) {
+ /* ignore invalid offset */
+ if ((offset + 8) > (unsigned int)p->buf_size)
+ return score;
+ tag = LE_32(p->buf + offset + 4);
+ switch(tag) {
+ /* check for obvious tags */
+ case MKTAG( 'j', 'P', ' ', ' ' ): /* jpeg 2000 signature */
+ case MKTAG( 'm', 'o', 'o', 'v' ):
+ case MKTAG( 'm', 'd', 'a', 't' ):
+ case MKTAG( 'p', 'n', 'o', 't' ): /* detect movs with preview pics like ew.mov and april.mov */
+ case MKTAG( 'u', 'd', 't', 'a' ): /* Packet Video PVAuthor adds this and a lot more junk */
+ return AVPROBE_SCORE_MAX;
+ /* these are more common words, so rate them a bit lower */
+ case MKTAG( 'w', 'i', 'd', 'e' ):
+ case MKTAG( 'f', 'r', 'e', 'e' ):
+ case MKTAG( 'j', 'u', 'n', 'k' ):
+ case MKTAG( 'p', 'i', 'c', 't' ):
+ return AVPROBE_SCORE_MAX - 5;
+ case MKTAG( 'f', 't', 'y', 'p' ):
+ case MKTAG( 's', 'k', 'i', 'p' ):
+ case MKTAG( 'u', 'u', 'i', 'd' ):
+ offset = BE_32(p->buf+offset) + offset;
+ /* if we only find these because the probe data is too small, at least give them some score */
+ score = AVPROBE_SCORE_MAX - 50;
+ break;
+ default:
+ /* unrecognized tag */
+ return score;
+ }
+ }
+ return score;
+}
+
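+/* Build the libavformat index for one stream by walking the chunk offset
+ * table (stco) and expanding the run-length encoded sample-to-chunk table
+ * (stsc).  Illustrative example of the stsc semantics used below: the
+ * entries {first:1, count:3} and {first:5, count:1} mean chunks 1-4 carry
+ * three samples each and every chunk from 5 on carries a single sample. */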
+static void mov_build_index(MOVContext *mov, AVStream *st)
+{
+ MOVStreamContext *sc = st->priv_data;
+ offset_t current_offset;
+ int64_t current_dts = 0;
+ int stts_index = 0;
+ int stsc_index = 0;
+ int stss_index = 0;
+ int i, j, k;
+
+ if (sc->sample_sizes || st->codec->codec_type == CODEC_TYPE_VIDEO || sc->dv_audio_container) {
+ int keyframe, sample_size;
+ int current_sample = 0;
+ int stts_sample = 0;
+ int distance = 0;
+
+ st->nb_frames = sc->sample_count;
+ for (i = 0; i < sc->chunk_count; i++) {
+ current_offset = sc->chunk_offsets[i];
+ if (stsc_index + 1 < sc->sample_to_chunk_sz && i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
+ stsc_index++;
+ for (j = 0; j < sc->sample_to_chunk[stsc_index].count; j++) {
+ keyframe = !sc->keyframe_count || current_sample + 1 == sc->keyframes[stss_index];
+ if (keyframe) {
+ distance = 0;
+ if (stss_index + 1 < sc->keyframe_count)
+ stss_index++;
+ }
+ sample_size = sc->sample_size > 0 ? sc->sample_size : sc->sample_sizes[current_sample];
+ dprintf("AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", size %d, distance %d, keyframe %d\n",
+ st->index, current_sample, current_offset, current_dts, sample_size, distance, keyframe);
+ av_add_index_entry(st, current_offset, current_dts, sample_size, distance, keyframe ? AVINDEX_KEYFRAME : 0);
+ current_offset += sample_size;
+ assert(sc->stts_data[stts_index].duration % sc->time_rate == 0);
+ current_dts += sc->stts_data[stts_index].duration / sc->time_rate;
+ distance++;
+ stts_sample++;
+ if (current_sample + 1 < sc->sample_count)
+ current_sample++;
+ if (stts_index + 1 < sc->stts_count && stts_sample == sc->stts_data[stts_index].count) {
+ stts_sample = 0;
+ stts_index++;
+ }
+ }
+ }
+ } else { /* read whole chunk */
+ int chunk_samples, chunk_size, chunk_duration;
+
+ for (i = 0; i < sc->chunk_count; i++) {
+ current_offset = sc->chunk_offsets[i];
+ if (stsc_index + 1 < sc->sample_to_chunk_sz && i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
+ stsc_index++;
+ chunk_samples = sc->sample_to_chunk[stsc_index].count;
+ /* get chunk size */
+ if (sc->sample_size > 1 || st->codec->codec_id == CODEC_ID_PCM_U8 || st->codec->codec_id == CODEC_ID_PCM_S8)
+ chunk_size = chunk_samples * sc->sample_size;
+ else if (sc->sample_size_v1.den > 0 && (chunk_samples * sc->sample_size_v1.num % sc->sample_size_v1.den == 0))
+ chunk_size = chunk_samples * sc->sample_size_v1.num / sc->sample_size_v1.den;
+ else { /* workaround to find nearest next chunk offset */
+ chunk_size = INT_MAX;
+ for (j = 0; j < mov->total_streams; j++) {
+ MOVStreamContext *msc = mov->streams[j];
+
+ for (k = msc->next_chunk; k < msc->chunk_count; k++) {
+ if (msc->chunk_offsets[k] > current_offset && msc->chunk_offsets[k] - current_offset < chunk_size) {
+ chunk_size = msc->chunk_offsets[k] - current_offset;
+ msc->next_chunk = k;
+ break;
+ }
+ }
+ }
+ /* check for last chunk */
+ if (chunk_size == INT_MAX)
+ for (j = 0; j < mov->mdat_count; j++) {
+ dprintf("mdat %d, offset %"PRIx64", size %"PRId64", current offset %"PRIx64"\n",
+ j, mov->mdat_list[j].offset, mov->mdat_list[j].size, current_offset);
+ if (mov->mdat_list[j].offset <= current_offset && mov->mdat_list[j].offset + mov->mdat_list[j].size > current_offset)
+ chunk_size = mov->mdat_list[j].offset + mov->mdat_list[j].size - current_offset;
+ }
+ assert(chunk_size != INT_MAX);
+ for (j = 0; j < mov->total_streams; j++) {
+ mov->streams[j]->next_chunk = 0;
+ }
+ }
+ av_add_index_entry(st, current_offset, current_dts, chunk_size, 0, AVINDEX_KEYFRAME);
+ /* get chunk duration */
+ chunk_duration = 0;
+ while (chunk_samples > 0) {
+ if (chunk_samples < sc->stts_data[stts_index].count) {
+ chunk_duration += sc->stts_data[stts_index].duration * chunk_samples;
+ sc->stts_data[stts_index].count -= chunk_samples;
+ break;
+ } else {
+ chunk_duration += sc->stts_data[stts_index].duration * chunk_samples;
+ chunk_samples -= sc->stts_data[stts_index].count;
+ if (stts_index + 1 < sc->stts_count) {
+ stts_index++;
+ }
+ }
+ }
+ dprintf("AVIndex stream %d, chunk %d, offset %"PRIx64", dts %"PRId64", size %d, duration %d\n",
+ st->index, i, current_offset, current_dts, chunk_size, chunk_duration);
+ assert(chunk_duration % sc->time_rate == 0);
+ current_dts += chunk_duration / sc->time_rate;
+ }
+ }
+ /* adjust sample count to avindex entries */
+ sc->sample_count = st->nb_index_entries;
+}
+
+static int mov_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ MOVContext *mov = (MOVContext *) s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int i, err;
+ MOV_atom_t atom = { 0, 0, 0 };
+
+ mov->fc = s;
+ mov->parse_table = mov_default_parse_table;
+
+ if(!url_is_streamed(pb)) /* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */
+ atom.size = url_fsize(pb);
+ else
+ atom.size = 0x7FFFFFFFFFFFFFFFLL;
+
+ /* check MOV header */
+ err = mov_read_default(mov, pb, atom);
+ if (err<0 || (!mov->found_moov && !mov->found_mdat)) {
+ av_log(s, AV_LOG_ERROR, "mov: header not found !!! (err:%d, moov:%d, mdat:%d) pos:%"PRId64"\n",
+ err, mov->found_moov, mov->found_mdat, url_ftell(pb));
+ return -1;
+ }
+ dprintf("on_parse_exit_offset=%d\n", (int) url_ftell(pb));
+
+ /* some cleanup: make sure we are at the mdat atom */
+ if(!url_is_streamed(pb) && (url_ftell(pb) != mov->mdat_offset))
+ url_fseek(pb, mov->mdat_offset, SEEK_SET);
+
+ mov->total_streams = s->nb_streams;
+
+ for(i=0; i<mov->total_streams; i++) {
+ MOVStreamContext *sc = mov->streams[i];
+
+ if(!sc->time_rate)
+ sc->time_rate=1;
+ if(!sc->time_scale)
+ sc->time_scale= mov->time_scale;
+ av_set_pts_info(s->streams[i], 64, sc->time_rate, sc->time_scale);
+
+ if(s->streams[i]->duration != AV_NOPTS_VALUE){
+ assert(s->streams[i]->duration % sc->time_rate == 0);
+ s->streams[i]->duration /= sc->time_rate;
+ }
+ sc->ffindex = i;
+ mov_build_index(mov, s->streams[i]);
+ }
+
+ for(i=0; i<mov->total_streams; i++) {
+ /* don't need these anymore */
+ av_freep(&mov->streams[i]->chunk_offsets);
+ av_freep(&mov->streams[i]->sample_to_chunk);
+ av_freep(&mov->streams[i]->sample_sizes);
+ av_freep(&mov->streams[i]->keyframes);
+ av_freep(&mov->streams[i]->stts_data);
+ }
+ av_freep(&mov->mdat_list);
+ return 0;
+}
+
+static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ MOVContext *mov = s->priv_data;
+ MOVStreamContext *sc = 0;
+ AVIndexEntry *sample = 0;
+ int64_t best_dts = INT64_MAX;
+ int i;
+
+ for (i = 0; i < mov->total_streams; i++) {
+ MOVStreamContext *msc = mov->streams[i];
+
+ if (s->streams[i]->discard != AVDISCARD_ALL && msc->current_sample < msc->sample_count) {
+ AVIndexEntry *current_sample = &s->streams[i]->index_entries[msc->current_sample];
+ int64_t dts = av_rescale(current_sample->timestamp * (int64_t)msc->time_rate, AV_TIME_BASE, msc->time_scale);
+
+ dprintf("stream %d, sample %ld, dts %"PRId64"\n", i, msc->current_sample, dts);
+ if (dts < best_dts) {
+ sample = current_sample;
+ best_dts = dts;
+ sc = msc;
+ }
+ }
+ }
+ if (!sample)
+ return -1;
+ /* must be done just before reading, to avoid infinite loop on sample */
+ sc->current_sample++;
+ if (sample->pos >= url_fsize(&s->pb)) {
+ av_log(mov->fc, AV_LOG_ERROR, "stream %d, offset 0x%"PRIx64": partial file\n", sc->ffindex, sample->pos);
+ return -1;
+ }
+#ifdef CONFIG_DV_DEMUXER
+ if (sc->dv_audio_container) {
+ dv_get_packet(mov->dv_demux, pkt);
+ dprintf("dv audio pkt size %d\n", pkt->size);
+ } else {
+#endif
+ url_fseek(&s->pb, sample->pos, SEEK_SET);
+ av_get_packet(&s->pb, pkt, sample->size);
+#ifdef CONFIG_DV_DEMUXER
+ if (mov->dv_demux) {
+ void *pkt_destruct_func = pkt->destruct;
+ dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
+ pkt->destruct = pkt_destruct_func;
+ }
+ }
+#endif
+ pkt->stream_index = sc->ffindex;
+ pkt->dts = sample->timestamp;
+ if (sc->ctts_data) {
+ assert(sc->ctts_data[sc->sample_to_ctime_index].duration % sc->time_rate == 0);
+ pkt->pts = pkt->dts + sc->ctts_data[sc->sample_to_ctime_index].duration / sc->time_rate;
+ /* update ctts context */
+ sc->sample_to_ctime_sample++;
+ if (sc->sample_to_ctime_index < sc->ctts_count && sc->ctts_data[sc->sample_to_ctime_index].count == sc->sample_to_ctime_sample) {
+ sc->sample_to_ctime_index++;
+ sc->sample_to_ctime_sample = 0;
+ }
+ } else {
+ pkt->pts = pkt->dts;
+ }
+ pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? PKT_FLAG_KEY : 0;
+ pkt->pos = sample->pos;
+ dprintf("stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n", pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration);
+ return 0;
+}
+
+static int mov_seek_stream(AVStream *st, int64_t timestamp, int flags)
+{
+ MOVStreamContext *sc = st->priv_data;
+ int sample, time_sample;
+ int i;
+
+ sample = av_index_search_timestamp(st, timestamp, flags);
+ dprintf("stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
+ if (sample < 0) /* not sure what to do */
+ return -1;
+ sc->current_sample = sample;
+ dprintf("stream %d, found sample %ld\n", st->index, sc->current_sample);
+ /* adjust ctts index */
+ if (sc->ctts_data) {
+ time_sample = 0;
+ for (i = 0; i < sc->ctts_count; i++) {
+ time_sample += sc->ctts_data[i].count;
+ if (time_sample >= sc->current_sample) {
+ sc->sample_to_ctime_index = i;
+ sc->sample_to_ctime_sample = time_sample - sc->current_sample;
+ break;
+ }
+ }
+ }
+ return sample;
+}
+
+static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
+{
+ AVStream *st;
+ int64_t seek_timestamp, timestamp;
+ int sample;
+ int i;
+
+ if (stream_index >= s->nb_streams)
+ return -1;
+
+ st = s->streams[stream_index];
+ sample = mov_seek_stream(st, sample_time, flags);
+ if (sample < 0)
+ return -1;
+
+ /* adjust seek timestamp to found sample timestamp */
+ seek_timestamp = st->index_entries[sample].timestamp;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ if (stream_index == i || st->discard == AVDISCARD_ALL)
+ continue;
+
+ timestamp = av_rescale_q(seek_timestamp, s->streams[stream_index]->time_base, st->time_base);
+ mov_seek_stream(st, timestamp, flags);
+ }
+ return 0;
+}
+
+static int mov_read_close(AVFormatContext *s)
+{
+ int i;
+ MOVContext *mov = (MOVContext *) s->priv_data;
+ for(i=0; i<mov->total_streams; i++)
+ mov_free_stream_context(mov->streams[i]);
+ /* free color tabs */
+ for(i=0; i<mov->ctab_size; i++)
+ av_freep(&mov->ctab[i]);
+ if(mov->dv_demux){
+ for(i=0; i<mov->dv_fctx->nb_streams; i++){
+ av_freep(&mov->dv_fctx->streams[i]->codec);
+ av_freep(&mov->dv_fctx->streams[i]);
+ }
+ av_freep(&mov->dv_fctx);
+ av_freep(&mov->dv_demux);
+ }
+ av_freep(&mov->ctab);
+ return 0;
+}
+
+AVInputFormat mov_demuxer = {
+ "mov,mp4,m4a,3gp,3g2,mj2",
+ "QuickTime/MPEG4/Motion JPEG 2000 format",
+ sizeof(MOVContext),
+ mov_probe,
+ mov_read_header,
+ mov_read_packet,
+ mov_read_close,
+ mov_read_seek,
+};
diff --git a/contrib/ffmpeg/libavformat/movenc.c b/contrib/ffmpeg/libavformat/movenc.c
new file mode 100644
index 000000000..736d1594a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/movenc.c
@@ -0,0 +1,1724 @@
+/*
+ * MOV, 3GP, MP4 muxer
+ * Copyright (c) 2003 Thomas Raivio.
+ * Copyright (c) 2004 Gildas Bazin <gbazin at videolan dot org>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+#include "avio.h"
+#include "isom.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define MOV_INDEX_CLUSTER_SIZE 16384
+#define globalTimescale 1000
+
+#define MODE_MP4 0
+#define MODE_MOV 1
+#define MODE_3GP 2
+#define MODE_PSP 3 // example working PSP command line:
+// ffmpeg -i testinput.avi -f psp -r 14.985 -s 320x240 -b 768 -ar 24000 -ab 32 M4V00001.MP4
+#define MODE_3G2 4
+
+typedef struct MOVIentry {
+ unsigned int flags, size;
+ uint64_t pos;
+ unsigned int samplesInChunk;
+ char key_frame;
+ unsigned int entries;
+ int64_t cts;
+ int64_t dts;
+} MOVIentry;
+
+typedef struct MOVIndex {
+ int mode;
+ int entry;
+ long timescale;
+ long time;
+ int64_t trackDuration;
+ long sampleCount;
+ long sampleDuration;
+ long sampleSize;
+ int hasKeyframes;
+ int hasBframes;
+ int language;
+ int trackID;
+ int tag;
+ AVCodecContext *enc;
+
+ int vosLen;
+ uint8_t *vosData;
+ MOVIentry *cluster;
+ int audio_vbr;
+} MOVTrack;
+
+typedef struct MOVContext {
+ int mode;
+ int64_t time;
+ int nb_streams;
+ offset_t mdat_pos;
+ uint64_t mdat_size;
+ long timescale;
+ MOVTrack tracks[MAX_STREAMS];
+} MOVContext;
+
+//FIXME support 64 bit variant with wide placeholders
+static offset_t updateSize (ByteIOContext *pb, offset_t pos)
+{
+ offset_t curpos = url_ftell(pb);
+ url_fseek(pb, pos, SEEK_SET);
+ put_be32(pb, curpos - pos); /* rewrite size */
+ url_fseek(pb, curpos, SEEK_SET);
+
+ return curpos - pos;
+}
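+
+#if 0
+/* Illustrative sketch (hypothetical helper): the write-then-patch idiom used
+ * by almost every mov_write_*_tag() below.  The atom size is not known up
+ * front, so a zero placeholder is written, the payload follows, and
+ * updateSize() seeks back to fill in the real 32 bit size. */
+static int mov_write_example_tag(ByteIOContext *pb)
+{
+ offset_t pos = url_ftell(pb); /* position of the size field */
+ put_be32(pb, 0); /* size placeholder */
+ put_tag(pb, "free"); /* atom type */
+ /* ... atom payload would be written here ... */
+ return updateSize (pb, pos); /* patch the placeholder, return the atom size */
+}
+#endif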
+
+/* Chunk offset atom */
+static int mov_write_stco_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int i;
+ int mode64 = 0; // use 32 bit size variant if possible
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ if (pos > UINT32_MAX) {
+ mode64 = 1;
+ put_tag(pb, "co64");
+ } else
+ put_tag(pb, "stco");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, track->entry); /* entry count */
+ for (i=0; i<track->entry; i++) {
+ if(mode64 == 1)
+ put_be64(pb, track->cluster[i].pos);
+ else
+ put_be32(pb, track->cluster[i].pos);
+ }
+ return updateSize (pb, pos);
+}
+
+/* Sample size atom */
+static int mov_write_stsz_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int equalChunks = 1;
+ int i, j, entries = 0, tst = -1, oldtst = -1;
+
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "stsz");
+ put_be32(pb, 0); /* version & flags */
+
+ for (i=0; i<track->entry; i++) {
+ tst = track->cluster[i].size/track->cluster[i].entries;
+ if(oldtst != -1 && tst != oldtst) {
+ equalChunks = 0;
+ }
+ oldtst = tst;
+ entries += track->cluster[i].entries;
+ }
+ if (equalChunks) {
+ int sSize = track->cluster[0].size/track->cluster[0].entries;
+ put_be32(pb, sSize); // sample size
+ put_be32(pb, entries); // sample count
+ }
+ else {
+ put_be32(pb, 0); // sample size
+ put_be32(pb, entries); // sample count
+ for (i=0; i<track->entry; i++) {
+ for ( j=0; j<track->cluster[i].entries; j++) {
+ put_be32(pb, track->cluster[i].size /
+ track->cluster[i].entries);
+ }
+ }
+ }
+ return updateSize (pb, pos);
+}
+
+/* Sample to chunk atom */
+static int mov_write_stsc_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int index = 0, oldval = -1, i;
+ offset_t entryPos, curpos;
+
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "stsc");
+ put_be32(pb, 0); // version & flags
+ entryPos = url_ftell(pb);
+ put_be32(pb, track->entry); // entry count
+ for (i=0; i<track->entry; i++) {
+ if(oldval != track->cluster[i].samplesInChunk)
+ {
+ put_be32(pb, i+1); // first chunk
+ put_be32(pb, track->cluster[i].samplesInChunk); // samples per chunk
+ put_be32(pb, 0x1); // sample description index
+ oldval = track->cluster[i].samplesInChunk;
+ index++;
+ }
+ }
+ curpos = url_ftell(pb);
+ url_fseek(pb, entryPos, SEEK_SET);
+ put_be32(pb, index); // rewrite size
+ url_fseek(pb, curpos, SEEK_SET);
+
+ return updateSize (pb, pos);
+}
+
+/* Sync sample atom */
+static int mov_write_stss_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t curpos, entryPos;
+ int i, index = 0;
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); // size
+ put_tag(pb, "stss");
+ put_be32(pb, 0); // version & flags
+ entryPos = url_ftell(pb);
+ put_be32(pb, track->entry); // entry count
+ for (i=0; i<track->entry; i++) {
+ if(track->cluster[i].key_frame == 1) {
+ put_be32(pb, i+1);
+ index++;
+ }
+ }
+ curpos = url_ftell(pb);
+ url_fseek(pb, entryPos, SEEK_SET);
+ put_be32(pb, index); // rewrite size
+ url_fseek(pb, curpos, SEEK_SET);
+ return updateSize (pb, pos);
+}
+
+static int mov_write_amr_tag(ByteIOContext *pb, MOVTrack *track)
+{
+ put_be32(pb, 0x11); /* size */
+ if (track->mode == MODE_MOV) put_tag(pb, "samr");
+ else put_tag(pb, "damr");
+ put_tag(pb, "FFMP");
+ put_byte(pb, 0); /* decoder version */
+
+ put_be16(pb, 0x81FF); /* Mode set (all modes for AMR_NB) */
+ put_byte(pb, 0x00); /* Mode change period (no restriction) */
+ put_byte(pb, 0x01); /* Frames per sample */
+ return 0x11;
+}
+
+static int mov_write_enda_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 10);
+ put_tag(pb, "enda");
+ put_be16(pb, 1); /* little endian */
+ return 10;
+}
+
+static unsigned int descrLength(unsigned int len)
+{
+ int i;
+ for(i=1; len>>(7*i); i++);
+ return len + 1 + i;
+}
+
+static void putDescr(ByteIOContext *pb, int tag, unsigned int size)
+{
+ int i= descrLength(size) - size - 2;
+ put_byte(pb, tag);
+ for(; i>0; i--)
+ put_byte(pb, (size>>(7*i)) | 0x80);
+ put_byte(pb, size & 0x7F);
+}
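+
+/* Worked example (illustrative): an MPEG-4 descriptor length is coded 7 bits
+ * per byte with the top bit used as a continuation flag.  A 200 byte payload
+ * needs two length bytes, so descrLength(200) == 200 + 1 + 2 == 203 and
+ * putDescr(pb, tag, 200) emits the tag byte followed by 0x81 0x48
+ * (0x81 = 0x80 | (200 >> 7), 0x48 = 200 & 0x7F). */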
+
+static int mov_write_esds_tag(ByteIOContext *pb, MOVTrack* track) // Basic
+{
+ offset_t pos = url_ftell(pb);
+ int decoderSpecificInfoLen = track->vosLen ? descrLength(track->vosLen):0;
+
+ put_be32(pb, 0); // size
+ put_tag(pb, "esds");
+ put_be32(pb, 0); // Version
+
+ // ES descriptor
+ putDescr(pb, 0x03, 3 + descrLength(13 + decoderSpecificInfoLen) +
+ descrLength(1));
+ put_be16(pb, track->trackID);
+ put_byte(pb, 0x00); // flags (= no flags)
+
+ // DecoderConfig descriptor
+ putDescr(pb, 0x04, 13 + decoderSpecificInfoLen);
+
+ // Object type indication
+ put_byte(pb, codec_get_tag(ff_mov_obj_type, track->enc->codec_id));
+
+ // the following field is made of 6 bits identifying the stream type (4 for video, 5 for audio)
+ // plus 1 bit to indicate upstream and 1 bit set to 1 (reserved)
+ if(track->enc->codec_type == CODEC_TYPE_AUDIO)
+ put_byte(pb, 0x15); // flags (= Audiostream)
+ else
+ put_byte(pb, 0x11); // flags (= Visualstream)
+
+ put_byte(pb, track->enc->rc_buffer_size>>(3+16)); // Buffersize DB (24 bits)
+ put_be16(pb, (track->enc->rc_buffer_size>>3)&0xFFFF); // Buffersize DB
+
+ put_be32(pb, FFMAX(track->enc->bit_rate, track->enc->rc_max_rate)); // maxbitrate (FIXME should be max rate in any 1 sec window)
+ if(track->enc->rc_max_rate != track->enc->rc_min_rate || track->enc->rc_min_rate==0)
+ put_be32(pb, 0); // vbr
+ else
+ put_be32(pb, track->enc->rc_max_rate); // avg bitrate
+
+ if (track->vosLen)
+ {
+ // DecoderSpecific info descriptor
+ putDescr(pb, 0x05, track->vosLen);
+ put_buffer(pb, track->vosData, track->vosLen);
+ }
+
+
+ // SL descriptor
+ putDescr(pb, 0x06, 1);
+ put_byte(pb, 0x02);
+ return updateSize (pb, pos);
+}
+
+static int mov_write_wave_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "wave");
+
+ put_be32(pb, 12); /* size */
+ put_tag(pb, "frma");
+ put_le32(pb, track->tag);
+
+ if (track->enc->codec_id == CODEC_ID_AAC) {
+ /* useless atom needed by mplayer and the iPod, not needed by QuickTime */
+ put_be32(pb, 12); /* size */
+ put_tag(pb, "mp4a");
+ put_be32(pb, 0);
+ mov_write_esds_tag(pb, track);
+ } else if (track->enc->codec_id == CODEC_ID_PCM_S24LE ||
+ track->enc->codec_id == CODEC_ID_PCM_S32LE) {
+ mov_write_enda_tag(pb);
+ } else if (track->enc->codec_id == CODEC_ID_AMR_NB) {
+ mov_write_amr_tag(pb, track);
+ }
+
+ put_be32(pb, 8); /* size */
+ put_be32(pb, 0); /* null tag */
+
+ return updateSize (pb, pos);
+}
+
+static const CodecTag codec_movaudio_tags[] = {
+ { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') },
+ { CODEC_ID_PCM_ALAW, MKTAG('a', 'l', 'a', 'w') },
+ { CODEC_ID_ADPCM_IMA_QT, MKTAG('i', 'm', 'a', '4') },
+ { CODEC_ID_MACE3, MKTAG('M', 'A', 'C', '3') },
+ { CODEC_ID_MACE6, MKTAG('M', 'A', 'C', '6') },
+ { CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') },
+ { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') },
+ { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') },
+ { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') },
+ { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') },
+ { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
+ { CODEC_ID_PCM_S24LE, MKTAG('i', 'n', '2', '4') },
+ { CODEC_ID_PCM_S32BE, MKTAG('i', 'n', '3', '2') },
+ { CODEC_ID_PCM_S32LE, MKTAG('i', 'n', '3', '2') },
+ { CODEC_ID_MP3, MKTAG('.', 'm', 'p', '3') },
+ { CODEC_ID_NONE, 0 },
+};
+
+static int mov_write_audio_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ int version = track->mode == MODE_MOV &&
+ (track->audio_vbr ||
+ track->enc->codec_id == CODEC_ID_PCM_S32LE ||
+ track->enc->codec_id == CODEC_ID_PCM_S24LE);
+
+ put_be32(pb, 0); /* size */
+ put_le32(pb, track->tag); // store it byteswapped
+ put_be32(pb, 0); /* Reserved */
+ put_be16(pb, 0); /* Reserved */
+ put_be16(pb, 1); /* Data-reference index, XXX == 1 */
+
+ /* SoundDescription */
+ put_be16(pb, version); /* Version */
+ put_be16(pb, 0); /* Revision level */
+ put_be32(pb, 0); /* Reserved */
+
+ put_be16(pb, track->mode == MODE_MOV ? track->enc->channels : 2); /* Number of channels */
+ /* FIXME 8 bit for 'raw ' in mov */
+ put_be16(pb, 16); /* Reserved */
+
+ put_be16(pb, track->mode == MODE_MOV && track->audio_vbr ? -2 : 0); /* compression ID */
+ put_be16(pb, 0); /* packet size (= 0) */
+ put_be16(pb, track->timescale); /* Time scale */
+ put_be16(pb, 0); /* Reserved */
+
+ if(version == 1) { /* SoundDescription V1 extended info */
+ put_be32(pb, track->enc->frame_size); /* Samples per packet */
+ put_be32(pb, track->sampleSize / track->enc->channels); /* Bytes per packet */
+ put_be32(pb, track->sampleSize); /* Bytes per frame */
+ put_be32(pb, 2); /* Bytes per sample */
+ }
+
+ if(track->mode == MODE_MOV &&
+ (track->enc->codec_id == CODEC_ID_AAC ||
+ track->enc->codec_id == CODEC_ID_AMR_NB ||
+ track->enc->codec_id == CODEC_ID_PCM_S24LE ||
+ track->enc->codec_id == CODEC_ID_PCM_S32LE))
+ mov_write_wave_tag(pb, track);
+ else if(track->enc->codec_id == CODEC_ID_AAC)
+ mov_write_esds_tag(pb, track);
+ else if(track->enc->codec_id == CODEC_ID_AMR_NB)
+ mov_write_amr_tag(pb, track);
+
+ return updateSize (pb, pos);
+}
+
+static int mov_write_d263_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 0xf); /* size */
+ put_tag(pb, "d263");
+ put_tag(pb, "FFMP");
+ put_byte(pb, 0); /* decoder version */
+ /* FIXME use AVCodecContext level/profile, when encoder will set values */
+ put_byte(pb, 0xa); /* level */
+ put_byte(pb, 0); /* profile */
+ return 0xf;
+}
+
+/* TODO: No idea about these values */
+static int mov_write_svq3_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 0x15);
+ put_tag(pb, "SMI ");
+ put_tag(pb, "SEQH");
+ put_be32(pb, 0x5);
+ put_be32(pb, 0xe2c0211d);
+ put_be32(pb, 0xc0000000);
+ put_byte(pb, 0);
+ return 0x15;
+}
+
+static uint8_t *avc_find_startcode( uint8_t *p, uint8_t *end )
+{
+ uint8_t *a = p + 4 - ((int)p & 3);
+
+ for( end -= 3; p < a && p < end; p++ ) {
+ if( p[0] == 0 && p[1] == 0 && p[2] == 1 )
+ return p;
+ }
+
+ for( end -= 3; p < end; p += 4 ) {
+ uint32_t x = *(uint32_t*)p;
+// if( (x - 0x01000100) & (~x) & 0x80008000 ) // little endian
+// if( (x - 0x00010001) & (~x) & 0x00800080 ) // big endian
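+// the generic test below is nonzero iff one of the four bytes of x is zero,
+// a cheap necessary condition for a 00 00 01 start code overlapping this
+// word, so the detailed byte checks only run on candidate words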
+ if( (x - 0x01010101) & (~x) & 0x80808080 ) { // generic
+ if( p[1] == 0 ) {
+ if( p[0] == 0 && p[2] == 1 )
+ return p-1;
+ if( p[2] == 0 && p[3] == 1 )
+ return p;
+ }
+ if( p[3] == 0 ) {
+ if( p[2] == 0 && p[4] == 1 )
+ return p+1;
+ if( p[4] == 0 && p[5] == 1 )
+ return p+2;
+ }
+ }
+ }
+
+ for( end += 3; p < end; p++ ) {
+ if( p[0] == 0 && p[1] == 0 && p[2] == 1 )
+ return p;
+ }
+
+ return end + 3;
+}
+
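+/* Convert an H.264 Annex B bitstream (NAL units separated by 00 00 01 or
+ * 00 00 00 01 start codes) into the MP4/MOV sample layout, where every NAL
+ * unit is prefixed with its 32 bit big-endian length.  Illustrative example:
+ *   in:  00 00 00 01 67 xx xx  00 00 01 68 xx
+ *   out: 00 00 00 03 67 xx xx  00 00 00 02 68 xx
+ */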
+static void avc_parse_nal_units(uint8_t **buf, int *size)
+{
+ ByteIOContext pb;
+ uint8_t *p = *buf;
+ uint8_t *end = p + *size;
+ uint8_t *nal_start, *nal_end;
+
+ url_open_dyn_buf(&pb);
+ nal_start = avc_find_startcode(p, end);
+ while (nal_start < end) {
+ while(!*(nal_start++));
+ nal_end = avc_find_startcode(nal_start, end);
+ put_be32(&pb, nal_end - nal_start);
+ put_buffer(&pb, nal_start, nal_end - nal_start);
+ nal_start = nal_end;
+ }
+ av_freep(buf);
+ *size = url_close_dyn_buf(&pb, buf);
+}
+
+static int mov_write_avcc_tag(ByteIOContext *pb, MOVTrack *track)
+{
+ offset_t pos = url_ftell(pb);
+
+ put_be32(pb, 0);
+ put_tag(pb, "avcC");
+ if (track->vosLen > 6) {
+ /* check for h264 start code */
+ if (BE_32(track->vosData) == 0x00000001) {
+ uint8_t *buf, *end;
+ uint32_t sps_size=0, pps_size=0;
+ uint8_t *sps=0, *pps=0;
+
+ avc_parse_nal_units(&track->vosData, &track->vosLen);
+ buf = track->vosData;
+ end = track->vosData + track->vosLen;
+
+ /* look for sps and pps */
+ while (buf < end) {
+ unsigned int size;
+ uint8_t nal_type;
+ size = BE_32(buf);
+ nal_type = buf[4] & 0x1f;
+ if (nal_type == 7) { /* SPS */
+ sps = buf + 4;
+ sps_size = size;
+ } else if (nal_type == 8) { /* PPS */
+ pps = buf + 4;
+ pps_size = size;
+ }
+ buf += size + 4;
+ }
+ assert(sps);
+ assert(pps);
+
+ put_byte(pb, 1); /* version */
+ put_byte(pb, sps[1]); /* profile */
+ put_byte(pb, sps[2]); /* profile compat */
+ put_byte(pb, sps[3]); /* level */
+ put_byte(pb, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 1 (11) */
+ put_byte(pb, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
+
+ put_be16(pb, sps_size);
+ put_buffer(pb, sps, sps_size);
+ put_byte(pb, 1); /* number of pps */
+ put_be16(pb, pps_size);
+ put_buffer(pb, pps, pps_size);
+ } else {
+ put_buffer(pb, track->vosData, track->vosLen);
+ }
+ }
+ return updateSize(pb, pos);
+}
+
+static const CodecTag codec_movvideo_tags[] = {
+ { CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') },
+ { CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') },
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_H263, MKTAG('h', '2', '6', '3') },
+ { CODEC_ID_H263, MKTAG('s', '2', '6', '3') },
+ { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
+ /* special handling in mov_find_video_codec_tag */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL */
+ { CODEC_ID_NONE, 0 },
+};
+
+static int mov_find_video_codec_tag(AVFormatContext *s, MOVTrack *track)
+{
+ int tag = track->enc->codec_tag;
+ if (!tag) {
+ if (track->enc->codec_id == CODEC_ID_DVVIDEO) {
+ if (track->enc->height == 480) { /* NTSC */
+ if (track->enc->pix_fmt == PIX_FMT_YUV422P)
+ tag = MKTAG('d', 'v', '5', 'n');
+ else
+ tag = MKTAG('d', 'v', 'c', ' ');
+ } else { /* assume PAL */
+ if (track->enc->pix_fmt == PIX_FMT_YUV422P)
+ tag = MKTAG('d', 'v', '5', 'p');
+ else if (track->enc->pix_fmt == PIX_FMT_YUV420P)
+ tag = MKTAG('d', 'v', 'c', 'p');
+ else
+ tag = MKTAG('d', 'v', 'p', 'p');
+ }
+ } else if (track->enc->codec_id == CODEC_ID_H263) {
+ if (track->mode == MODE_MOV)
+ tag = MKTAG('h', '2', '6', '3');
+ else
+ tag = MKTAG('s', '2', '6', '3');
+ } else {
+ tag = codec_get_tag(codec_movvideo_tags, track->enc->codec_id);
+ }
+ }
+ // if no Mac FourCC is found, try the Microsoft tags
+ if (!tag) {
+ tag = codec_get_tag(codec_bmp_tags, track->enc->codec_id);
+ if (tag) {
+ av_log(s, AV_LOG_INFO, "Warning, using MS style video codec tag, the file may be unplayable!\n");
+ }
+ }
+ assert(tag);
+ return tag;
+}
+
+static int mov_find_audio_codec_tag(AVFormatContext *s, MOVTrack *track)
+{
+ int tag = track->enc->codec_tag;
+ if (!tag) {
+ tag = codec_get_tag(codec_movaudio_tags, track->enc->codec_id);
+ }
+ // if no Mac FourCC is found, try the Microsoft tags
+ if (!tag) {
+ int ms_tag = codec_get_tag(codec_wav_tags, track->enc->codec_id);
+ if (ms_tag) {
+ tag = MKTAG('m', 's', ((ms_tag >> 8) & 0xff), (ms_tag & 0xff));
+ av_log(s, AV_LOG_INFO, "Warning, using MS style audio codec tag, the file may be unplayable!\n");
+ }
+ }
+ assert(tag);
+ return tag;
+}
+
+static int mov_write_video_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ char compressor_name[32];
+
+ put_be32(pb, 0); /* size */
+ put_le32(pb, track->tag); // store it byteswapped
+ put_be32(pb, 0); /* Reserved */
+ put_be16(pb, 0); /* Reserved */
+ put_be16(pb, 1); /* Data-reference index */
+
+ put_be16(pb, 0); /* Codec stream version */
+ put_be16(pb, 0); /* Codec stream revision (=0) */
+ if (track->mode == MODE_MOV) {
+ put_tag(pb, "FFMP"); /* Vendor */
+ if(track->enc->codec_id == CODEC_ID_RAWVIDEO) {
+ put_be32(pb, 0); /* Temporal Quality */
+ put_be32(pb, 0x400); /* Spatial Quality = lossless*/
+ } else {
+ put_be32(pb, 0x200); /* Temporal Quality = normal */
+ put_be32(pb, 0x200); /* Spatial Quality = normal */
+ }
+ } else {
+ put_be32(pb, 0); /* Reserved */
+ put_be32(pb, 0); /* Reserved */
+ put_be32(pb, 0); /* Reserved */
+ }
+ put_be16(pb, track->enc->width); /* Video width */
+ put_be16(pb, track->enc->height); /* Video height */
+ put_be32(pb, 0x00480000); /* Horizontal resolution 72dpi */
+ put_be32(pb, 0x00480000); /* Vertical resolution 72dpi */
+ put_be32(pb, 0); /* Data size (= 0) */
+ put_be16(pb, 1); /* Frame count (= 1) */
+
+ memset(compressor_name,0,32);
+ /* FIXME not sure; the ISO 14496-1 draft says it shall be set to 0 */
+ if (track->mode == MODE_MOV && track->enc->codec && track->enc->codec->name)
+ strncpy(compressor_name,track->enc->codec->name,31);
+ put_byte(pb, strlen(compressor_name));
+ put_buffer(pb, compressor_name, 31);
+
+ put_be16(pb, 0x18); /* Reserved */
+ put_be16(pb, 0xffff); /* Reserved */
+ if(track->enc->codec_id == CODEC_ID_MPEG4)
+ mov_write_esds_tag(pb, track);
+ else if(track->enc->codec_id == CODEC_ID_H263)
+ mov_write_d263_tag(pb);
+ else if(track->enc->codec_id == CODEC_ID_SVQ3)
+ mov_write_svq3_tag(pb);
+ else if(track->enc->codec_id == CODEC_ID_H264)
+ mov_write_avcc_tag(pb, track);
+
+ return updateSize (pb, pos);
+}
+
+static int mov_write_stsd_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "stsd");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, 1); /* entry count */
+ if (track->enc->codec_type == CODEC_TYPE_VIDEO)
+ mov_write_video_tag(pb, track);
+ else if (track->enc->codec_type == CODEC_TYPE_AUDIO)
+ mov_write_audio_tag(pb, track);
+ return updateSize(pb, pos);
+}
+
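+/* Composition time to sample atom: per-sample cts offsets are run-length
+ * compressed into (count, duration) pairs.  Illustrative example: the cts
+ * sequence 0, 80, 0, 0 becomes the three entries {1,0}, {1,80}, {2,0}. */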
+static int mov_write_ctts_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ Time2Sample *ctts_entries;
+ uint32_t entries = 0;
+ uint32_t atom_size;
+ int i;
+
+ ctts_entries = av_malloc((track->entry + 1) * sizeof(*ctts_entries)); /* worst case */
+ ctts_entries[0].count = 1;
+ ctts_entries[0].duration = track->cluster[0].cts;
+ for (i=1; i<track->entry; i++) {
+ if (track->cluster[i].cts == ctts_entries[entries].duration) {
+ ctts_entries[entries].count++; /* compress */
+ } else {
+ entries++;
+ ctts_entries[entries].duration = track->cluster[i].cts;
+ ctts_entries[entries].count = 1;
+ }
+ }
+ entries++; /* last one */
+ atom_size = 16 + (entries * 8);
+ put_be32(pb, atom_size); /* size */
+ put_tag(pb, "ctts");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, entries); /* entry count */
+ for (i=0; i<entries; i++) {
+ put_be32(pb, ctts_entries[i].count);
+ put_be32(pb, ctts_entries[i].duration);
+ }
+ av_free(ctts_entries);
+ return atom_size;
+}
+
+/* Time to sample atom */
+static int mov_write_stts_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ Time2Sample *stts_entries;
+ uint32_t entries = -1;
+ uint32_t atom_size;
+ int i;
+
+ if (track->enc->codec_type == CODEC_TYPE_AUDIO && !track->audio_vbr) {
+ stts_entries = av_malloc(sizeof(*stts_entries)); /* one entry */
+ stts_entries[0].count = track->sampleCount;
+ stts_entries[0].duration = 1;
+ entries = 1;
+ } else {
+ stts_entries = av_malloc(track->entry * sizeof(*stts_entries)); /* worst case */
+ for (i=0; i<track->entry; i++) {
+ int64_t duration = i + 1 == track->entry ?
+ track->trackDuration - track->cluster[i].dts + track->cluster[0].dts : /* readjusting */
+ track->cluster[i+1].dts - track->cluster[i].dts;
+ if (i && duration == stts_entries[entries].duration) {
+ stts_entries[entries].count++; /* compress */
+ } else {
+ entries++;
+ stts_entries[entries].duration = duration;
+ stts_entries[entries].count = 1;
+ }
+ }
+ entries++; /* last one */
+ }
+ atom_size = 16 + (entries * 8);
+ put_be32(pb, atom_size); /* size */
+ put_tag(pb, "stts");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, entries); /* entry count */
+ for (i=0; i<entries; i++) {
+ put_be32(pb, stts_entries[i].count);
+ put_be32(pb, stts_entries[i].duration);
+ }
+ av_free(stts_entries);
+ return atom_size;
+}
+
+static int mov_write_dref_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 28); /* size */
+ put_tag(pb, "dref");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, 1); /* entry count */
+
+ put_be32(pb, 0xc); /* size */
+ put_tag(pb, "url ");
+ put_be32(pb, 1); /* version & flags */
+
+ return 28;
+}
+
+static int mov_write_stbl_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "stbl");
+ mov_write_stsd_tag(pb, track);
+ mov_write_stts_tag(pb, track);
+ if (track->enc->codec_type == CODEC_TYPE_VIDEO &&
+ track->hasKeyframes < track->entry)
+ mov_write_stss_tag(pb, track);
+ if (track->enc->codec_type == CODEC_TYPE_VIDEO &&
+ track->hasBframes)
+ mov_write_ctts_tag(pb, track);
+ mov_write_stsc_tag(pb, track);
+ mov_write_stsz_tag(pb, track);
+ mov_write_stco_tag(pb, track);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_dinf_tag(ByteIOContext *pb)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "dinf");
+ mov_write_dref_tag(pb);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_smhd_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 16); /* size */
+ put_tag(pb, "smhd");
+ put_be32(pb, 0); /* version & flags */
+ put_be16(pb, 0); /* reserved (balance, normally = 0) */
+ put_be16(pb, 0); /* reserved */
+ return 16;
+}
+
+static int mov_write_vmhd_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 0x14); /* size (always 0x14) */
+ put_tag(pb, "vmhd");
+ put_be32(pb, 0x01); /* version & flags */
+ put_be64(pb, 0); /* reserved (graphics mode = copy) */
+ return 0x14;
+}
+
+static int mov_write_hdlr_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ const char *descr, *hdlr, *hdlr_type;
+ offset_t pos = url_ftell(pb);
+
+ if (!track) { /* no media --> data handler */
+ hdlr = "dhlr";
+ hdlr_type = "url ";
+ descr = "DataHandler";
+ } else {
+ hdlr = (track->mode == MODE_MOV) ? "mhlr" : "\0\0\0\0";
+ if (track->enc->codec_type == CODEC_TYPE_VIDEO) {
+ hdlr_type = "vide";
+ descr = "VideoHandler";
+ } else {
+ hdlr_type = "soun";
+ descr = "SoundHandler";
+ }
+ }
+
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "hdlr");
+ put_be32(pb, 0); /* Version & flags */
+ put_buffer(pb, hdlr, 4); /* handler */
+ put_tag(pb, hdlr_type); /* handler type */
+ put_be32(pb ,0); /* reserved */
+ put_be32(pb ,0); /* reserved */
+ put_be32(pb ,0); /* reserved */
+ put_byte(pb, strlen(descr)); /* string counter */
+ put_buffer(pb, descr, strlen(descr)); /* handler description */
+ return updateSize(pb, pos);
+}
+
+static int mov_write_minf_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "minf");
+ if(track->enc->codec_type == CODEC_TYPE_VIDEO)
+ mov_write_vmhd_tag(pb);
+ else
+ mov_write_smhd_tag(pb);
+ if (track->mode == MODE_MOV) /* FIXME: Why do it for MODE_MOV only ? */
+ mov_write_hdlr_tag(pb, NULL);
+ mov_write_dinf_tag(pb);
+ mov_write_stbl_tag(pb, track);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_mdhd_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int version = track->trackDuration < INT32_MAX ? 0 : 1;
+
+ (version == 1) ? put_be32(pb, 44) : put_be32(pb, 32); /* size */
+ put_tag(pb, "mdhd");
+ put_byte(pb, version);
+ put_be24(pb, 0); /* flags */
+ if (version == 1) {
+ put_be64(pb, track->time);
+ put_be64(pb, track->time);
+ } else {
+ put_be32(pb, track->time); /* creation time */
+ put_be32(pb, track->time); /* modification time */
+ }
+ put_be32(pb, track->timescale); /* time scale (sample rate for audio) */
+ (version == 1) ? put_be64(pb, track->trackDuration) : put_be32(pb, track->trackDuration); /* duration */
+ put_be16(pb, track->language); /* language */
+ put_be16(pb, 0); /* reserved (quality) */
+ return 32;
+}
+
+static int mov_write_mdia_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "mdia");
+ mov_write_mdhd_tag(pb, track);
+ mov_write_hdlr_tag(pb, track);
+ mov_write_minf_tag(pb, track);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_tkhd_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int64_t duration = av_rescale_rnd(track->trackDuration, globalTimescale, track->timescale, AV_ROUND_UP);
+ int version = duration < INT32_MAX ? 0 : 1;
+
+ (version == 1) ? put_be32(pb, 104) : put_be32(pb, 92); /* size */
+ put_tag(pb, "tkhd");
+ put_byte(pb, version);
+ put_be24(pb, 0xf); /* flags (track enabled) */
+ if (version == 1) {
+ put_be64(pb, track->time);
+ put_be64(pb, track->time);
+ } else {
+ put_be32(pb, track->time); /* creation time */
+ put_be32(pb, track->time); /* modification time */
+ }
+ put_be32(pb, track->trackID); /* track-id */
+ put_be32(pb, 0); /* reserved */
+ (version == 1) ? put_be64(pb, duration) : put_be32(pb, duration);
+
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0x0); /* reserved (Layer & Alternate group) */
+ /* Volume, only for audio */
+ if(track->enc->codec_type == CODEC_TYPE_AUDIO)
+ put_be16(pb, 0x0100);
+ else
+ put_be16(pb, 0);
+ put_be16(pb, 0); /* reserved */
+
+ /* Matrix structure */
+ put_be32(pb, 0x00010000); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x00010000); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x40000000); /* reserved */
+
+ /* Track width and height, for visual only */
+ if(track->enc->codec_type == CODEC_TYPE_VIDEO) {
+ double sample_aspect_ratio = av_q2d(track->enc->sample_aspect_ratio);
+ if( !sample_aspect_ratio ) sample_aspect_ratio = 1;
+ put_be32(pb, sample_aspect_ratio * track->enc->width*0x10000);
+ put_be32(pb, track->enc->height*0x10000);
+ }
+ else {
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+ }
+ return 0x5c;
+}
+
+// This box seems important for PSP playback; without it the movie seems to hang
+static int mov_write_edts_tag(ByteIOContext *pb, MOVTrack *track)
+{
+ put_be32(pb, 0x24); /* size */
+ put_tag(pb, "edts");
+ put_be32(pb, 0x1c); /* size */
+ put_tag(pb, "elst");
+ put_be32(pb, 0x0);
+ put_be32(pb, 0x1);
+
+ put_be32(pb, av_rescale_rnd(track->trackDuration, globalTimescale, track->timescale, AV_ROUND_UP)); /* duration ... doesn't seem to affect the PSP */
+
+ put_be32(pb, track->cluster[0].cts); /* first pts is cts since dts is 0 */
+ put_be32(pb, 0x00010000);
+ return 0x24;
+}
+
+// goes at the end of each track; critical for PSP playback ("Incompatible data" without it)
+static int mov_write_uuid_tag_psp(ByteIOContext *pb, MOVTrack *mov)
+{
+ put_be32(pb, 0x34); /* size ... reports as 28 in mp4box! */
+ put_tag(pb, "uuid");
+ put_tag(pb, "USMT");
+ put_be32(pb, 0x21d24fce);
+ put_be32(pb, 0xbb88695c);
+ put_be32(pb, 0xfac9c740);
+ put_be32(pb, 0x1c); // another size here!
+ put_tag(pb, "MTDT");
+ put_be32(pb, 0x00010012);
+ put_be32(pb, 0x0a);
+ put_be32(pb, 0x55c40000);
+ put_be32(pb, 0x1);
+ put_be32(pb, 0x0);
+ return 0x34;
+}
+
+static int mov_write_trak_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "trak");
+ mov_write_tkhd_tag(pb, track);
+ if (track->mode == MODE_PSP || track->hasBframes)
+ mov_write_edts_tag(pb, track); // PSP Movies require edts box
+ mov_write_mdia_tag(pb, track);
+ if (track->mode == MODE_PSP)
+ mov_write_uuid_tag_psp(pb,track); // PSP Movies require this uuid box
+ return updateSize(pb, pos);
+}
+
+#if 0
+/* TODO: Not sorted out, but not necessary either */
+static int mov_write_iods_tag(ByteIOContext *pb, MOVContext *mov)
+{
+ put_be32(pb, 0x15); /* size */
+ put_tag(pb, "iods");
+ put_be32(pb, 0); /* version & flags */
+ put_be16(pb, 0x1007);
+ put_byte(pb, 0);
+ put_be16(pb, 0x4fff);
+ put_be16(pb, 0xfffe);
+ put_be16(pb, 0x01ff);
+ return 0x15;
+}
+#endif
+
+static int mov_write_mvhd_tag(ByteIOContext *pb, MOVContext *mov)
+{
+ int maxTrackID = 1, i;
+ int64_t maxTrackLenTemp, maxTrackLen = 0;
+ int version;
+
+ for (i=0; i<mov->nb_streams; i++) {
+ if(mov->tracks[i].entry > 0) {
+ maxTrackLenTemp = av_rescale_rnd(mov->tracks[i].trackDuration, globalTimescale, mov->tracks[i].timescale, AV_ROUND_UP);
+ if(maxTrackLen < maxTrackLenTemp)
+ maxTrackLen = maxTrackLenTemp;
+ if(maxTrackID < mov->tracks[i].trackID)
+ maxTrackID = mov->tracks[i].trackID;
+ }
+ }
+
+ version = maxTrackLen < UINT32_MAX ? 0 : 1;
+ (version == 1) ? put_be32(pb, 120) : put_be32(pb, 108); /* size */
+ put_tag(pb, "mvhd");
+ put_byte(pb, version);
+ put_be24(pb, 0); /* flags */
+ if (version == 1) {
+ put_be64(pb, mov->time);
+ put_be64(pb, mov->time);
+ } else {
+ put_be32(pb, mov->time); /* creation time */
+ put_be32(pb, mov->time); /* modification time */
+ }
+ put_be32(pb, mov->timescale); /* timescale */
+ (version == 1) ? put_be64(pb, maxTrackLen) : put_be32(pb, maxTrackLen); /* duration of longest track */
+
+ put_be32(pb, 0x00010000); /* reserved (preferred rate) 1.0 = normal */
+ put_be16(pb, 0x0100); /* reserved (preferred volume) 1.0 = normal */
+ put_be16(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+
+ /* Matrix structure */
+ put_be32(pb, 0x00010000); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x00010000); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x40000000); /* reserved */
+
+ put_be32(pb, 0); /* reserved (preview time) */
+ put_be32(pb, 0); /* reserved (preview duration) */
+ put_be32(pb, 0); /* reserved (poster time) */
+ put_be32(pb, 0); /* reserved (selection time) */
+ put_be32(pb, 0); /* reserved (selection duration) */
+ put_be32(pb, 0); /* reserved (current time) */
+ put_be32(pb, maxTrackID+1); /* Next track id */
+ return 0x6c;
+}
+
+static int mov_write_itunes_hdlr_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "hdlr");
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+ put_tag(pb, "mdir");
+ put_tag(pb, "appl");
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+ put_be16(pb, 0);
+ return updateSize(pb, pos);
+}
+
+/* helper function to write a data tag with the specified string as data */
+static int mov_write_string_data_tag(ByteIOContext *pb, const char *data, int long_style)
+{
+ if(long_style){
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "data");
+ put_be32(pb, 1);
+ put_be32(pb, 0);
+ put_buffer(pb, data, strlen(data));
+ return updateSize(pb, pos);
+ }else{
+ put_be16(pb, strlen(data)); /* string length */
+ put_be16(pb, 0);
+ put_buffer(pb, data, strlen(data));
+ return strlen(data) + 4;
+ }
+}
+
+static int mov_write_string_tag(ByteIOContext *pb, const char *name, const char *value, int long_style){
+ int size = 0;
+ if ( value && value[0] ) {
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, name);
+ mov_write_string_data_tag(pb, value, long_style);
+ size= updateSize(pb, pos);
+ }
+ return size;
+}
+
+/* iTunes year */
+static int mov_write_day_tag(ByteIOContext *pb, int year, int long_style)
+{
+ if(year){
+ char year_str[5];
+ snprintf(year_str, sizeof(year_str), "%04d", year);
+ return mov_write_string_tag(pb, "\251day", year_str, long_style);
+ }else
+ return 0;
+}
+
+/* iTunes track number */
+static int mov_write_trkn_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ int size = 0;
+ if ( s->track ) {
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "trkn");
+ {
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "data");
+ put_be32(pb, 0); // 8 bytes empty
+ put_be32(pb, 0);
+ put_be16(pb, 0); // empty
+ put_be16(pb, s->track); // track number
+ put_be16(pb, 0); // total track number
+ put_be16(pb, 0); // empty
+ updateSize(pb, pos);
+ }
+ size = updateSize(pb, pos);
+ }
+ return size;
+}
+
+/* iTunes meta data list */
+static int mov_write_ilst_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "ilst");
+ mov_write_string_tag(pb, "\251nam", s->title , 1);
+ mov_write_string_tag(pb, "\251ART", s->author , 1);
+ mov_write_string_tag(pb, "\251wrt", s->author , 1);
+ mov_write_string_tag(pb, "\251alb", s->album , 1);
+ mov_write_day_tag(pb, s->year ,1);
+ if(mov->tracks[0].enc && !(mov->tracks[0].enc->flags & CODEC_FLAG_BITEXACT))
+ mov_write_string_tag(pb, "\251too", LIBAVFORMAT_IDENT, 1);
+ mov_write_string_tag(pb, "\251cmt", s->comment , 1);
+ mov_write_string_tag(pb, "\251gen", s->genre , 1);
+ mov_write_trkn_tag(pb, mov, s);
+ return updateSize(pb, pos);
+}
+
+/* iTunes meta data tag */
+static int mov_write_meta_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ int size = 0;
+
+ // only save meta tag if required
+ if ( s->title[0] || s->author[0] || s->album[0] || s->year ||
+ s->comment[0] || s->genre[0] || s->track ) {
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "meta");
+ put_be32(pb, 0);
+ mov_write_itunes_hdlr_tag(pb, mov, s);
+ mov_write_ilst_tag(pb, mov, s);
+ size = updateSize(pb, pos);
+ }
+ return size;
+}
+
+static int mov_write_udta_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ offset_t pos = url_ftell(pb);
+ int i;
+
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "udta");
+
+ /* iTunes meta data */
+ mov_write_meta_tag(pb, mov, s);
+
+ if(mov->mode == MODE_MOV){ // the title field breaks gtkpod with mp4, and my suspicion is that this stuff isn't valid in mp4
+ /* Requirements */
+ for (i=0; i<mov->nb_streams; i++) {
+ if(mov->tracks[i].entry <= 0) continue;
+ if (mov->tracks[i].enc->codec_id == CODEC_ID_AAC ||
+ mov->tracks[i].enc->codec_id == CODEC_ID_MPEG4) {
+ mov_write_string_tag(pb, "\251req", "QuickTime 6.0 or greater", 0);
+ break;
+ }
+ }
+
+ mov_write_string_tag(pb, "\251nam", s->title , 0);
+ mov_write_string_tag(pb, "\251aut", s->author , 0);
+ mov_write_string_tag(pb, "\251alb", s->album , 0);
+ mov_write_day_tag(pb, s->year, 0);
+ if(mov->tracks[0].enc && !(mov->tracks[0].enc->flags & CODEC_FLAG_BITEXACT))
+ mov_write_string_tag(pb, "\251enc", LIBAVFORMAT_IDENT, 0);
+ mov_write_string_tag(pb, "\251des", s->comment , 0);
+ mov_write_string_tag(pb, "\251gen", s->genre , 0);
+ }
+
+ return updateSize(pb, pos);
+}
+
+static int utf8len(uint8_t *b){
+ int len=0;
+ int val;
+ while(*b){
+ GET_UTF8(val, *b++, return -1;)
+ len++;
+ }
+ return len;
+}
+
+static int ascii_to_wc (ByteIOContext *pb, uint8_t *b)
+{
+ int val;
+ while(*b){
+ GET_UTF8(val, *b++, return -1;)
+ put_be16(pb, val);
+ }
+ put_be16(pb, 0x00);
+ return 0;
+}
+
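+/* Pack an ISO 639-2/T language code into the 15 bit packed form used by
+ * QuickTime/MP4: each letter is stored as five bits of (letter - 0x60).
+ * Illustrative example: language_code("eng") == 0x15C7. */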
+static uint16_t language_code (const char *str)
+{
+ return ((((str[0]-0x60) & 0x1F)<<10) + (((str[1]-0x60) & 0x1F)<<5) + ((str[2]-0x60) & 0x1F));
+}
+
+static int mov_write_uuidusmt_tag (ByteIOContext *pb, AVFormatContext *s)
+{
+ size_t len, size;
+ offset_t pos, curpos;
+
+ size = 0;
+ if (s->title[0]) {
+ pos = url_ftell(pb);
+ put_be32(pb, 0); /* size placeholder*/
+ put_tag(pb, "uuid");
+ put_tag(pb, "USMT");
+ put_be32(pb, 0x21d24fce ); /* 96 bit UUID */
+ put_be32(pb, 0xbb88695c );
+ put_be32(pb, 0xfac9c740 );
+ size += 24;
+
+ put_be32(pb, 0); /* size placeholder*/
+ put_tag(pb, "MTDT");
+ put_be16(pb, 4);
+ size += 10;
+
+ // ?
+ put_be16(pb, 0x0C); /* size */
+ put_be32(pb, 0x0B); /* type */
+ put_be16(pb, language_code("und")); /* language */
+ put_be16(pb, 0x0); /* ? */
+ put_be16(pb, 0x021C); /* data */
+ size += 12;
+
+ // Encoder
+ len = utf8len(LIBAVCODEC_IDENT)+1;
+ if(len<=0)
+ goto not_utf8;
+ put_be16(pb, len*2+10); /* size */
+ put_be32(pb, 0x04); /* type */
+ put_be16(pb, language_code("eng")); /* language */
+ put_be16(pb, 0x01); /* ? */
+ ascii_to_wc(pb, LIBAVCODEC_IDENT);
+ size += len*2+10;
+
+ // Title
+ len = utf8len(s->title)+1;
+ if(len<=0)
+ goto not_utf8;
+ put_be16(pb, len*2+10); /* size */
+ put_be32(pb, 0x01); /* type */
+ put_be16(pb, language_code("eng")); /* language */
+ put_be16(pb, 0x01); /* ? */
+ ascii_to_wc (pb, s->title);
+ size += len*2+10;
+
+ // Date
+// snprintf(dt,32,"%04d/%02d/%02d %02d:%02d:%02d",t_st->tm_year+1900,t_st->tm_mon+1,t_st->tm_mday,t_st->tm_hour,t_st->tm_min,t_st->tm_sec);
+ len = utf8len("2006/04/01 11:11:11")+1;
+ if(len<=0)
+ goto not_utf8;
+ put_be16(pb, len*2+10); /* size */
+ put_be32(pb, 0x03); /* type */
+ put_be16(pb, language_code("und")); /* language */
+ put_be16(pb, 0x01); /* ? */
+ ascii_to_wc (pb, "2006/04/01 11:11:11");
+ size += len*2+10;
+
+ // size
+ curpos = url_ftell(pb);
+ url_fseek(pb, pos, SEEK_SET);
+ put_be32(pb, size);
+ url_fseek(pb, pos+24, SEEK_SET);
+ put_be32(pb, size-24);
+ url_fseek(pb, curpos, SEEK_SET);
+ }
+
+ return size;
+not_utf8:
+ av_log(s, AV_LOG_ERROR, "not utf8\n");
+ return -1;
+}
+
+static int mov_write_moov_tag(ByteIOContext *pb, MOVContext *mov,
+ AVFormatContext *s)
+{
+ int i;
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size placeholder*/
+ put_tag(pb, "moov");
+ mov->timescale = globalTimescale;
+
+ for (i=0; i<mov->nb_streams; i++) {
+ if(mov->tracks[i].entry <= 0) continue;
+
+ mov->tracks[i].time = mov->time;
+ mov->tracks[i].trackID = i+1;
+ }
+
+ mov_write_mvhd_tag(pb, mov);
+ //mov_write_iods_tag(pb, mov);
+ for (i=0; i<mov->nb_streams; i++) {
+ if(mov->tracks[i].entry > 0) {
+ mov_write_trak_tag(pb, &(mov->tracks[i]));
+ }
+ }
+
+ if (mov->mode == MODE_PSP)
+ mov_write_uuidusmt_tag(pb, s);
+ else
+ mov_write_udta_tag(pb, mov, s);
+
+ return updateSize(pb, pos);
+}
+
+static int mov_write_mdat_tag(ByteIOContext *pb, MOVContext* mov)
+{
+ put_be32(pb, 8); // placeholder for extended size field (64 bit)
+ put_tag(pb, mov->mode == MODE_MOV ? "wide" : "free");
+
+ mov->mdat_pos = url_ftell(pb);
+ put_be32(pb, 0); /* size placeholder*/
+ put_tag(pb, "mdat");
+ return 0;
+}
+
+/* TODO: This needs to be more general */
+static void mov_write_ftyp_tag (ByteIOContext *pb, AVFormatContext *s)
+{
+ MOVContext *mov = s->priv_data;
+
+ put_be32(pb, 0x14 ); /* size */
+ put_tag(pb, "ftyp");
+
+ if ( mov->mode == MODE_3GP )
+ put_tag(pb, "3gp4");
+ else if ( mov->mode == MODE_3G2 )
+ put_tag(pb, "3g2a");
+ else if ( mov->mode == MODE_PSP )
+ put_tag(pb, "MSNV");
+ else if ( mov->mode == MODE_MP4 )
+ put_tag(pb, "isom");
+ else
+ put_tag(pb, "qt ");
+
+ put_be32(pb, 0x200 );
+
+ if ( mov->mode == MODE_3GP )
+ put_tag(pb, "3gp4");
+ else if ( mov->mode == MODE_3G2 )
+ put_tag(pb, "3g2a");
+ else if ( mov->mode == MODE_PSP )
+ put_tag(pb, "MSNV");
+ else if ( mov->mode == MODE_MP4 )
+ put_tag(pb, "mp41");
+ else
+ put_tag(pb, "qt ");
+}
+
+static void mov_write_uuidprof_tag(ByteIOContext *pb, AVFormatContext *s)
+{
+ AVCodecContext *VideoCodec = s->streams[0]->codec;
+ AVCodecContext *AudioCodec = s->streams[1]->codec;
+ int AudioRate = AudioCodec->sample_rate;
+ int FrameRate = ((VideoCodec->time_base.den) * (0x10000))/ (VideoCodec->time_base.num);
+ int audio_kbitrate= AudioCodec->bit_rate / 1000;
+ int video_kbitrate= FFMIN(VideoCodec->bit_rate / 1000, 800 - audio_kbitrate);
+
+ put_be32(pb, 0x94 ); /* size */
+ put_tag(pb, "uuid");
+ put_tag(pb, "PROF");
+
+ put_be32(pb, 0x21d24fce ); /* 96 bit UUID */
+ put_be32(pb, 0xbb88695c );
+ put_be32(pb, 0xfac9c740 );
+
+ put_be32(pb, 0x0 ); /* ? */
+ put_be32(pb, 0x3 ); /* 3 sections ? */
+
+ put_be32(pb, 0x14 ); /* size */
+ put_tag(pb, "FPRF");
+ put_be32(pb, 0x0 ); /* ? */
+ put_be32(pb, 0x0 ); /* ? */
+ put_be32(pb, 0x0 ); /* ? */
+
+ put_be32(pb, 0x2c ); /* size */
+ put_tag(pb, "APRF"); /* audio */
+ put_be32(pb, 0x0 );
+ put_be32(pb, 0x2 ); /* TrackID */
+ put_tag(pb, "mp4a");
+ put_be32(pb, 0x20f );
+ put_be32(pb, 0x0 );
+ put_be32(pb, audio_kbitrate);
+ put_be32(pb, audio_kbitrate);
+ put_be32(pb, AudioRate );
+ put_be32(pb, AudioCodec->channels );
+
+ put_be32(pb, 0x34 ); /* size */
+ put_tag(pb, "VPRF"); /* video */
+ put_be32(pb, 0x0 );
+ put_be32(pb, 0x1 ); /* TrackID */
+ if (VideoCodec->codec_id == CODEC_ID_H264) {
+ put_tag(pb, "avc1");
+ put_be16(pb, 0x014D );
+ put_be16(pb, 0x0015 );
+ } else {
+ put_tag(pb, "mp4v");
+ put_be16(pb, 0x0000 );
+ put_be16(pb, 0x0103 );
+ }
+ put_be32(pb, 0x0 );
+ put_be32(pb, video_kbitrate);
+ put_be32(pb, video_kbitrate);
+ put_be32(pb, FrameRate);
+ put_be32(pb, FrameRate);
+ put_be16(pb, VideoCodec->width);
+ put_be16(pb, VideoCodec->height);
+ put_be32(pb, 0x010001); /* ? */
+}
+
+static int mov_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ MOVContext *mov = s->priv_data;
+ int i;
+
+ /* Default mode == MP4 */
+ mov->mode = MODE_MP4;
+
+ if (s->oformat != NULL) {
+ if (!strcmp("3gp", s->oformat->name)) mov->mode = MODE_3GP;
+ else if (!strcmp("3g2", s->oformat->name)) mov->mode = MODE_3G2;
+ else if (!strcmp("mov", s->oformat->name)) mov->mode = MODE_MOV;
+ else if (!strcmp("psp", s->oformat->name)) mov->mode = MODE_PSP;
+
+ mov_write_ftyp_tag(pb,s);
+ if ( mov->mode == MODE_PSP ) {
+ if ( s->nb_streams != 2 ) {
+ av_log(s, AV_LOG_ERROR, "PSP mode need one video and one audio stream\n");
+ return -1;
+ }
+ mov_write_uuidprof_tag(pb,s);
+ }
+ }
+
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st= s->streams[i];
+ MOVTrack *track= &mov->tracks[i];
+
+ track->enc = st->codec;
+ track->language = ff_mov_iso639_to_lang(st->language, mov->mode != MODE_MOV);
+ track->mode = mov->mode;
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO){
+ track->tag = mov_find_video_codec_tag(s, track);
+ track->timescale = st->codec->time_base.den;
+ track->sampleDuration = st->codec->time_base.num;
+ av_set_pts_info(st, 64, 1, st->codec->time_base.den);
+ }else if(st->codec->codec_type == CODEC_TYPE_AUDIO){
+ track->tag = mov_find_audio_codec_tag(s, track);
+ track->timescale = st->codec->sample_rate;
+ track->sampleDuration = st->codec->frame_size;
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+ switch(track->enc->codec_id){
+ case CODEC_ID_MP3:
+ case CODEC_ID_AAC:
+ case CODEC_ID_AMR_NB:
+ case CODEC_ID_AMR_WB:
+ track->audio_vbr = 1;
+ break;
+ default:
+ track->sampleSize = (av_get_bits_per_sample(st->codec->codec_id) >> 3) * st->codec->channels;
+ }
+ }
+ if (!track->sampleDuration) {
+ av_log(s, AV_LOG_ERROR, "track %d: sample duration is not set\n", i);
+ return -1;
+ }
+ }
+
+ mov_write_mdat_tag(pb, mov);
+ mov->time = s->timestamp + 0x7C25B080; //1970 based -> 1904 based
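+    /* 0x7C25B080 = 2082844800 seconds, the offset between the Unix epoch
+       (1970-01-01) and the QuickTime/MP4 epoch (1904-01-01) */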
+ mov->nb_streams = s->nb_streams;
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ MOVContext *mov = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ MOVTrack *trk = &mov->tracks[pkt->stream_index];
+ AVCodecContext *enc = trk->enc;
+ unsigned int samplesInChunk = 0;
+ int size= pkt->size;
+
+ if (url_is_streamed(&s->pb)) return 0; /* Can't handle that */
+ if (!size) return 0; /* Discard 0 sized packets */
+
+ if (enc->codec_id == CODEC_ID_AMR_NB) {
+ /* We must find out how many AMR blocks there are in one packet */
+ static uint16_t packed_size[16] =
+ {13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 0};
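+        /* bytes per AMR-NB storage frame (1 TOC byte + payload), indexed by the
+           frame type in bits 3-6 of the TOC byte; types 9-15 are not used here */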
+ int len = 0;
+
+ while (len < size && samplesInChunk < 100) {
+ len += packed_size[(pkt->data[len] >> 3) & 0x0F];
+ samplesInChunk++;
+ }
+ if(samplesInChunk > 1){
+            av_log(s, AV_LOG_ERROR, "fatal error, input is not a single packet, implement an AVParser for it\n");
+ return -1;
+ }
+ } else if (trk->sampleSize)
+ samplesInChunk = size/trk->sampleSize;
+ else
+ samplesInChunk = 1;
+
+ /* copy extradata if it exists */
+ if (trk->vosLen == 0 && enc->extradata_size > 0) {
+ trk->vosLen = enc->extradata_size;
+ trk->vosData = av_malloc(trk->vosLen);
+ memcpy(trk->vosData, enc->extradata, trk->vosLen);
+ }
+
+ if (enc->codec_id == CODEC_ID_H264 && trk->vosLen > 0 && *(uint8_t *)trk->vosData != 1) {
+        /* extradata from x264 or from a raw H.264 bytestream (not an avcC record):
+           the Annex-B start codes have to be reformatted into length-prefixed NAL units */
+ avc_parse_nal_units(&pkt->data, &pkt->size);
+ assert(pkt->size);
+ size = pkt->size;
+ }
+
+ if (!(trk->entry % MOV_INDEX_CLUSTER_SIZE)) {
+ trk->cluster = av_realloc(trk->cluster, (trk->entry + MOV_INDEX_CLUSTER_SIZE) * sizeof(*trk->cluster));
+ if (!trk->cluster)
+ return -1;
+ }
+
+ trk->cluster[trk->entry].pos = url_ftell(pb);
+ trk->cluster[trk->entry].samplesInChunk = samplesInChunk;
+ trk->cluster[trk->entry].size = size;
+ trk->cluster[trk->entry].entries = samplesInChunk;
+ trk->cluster[trk->entry].dts = pkt->dts;
+ trk->trackDuration = pkt->dts - trk->cluster[0].dts + pkt->duration;
+
+ if(enc->codec_type == CODEC_TYPE_VIDEO) {
+ if (pkt->dts != pkt->pts)
+ trk->hasBframes = 1;
+ trk->cluster[trk->entry].cts = pkt->pts - pkt->dts;
+ trk->cluster[trk->entry].key_frame = !!(pkt->flags & PKT_FLAG_KEY);
+ if(trk->cluster[trk->entry].key_frame)
+ trk->hasKeyframes++;
+ }
+ trk->entry++;
+ trk->sampleCount += samplesInChunk;
+ mov->mdat_size += size;
+
+ put_buffer(pb, pkt->data, size);
+
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int mov_write_trailer(AVFormatContext *s)
+{
+ MOVContext *mov = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int res = 0;
+ int i;
+
+ offset_t moov_pos = url_ftell(pb);
+
+ /* Write size of mdat tag */
+ if (mov->mdat_size+8 <= UINT32_MAX) {
+ url_fseek(pb, mov->mdat_pos, SEEK_SET);
+ put_be32(pb, mov->mdat_size+8);
+ } else {
+ /* overwrite 'wide' placeholder atom */
+ url_fseek(pb, mov->mdat_pos - 8, SEEK_SET);
+ put_be32(pb, 1); /* special value: real atom size will be 64 bit value after tag field */
+ put_tag(pb, "mdat");
+ put_be64(pb, mov->mdat_size+16);
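+        /* the 64-bit size counts the full 16-byte header (size, tag and
+           extended-size fields), hence mdat_size+16 */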
+ }
+ url_fseek(pb, moov_pos, SEEK_SET);
+
+ mov_write_moov_tag(pb, mov, s);
+
+ for (i=0; i<mov->nb_streams; i++) {
+ av_freep(&mov->tracks[i].cluster);
+
+ if( mov->tracks[i].vosLen ) av_free( mov->tracks[i].vosData );
+
+ }
+
+ put_flush_packet(pb);
+
+ return res;
+}
+
+#ifdef CONFIG_MOV_MUXER
+AVOutputFormat mov_muxer = {
+ "mov",
+ "mov format",
+ NULL,
+ "mov",
+ sizeof(MOVContext),
+ CODEC_ID_AAC,
+ CODEC_ID_MPEG4,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+#ifdef CONFIG_TGP_MUXER
+AVOutputFormat tgp_muxer = {
+ "3gp",
+ "3gp format",
+ NULL,
+ "3gp",
+ sizeof(MOVContext),
+ CODEC_ID_AMR_NB,
+ CODEC_ID_H263,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+#ifdef CONFIG_MP4_MUXER
+AVOutputFormat mp4_muxer = {
+ "mp4",
+ "mp4 format",
+ "application/mp4",
+ "mp4,m4a",
+ sizeof(MOVContext),
+ CODEC_ID_AAC,
+ CODEC_ID_MPEG4,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+#ifdef CONFIG_PSP_MUXER
+AVOutputFormat psp_muxer = {
+ "psp",
+ "psp mp4 format",
+ NULL,
+ "mp4,psp",
+ sizeof(MOVContext),
+ CODEC_ID_AAC,
+ CODEC_ID_MPEG4,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+#ifdef CONFIG_TG2_MUXER
+AVOutputFormat tg2_muxer = {
+ "3g2",
+ "3gp2 format",
+ NULL,
+ "3g2",
+ sizeof(MOVContext),
+ CODEC_ID_AMR_NB,
+ CODEC_ID_H263,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/mp3.c b/contrib/ffmpeg/libavformat/mp3.c
new file mode 100644
index 000000000..723980c83
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mp3.c
@@ -0,0 +1,430 @@
+/*
+ * MP3 muxer and demuxer
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "mpegaudio.h"
+
+#define ID3_HEADER_SIZE 10
+#define ID3_TAG_SIZE 128
+
+#define ID3_GENRE_MAX 125
+
+static const char *id3_genre_str[ID3_GENRE_MAX + 1] = {
+ [0] = "Blues",
+ [1] = "Classic Rock",
+ [2] = "Country",
+ [3] = "Dance",
+ [4] = "Disco",
+ [5] = "Funk",
+ [6] = "Grunge",
+ [7] = "Hip-Hop",
+ [8] = "Jazz",
+ [9] = "Metal",
+ [10] = "New Age",
+ [11] = "Oldies",
+ [12] = "Other",
+ [13] = "Pop",
+ [14] = "R&B",
+ [15] = "Rap",
+ [16] = "Reggae",
+ [17] = "Rock",
+ [18] = "Techno",
+ [19] = "Industrial",
+ [20] = "Alternative",
+ [21] = "Ska",
+ [22] = "Death Metal",
+ [23] = "Pranks",
+ [24] = "Soundtrack",
+ [25] = "Euro-Techno",
+ [26] = "Ambient",
+ [27] = "Trip-Hop",
+ [28] = "Vocal",
+ [29] = "Jazz+Funk",
+ [30] = "Fusion",
+ [31] = "Trance",
+ [32] = "Classical",
+ [33] = "Instrumental",
+ [34] = "Acid",
+ [35] = "House",
+ [36] = "Game",
+ [37] = "Sound Clip",
+ [38] = "Gospel",
+ [39] = "Noise",
+ [40] = "AlternRock",
+ [41] = "Bass",
+ [42] = "Soul",
+ [43] = "Punk",
+ [44] = "Space",
+ [45] = "Meditative",
+ [46] = "Instrumental Pop",
+ [47] = "Instrumental Rock",
+ [48] = "Ethnic",
+ [49] = "Gothic",
+ [50] = "Darkwave",
+ [51] = "Techno-Industrial",
+ [52] = "Electronic",
+ [53] = "Pop-Folk",
+ [54] = "Eurodance",
+ [55] = "Dream",
+ [56] = "Southern Rock",
+ [57] = "Comedy",
+ [58] = "Cult",
+ [59] = "Gangsta",
+ [60] = "Top 40",
+ [61] = "Christian Rap",
+ [62] = "Pop/Funk",
+ [63] = "Jungle",
+ [64] = "Native American",
+ [65] = "Cabaret",
+ [66] = "New Wave",
+ [67] = "Psychadelic",
+ [68] = "Rave",
+ [69] = "Showtunes",
+ [70] = "Trailer",
+ [71] = "Lo-Fi",
+ [72] = "Tribal",
+ [73] = "Acid Punk",
+ [74] = "Acid Jazz",
+ [75] = "Polka",
+ [76] = "Retro",
+ [77] = "Musical",
+ [78] = "Rock & Roll",
+ [79] = "Hard Rock",
+ [80] = "Folk",
+ [81] = "Folk-Rock",
+ [82] = "National Folk",
+ [83] = "Swing",
+ [84] = "Fast Fusion",
+ [85] = "Bebob",
+ [86] = "Latin",
+ [87] = "Revival",
+ [88] = "Celtic",
+ [89] = "Bluegrass",
+ [90] = "Avantgarde",
+ [91] = "Gothic Rock",
+ [92] = "Progressive Rock",
+ [93] = "Psychedelic Rock",
+ [94] = "Symphonic Rock",
+ [95] = "Slow Rock",
+ [96] = "Big Band",
+ [97] = "Chorus",
+ [98] = "Easy Listening",
+ [99] = "Acoustic",
+ [100] = "Humour",
+ [101] = "Speech",
+ [102] = "Chanson",
+ [103] = "Opera",
+ [104] = "Chamber Music",
+ [105] = "Sonata",
+ [106] = "Symphony",
+ [107] = "Booty Bass",
+ [108] = "Primus",
+ [109] = "Porn Groove",
+ [110] = "Satire",
+ [111] = "Slow Jam",
+ [112] = "Club",
+ [113] = "Tango",
+ [114] = "Samba",
+ [115] = "Folklore",
+ [116] = "Ballad",
+ [117] = "Power Ballad",
+ [118] = "Rhythmic Soul",
+ [119] = "Freestyle",
+ [120] = "Duet",
+ [121] = "Punk Rock",
+ [122] = "Drum Solo",
+ [123] = "A capella",
+ [124] = "Euro-House",
+ [125] = "Dance Hall",
+};
+
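+/* An ID3v2 header is 10 bytes: "ID3", two version bytes (neither 0xff), one
+   flags byte and a 4-byte sync-safe size whose bytes all have the top bit
+   clear; id3_match() checks the magic, the version bytes and the size bytes. */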
+/* buf must be ID3_HEADER_SIZE bytes long */
+static int id3_match(const uint8_t *buf)
+{
+ return (buf[0] == 'I' &&
+ buf[1] == 'D' &&
+ buf[2] == '3' &&
+ buf[3] != 0xff &&
+ buf[4] != 0xff &&
+ (buf[6] & 0x80) == 0 &&
+ (buf[7] & 0x80) == 0 &&
+ (buf[8] & 0x80) == 0 &&
+ (buf[9] & 0x80) == 0);
+}
+
+static void id3_get_string(char *str, int str_size,
+ const uint8_t *buf, int buf_size)
+{
+ int i, c;
+ char *q;
+
+ q = str;
+ for(i = 0; i < buf_size; i++) {
+ c = buf[i];
+ if (c == '\0')
+ break;
+ if ((q - str) >= str_size - 1)
+ break;
+ *q++ = c;
+ }
+ *q = '\0';
+}
+
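+/* Layout of the 128-byte ID3v1 tag: "TAG", title[30], artist[30], album[30],
+   year[4], comment[30], genre[1]; ID3v1.1 stores the track number in the last
+   comment byte (offset 126) with offset 125 set to 0. */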
+/* 'buf' must be ID3_TAG_SIZE bytes long */
+static int id3_parse_tag(AVFormatContext *s, const uint8_t *buf)
+{
+ char str[5];
+ int genre;
+
+ if (!(buf[0] == 'T' &&
+ buf[1] == 'A' &&
+ buf[2] == 'G'))
+ return -1;
+ id3_get_string(s->title, sizeof(s->title), buf + 3, 30);
+ id3_get_string(s->author, sizeof(s->author), buf + 33, 30);
+ id3_get_string(s->album, sizeof(s->album), buf + 63, 30);
+ id3_get_string(str, sizeof(str), buf + 93, 4);
+ s->year = atoi(str);
+ id3_get_string(s->comment, sizeof(s->comment), buf + 97, 30);
+ if (buf[125] == 0 && buf[126] != 0)
+ s->track = buf[126];
+ genre = buf[127];
+ if (genre <= ID3_GENRE_MAX)
+ pstrcpy(s->genre, sizeof(s->genre), id3_genre_str[genre]);
+ return 0;
+}
+
+static void id3_create_tag(AVFormatContext *s, uint8_t *buf)
+{
+ int v, i;
+
+ memset(buf, 0, ID3_TAG_SIZE); /* fail safe */
+ buf[0] = 'T';
+ buf[1] = 'A';
+ buf[2] = 'G';
+ strncpy(buf + 3, s->title, 30);
+ strncpy(buf + 33, s->author, 30);
+ strncpy(buf + 63, s->album, 30);
+ v = s->year;
+ if (v > 0) {
+ for(i = 0;i < 4; i++) {
+ buf[96 - i] = '0' + (v % 10);
+ v = v / 10;
+ }
+ }
+ strncpy(buf + 97, s->comment, 30);
+ if (s->track != 0) {
+ buf[125] = 0;
+ buf[126] = s->track;
+ }
+ for(i = 0; i <= ID3_GENRE_MAX; i++) {
+ if (!strcasecmp(s->genre, id3_genre_str[i])) {
+ buf[127] = i;
+ break;
+ }
+ }
+}
+
+/* mp3 read */
+
+static int mp3_read_probe(AVProbeData *p)
+{
+    int max_frames, first_frames = 0;
+ int fsize, frames, sample_rate;
+ uint32_t header;
+ uint8_t *buf, *buf2, *end;
+ AVCodecContext avctx;
+
+ if(p->buf_size < ID3_HEADER_SIZE)
+ return 0;
+
+ if(id3_match(p->buf))
+        return AVPROBE_SCORE_MAX/2+1; // this must be less than the mpeg-ps score because some files put an ID3 tag before mpeg-ps data
+
+ max_frames = 0;
+ buf = p->buf;
+ end = buf + FFMIN(4096, p->buf_size - sizeof(uint32_t));
+
+ for(; buf < end; buf++) {
+ buf2 = buf;
+
+ for(frames = 0; buf2 < end; frames++) {
+ header = (buf2[0] << 24) | (buf2[1] << 16) | (buf2[2] << 8) | buf2[3];
+ fsize = mpa_decode_header(&avctx, header, &sample_rate);
+ if(fsize < 0)
+ break;
+ buf2 += fsize;
+ }
+ max_frames = FFMAX(max_frames, frames);
+ if(buf == p->buf)
+ first_frames= frames;
+ }
+ if (first_frames>=3) return AVPROBE_SCORE_MAX/2+1;
+ else if(max_frames>=3) return AVPROBE_SCORE_MAX/4;
+ else if(max_frames>=1) return 1;
+ else return 0;
+}
+
+static int mp3_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+ uint8_t buf[ID3_TAG_SIZE];
+ int len, ret, filesize;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_MP3;
+ st->need_parsing = 1;
+
+ /* try to get the TAG */
+ if (!url_is_streamed(&s->pb)) {
+ /* XXX: change that */
+ filesize = url_fsize(&s->pb);
+ if (filesize > 128) {
+ url_fseek(&s->pb, filesize - 128, SEEK_SET);
+ ret = get_buffer(&s->pb, buf, ID3_TAG_SIZE);
+ if (ret == ID3_TAG_SIZE) {
+ id3_parse_tag(s, buf);
+ }
+ url_fseek(&s->pb, 0, SEEK_SET);
+ }
+ }
+
+ /* if ID3 header found, skip it */
+ ret = get_buffer(&s->pb, buf, ID3_HEADER_SIZE);
+ if (ret != ID3_HEADER_SIZE)
+ return -1;
+ if (id3_match(buf)) {
+ /* skip ID3 header */
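+        /* the tag size is sync-safe: 4 bytes of 7 significant bits each, e.g.
+           00 00 02 01 decodes to (2<<7)|1 = 257 bytes following the header */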
+ len = ((buf[6] & 0x7f) << 21) |
+ ((buf[7] & 0x7f) << 14) |
+ ((buf[8] & 0x7f) << 7) |
+ (buf[9] & 0x7f);
+ url_fskip(&s->pb, len);
+ } else {
+ url_fseek(&s->pb, 0, SEEK_SET);
+ }
+
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+#define MP3_PACKET_SIZE 1024
+
+static int mp3_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, size;
+ // AVStream *st = s->streams[0];
+
+ size= MP3_PACKET_SIZE;
+
+ ret= av_get_packet(&s->pb, pkt, size);
+
+ pkt->stream_index = 0;
+ if (ret <= 0) {
+ return AVERROR_IO;
+ }
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return ret;
+}
+
+static int mp3_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+#ifdef CONFIG_MUXERS
+/* simple formats */
+static int mp3_write_header(struct AVFormatContext *s)
+{
+ return 0;
+}
+
+static int mp3_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ put_buffer(&s->pb, pkt->data, pkt->size);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int mp3_write_trailer(struct AVFormatContext *s)
+{
+ uint8_t buf[ID3_TAG_SIZE];
+
+ /* write the id3 header */
+ if (s->title[0] != '\0') {
+ id3_create_tag(s, buf);
+ put_buffer(&s->pb, buf, ID3_TAG_SIZE);
+ put_flush_packet(&s->pb);
+ }
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+#ifdef CONFIG_MP3_DEMUXER
+AVInputFormat mp3_demuxer = {
+ "mp3",
+ "MPEG audio",
+ 0,
+ mp3_read_probe,
+ mp3_read_header,
+ mp3_read_packet,
+ mp3_read_close,
+ .extensions = "mp2,mp3,m2a", /* XXX: use probe */
+};
+#endif
+#ifdef CONFIG_MP2_MUXER
+AVOutputFormat mp2_muxer = {
+ "mp2",
+ "MPEG audio layer 2",
+ "audio/x-mpeg",
+#ifdef CONFIG_MP3LAME
+ "mp2,m2a",
+#else
+ "mp2,mp3,m2a",
+#endif
+ 0,
+ CODEC_ID_MP2,
+ 0,
+ mp3_write_header,
+ mp3_write_packet,
+ mp3_write_trailer,
+};
+#endif
+#ifdef CONFIG_MP3_MUXER
+AVOutputFormat mp3_muxer = {
+ "mp3",
+ "MPEG audio layer 3",
+ "audio/x-mpeg",
+ "mp3",
+ 0,
+ CODEC_ID_MP3,
+ 0,
+ mp3_write_header,
+ mp3_write_packet,
+ mp3_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/mpeg.c b/contrib/ffmpeg/libavformat/mpeg.c
new file mode 100644
index 000000000..709ce16f1
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpeg.c
@@ -0,0 +1,1824 @@
+/*
+ * MPEG1/2 muxer and demuxer
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+#include "fifo.h"
+
+#define MAX_PAYLOAD_SIZE 4096
+//#define DEBUG_SEEK
+
+#undef NDEBUG
+#include <assert.h>
+
+typedef struct PacketDesc {
+ int64_t pts;
+ int64_t dts;
+ int size;
+ int unwritten_size;
+ int flags;
+ struct PacketDesc *next;
+} PacketDesc;
+
+typedef struct {
+ AVFifoBuffer fifo;
+ uint8_t id;
+ int max_buffer_size; /* in bytes */
+ int buffer_index;
+ PacketDesc *predecode_packet;
+ PacketDesc *premux_packet;
+ PacketDesc **next_packet;
+ int packet_number;
+ uint8_t lpcm_header[3];
+ int lpcm_align;
+ int bytes_to_iframe;
+ int align_iframe;
+ int64_t vobu_start_pts;
+} StreamInfo;
+
+typedef struct {
+ int packet_size; /* required packet size */
+ int packet_number;
+ int pack_header_freq; /* frequency (in packets^-1) at which we send pack headers */
+ int system_header_freq;
+ int system_header_size;
+ int mux_rate; /* bitrate in units of 50 bytes/s */
+ /* stream info */
+ int audio_bound;
+ int video_bound;
+ int is_mpeg2;
+ int is_vcd;
+ int is_svcd;
+ int is_dvd;
+ int64_t last_scr; /* current system clock */
+
+ double vcd_padding_bitrate; //FIXME floats
+ int64_t vcd_padding_bytes_written;
+
+} MpegMuxContext;
+
+#define PACK_START_CODE ((unsigned int)0x000001ba)
+#define SYSTEM_HEADER_START_CODE ((unsigned int)0x000001bb)
+#define SEQUENCE_END_CODE ((unsigned int)0x000001b7)
+#define PACKET_START_CODE_MASK ((unsigned int)0xffffff00)
+#define PACKET_START_CODE_PREFIX ((unsigned int)0x00000100)
+#define ISO_11172_END_CODE ((unsigned int)0x000001b9)
+
+/* mpeg2 */
+#define PROGRAM_STREAM_MAP 0x1bc
+#define PRIVATE_STREAM_1 0x1bd
+#define PADDING_STREAM 0x1be
+#define PRIVATE_STREAM_2 0x1bf
+
+
+#define AUDIO_ID 0xc0
+#define VIDEO_ID 0xe0
+#define AC3_ID 0x80
+#define DTS_ID 0x8a
+#define LPCM_ID 0xa0
+#define SUB_ID 0x20
+
+#define STREAM_TYPE_VIDEO_MPEG1 0x01
+#define STREAM_TYPE_VIDEO_MPEG2 0x02
+#define STREAM_TYPE_AUDIO_MPEG1 0x03
+#define STREAM_TYPE_AUDIO_MPEG2 0x04
+#define STREAM_TYPE_PRIVATE_SECTION 0x05
+#define STREAM_TYPE_PRIVATE_DATA 0x06
+#define STREAM_TYPE_AUDIO_AAC 0x0f
+#define STREAM_TYPE_VIDEO_MPEG4 0x10
+#define STREAM_TYPE_VIDEO_H264 0x1b
+
+#define STREAM_TYPE_AUDIO_AC3 0x81
+#define STREAM_TYPE_AUDIO_DTS 0x8a
+
+static const int lpcm_freq_tab[4] = { 48000, 96000, 44100, 32000 };
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat mpeg1system_muxer;
+AVOutputFormat mpeg1vcd_muxer;
+AVOutputFormat mpeg2vob_muxer;
+AVOutputFormat mpeg2svcd_muxer;
+AVOutputFormat mpeg2dvd_muxer;
+
+static int put_pack_header(AVFormatContext *ctx,
+ uint8_t *buf, int64_t timestamp)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ PutBitContext pb;
+
+ init_put_bits(&pb, buf, 128);
+
+ put_bits(&pb, 32, PACK_START_CODE);
+ if (s->is_mpeg2) {
+ put_bits(&pb, 2, 0x1);
+ } else {
+ put_bits(&pb, 4, 0x2);
+ }
+ put_bits(&pb, 3, (uint32_t)((timestamp >> 30) & 0x07));
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 15, (uint32_t)((timestamp >> 15) & 0x7fff));
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 15, (uint32_t)((timestamp) & 0x7fff));
+ put_bits(&pb, 1, 1);
+ if (s->is_mpeg2) {
+ /* clock extension */
+ put_bits(&pb, 9, 0);
+ }
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 22, s->mux_rate);
+ put_bits(&pb, 1, 1);
+ if (s->is_mpeg2) {
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 5, 0x1f); /* reserved */
+ put_bits(&pb, 3, 0); /* stuffing length */
+ }
+ flush_put_bits(&pb);
+ return pbBufPtr(&pb) - pb.buf;
+}
+
+static int put_system_header(AVFormatContext *ctx, uint8_t *buf,int only_for_stream_id)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int size, i, private_stream_coded, id;
+ PutBitContext pb;
+
+ init_put_bits(&pb, buf, 128);
+
+ put_bits(&pb, 32, SYSTEM_HEADER_START_CODE);
+ put_bits(&pb, 16, 0);
+ put_bits(&pb, 1, 1);
+
+ put_bits(&pb, 22, s->mux_rate); /* maximum bit rate of the multiplexed stream */
+ put_bits(&pb, 1, 1); /* marker */
+ if (s->is_vcd && only_for_stream_id==VIDEO_ID) {
+ /* This header applies only to the video stream (see VCD standard p. IV-7)*/
+ put_bits(&pb, 6, 0);
+ } else
+ put_bits(&pb, 6, s->audio_bound);
+
+ if (s->is_vcd) {
+ /* see VCD standard, p. IV-7*/
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 1, 1);
+ } else {
+ put_bits(&pb, 1, 0); /* variable bitrate*/
+        put_bits(&pb, 1, 0); /* non-constrained bitstream */
+ }
+
+ if (s->is_vcd || s->is_dvd) {
+ /* see VCD standard p IV-7 */
+ put_bits(&pb, 1, 1); /* audio locked */
+ put_bits(&pb, 1, 1); /* video locked */
+ } else {
+ put_bits(&pb, 1, 0); /* audio locked */
+ put_bits(&pb, 1, 0); /* video locked */
+ }
+
+ put_bits(&pb, 1, 1); /* marker */
+
+ if (s->is_vcd && only_for_stream_id==AUDIO_ID) {
+ /* This header applies only to the audio stream (see VCD standard p. IV-7)*/
+ put_bits(&pb, 5, 0);
+ } else
+ put_bits(&pb, 5, s->video_bound);
+
+ if (s->is_dvd) {
+ put_bits(&pb, 1, 0); /* packet_rate_restriction_flag */
+ put_bits(&pb, 7, 0x7f); /* reserved byte */
+ } else
+ put_bits(&pb, 8, 0xff); /* reserved byte */
+
+ /* DVD-Video Stream_bound entries
+ id (0xB9) video, maximum P-STD for stream 0xE0. (P-STD_buffer_bound_scale = 1)
+ id (0xB8) audio, maximum P-STD for any MPEG audio (0xC0 to 0xC7) streams. If there are none set to 4096 (32x128). (P-STD_buffer_bound_scale = 0)
+ id (0xBD) private stream 1 (audio other than MPEG and subpictures). (P-STD_buffer_bound_scale = 1)
+ id (0xBF) private stream 2, NAV packs, set to 2x1024. */
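+    /* The 1-bit scale written before each 13-bit bound selects its unit:
+       P-STD_buffer_bound_scale 0 means 128-byte units, 1 means 1024-byte units,
+       hence the different divisors below. */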
+ if (s->is_dvd) {
+
+ int P_STD_max_video = 0;
+ int P_STD_max_mpeg_audio = 0;
+ int P_STD_max_mpeg_PS1 = 0;
+
+ for(i=0;i<ctx->nb_streams;i++) {
+ StreamInfo *stream = ctx->streams[i]->priv_data;
+
+ id = stream->id;
+ if (id == 0xbd && stream->max_buffer_size > P_STD_max_mpeg_PS1) {
+ P_STD_max_mpeg_PS1 = stream->max_buffer_size;
+ } else if (id >= 0xc0 && id <= 0xc7 && stream->max_buffer_size > P_STD_max_mpeg_audio) {
+ P_STD_max_mpeg_audio = stream->max_buffer_size;
+ } else if (id == 0xe0 && stream->max_buffer_size > P_STD_max_video) {
+ P_STD_max_video = stream->max_buffer_size;
+ }
+ }
+
+ /* video */
+ put_bits(&pb, 8, 0xb9); /* stream ID */
+ put_bits(&pb, 2, 3);
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 13, P_STD_max_video / 1024);
+
+ /* audio */
+ if (P_STD_max_mpeg_audio == 0)
+ P_STD_max_mpeg_audio = 4096;
+ put_bits(&pb, 8, 0xb8); /* stream ID */
+ put_bits(&pb, 2, 3);
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 13, P_STD_max_mpeg_audio / 128);
+
+ /* private stream 1 */
+ put_bits(&pb, 8, 0xbd); /* stream ID */
+ put_bits(&pb, 2, 3);
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 13, P_STD_max_mpeg_PS1 / 128);
+
+ /* private stream 2 */
+ put_bits(&pb, 8, 0xbf); /* stream ID */
+ put_bits(&pb, 2, 3);
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 13, 2);
+ }
+ else {
+ /* audio stream info */
+ private_stream_coded = 0;
+ for(i=0;i<ctx->nb_streams;i++) {
+ StreamInfo *stream = ctx->streams[i]->priv_data;
+
+
+            /* For VCDs, only include the stream info for the stream
+            that the pack containing this system header belongs to.
+            (see VCD standard p. IV-7) */
+ if ( !s->is_vcd || stream->id==only_for_stream_id
+ || only_for_stream_id==0) {
+
+ id = stream->id;
+ if (id < 0xc0) {
+ /* special case for private streams (AC3 use that) */
+ if (private_stream_coded)
+ continue;
+ private_stream_coded = 1;
+ id = 0xbd;
+ }
+ put_bits(&pb, 8, id); /* stream ID */
+ put_bits(&pb, 2, 3);
+ if (id < 0xe0) {
+ /* audio */
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 13, stream->max_buffer_size / 128);
+ } else {
+ /* video */
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 13, stream->max_buffer_size / 1024);
+ }
+ }
+ }
+ }
+
+ flush_put_bits(&pb);
+ size = pbBufPtr(&pb) - pb.buf;
+ /* patch packet size */
+ buf[4] = (size - 6) >> 8;
+ buf[5] = (size - 6) & 0xff;
+
+ return size;
+}
+
+static int get_system_header_size(AVFormatContext *ctx)
+{
+ int buf_index, i, private_stream_coded;
+ StreamInfo *stream;
+ MpegMuxContext *s = ctx->priv_data;
+
+ if (s->is_dvd)
+ return 18; // DVD-Video system headers are 18 bytes fixed length.
+
+ buf_index = 12;
+ private_stream_coded = 0;
+ for(i=0;i<ctx->nb_streams;i++) {
+ stream = ctx->streams[i]->priv_data;
+ if (stream->id < 0xc0) {
+ if (private_stream_coded)
+ continue;
+ private_stream_coded = 1;
+ }
+ buf_index += 3;
+ }
+ return buf_index;
+}
+
+static int mpeg_mux_init(AVFormatContext *ctx)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int bitrate, i, mpa_id, mpv_id, mps_id, ac3_id, dts_id, lpcm_id, j;
+ AVStream *st;
+ StreamInfo *stream;
+ int audio_bitrate;
+ int video_bitrate;
+
+ s->packet_number = 0;
+ s->is_vcd = (ctx->oformat == &mpeg1vcd_muxer);
+ s->is_svcd = (ctx->oformat == &mpeg2svcd_muxer);
+ s->is_mpeg2 = (ctx->oformat == &mpeg2vob_muxer || ctx->oformat == &mpeg2svcd_muxer || ctx->oformat == &mpeg2dvd_muxer);
+ s->is_dvd = (ctx->oformat == &mpeg2dvd_muxer);
+
+ if(ctx->packet_size)
+ s->packet_size = ctx->packet_size;
+ else
+ s->packet_size = 2048;
+
+ s->vcd_padding_bytes_written = 0;
+ s->vcd_padding_bitrate=0;
+
+ s->audio_bound = 0;
+ s->video_bound = 0;
+ mpa_id = AUDIO_ID;
+ ac3_id = AC3_ID;
+ dts_id = DTS_ID;
+ mpv_id = VIDEO_ID;
+ mps_id = SUB_ID;
+ lpcm_id = LPCM_ID;
+ for(i=0;i<ctx->nb_streams;i++) {
+ st = ctx->streams[i];
+ stream = av_mallocz(sizeof(StreamInfo));
+ if (!stream)
+ goto fail;
+ st->priv_data = stream;
+
+ av_set_pts_info(st, 64, 1, 90000);
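+        /* PES timestamps are expressed in the 90 kHz MPEG system clock */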
+
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (st->codec->codec_id == CODEC_ID_AC3) {
+ stream->id = ac3_id++;
+ } else if (st->codec->codec_id == CODEC_ID_DTS) {
+ stream->id = dts_id++;
+ } else if (st->codec->codec_id == CODEC_ID_PCM_S16BE) {
+ stream->id = lpcm_id++;
+ for(j = 0; j < 4; j++) {
+ if (lpcm_freq_tab[j] == st->codec->sample_rate)
+ break;
+ }
+ if (j == 4)
+ goto fail;
+ if (st->codec->channels > 8)
+ return -1;
+ stream->lpcm_header[0] = 0x0c;
+ stream->lpcm_header[1] = (st->codec->channels - 1) | (j << 4);
+ stream->lpcm_header[2] = 0x80;
+ stream->lpcm_align = st->codec->channels * 2;
+ } else {
+ stream->id = mpa_id++;
+ }
+
+ /* This value HAS to be used for VCD (see VCD standard, p. IV-7).
+ Right now it is also used for everything else.*/
+ stream->max_buffer_size = 4 * 1024;
+ s->audio_bound++;
+ break;
+ case CODEC_TYPE_VIDEO:
+ stream->id = mpv_id++;
+ if (st->codec->rc_buffer_size)
+ stream->max_buffer_size = 6*1024 + st->codec->rc_buffer_size/8;
+ else
+ stream->max_buffer_size = 230*1024; //FIXME this is probably too small as default
+#if 0
+ /* see VCD standard, p. IV-7*/
+ stream->max_buffer_size = 46 * 1024;
+ else
+ /* This value HAS to be used for SVCD (see SVCD standard, p. 26 V.2.3.2).
+ Right now it is also used for everything else.*/
+ stream->max_buffer_size = 230 * 1024;
+#endif
+ s->video_bound++;
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ stream->id = mps_id++;
+ stream->max_buffer_size = 16 * 1024;
+ break;
+ default:
+ return -1;
+ }
+ av_fifo_init(&stream->fifo, 16);
+ }
+ bitrate = 0;
+ audio_bitrate = 0;
+ video_bitrate = 0;
+ for(i=0;i<ctx->nb_streams;i++) {
+ int codec_rate;
+ st = ctx->streams[i];
+ stream = (StreamInfo*) st->priv_data;
+
+ if(st->codec->rc_max_rate || stream->id==VIDEO_ID)
+ codec_rate= st->codec->rc_max_rate;
+ else
+ codec_rate= st->codec->bit_rate;
+
+ if(!codec_rate)
+ codec_rate= (1<<21)*8*50/ctx->nb_streams;
+
+ bitrate += codec_rate;
+
+ if (stream->id==AUDIO_ID)
+ audio_bitrate += codec_rate;
+ else if (stream->id==VIDEO_ID)
+ video_bitrate += codec_rate;
+ }
+
+ if(ctx->mux_rate){
+ s->mux_rate= (ctx->mux_rate + (8 * 50) - 1) / (8 * 50);
+ } else {
+ /* we increase slightly the bitrate to take into account the
+ headers. XXX: compute it exactly */
+ bitrate += bitrate*5/100;
+ bitrate += 10000;
+ s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50);
+ }
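+    /* mux_rate is stored in units of 50 bytes/s, i.e. bits/s divided by 400 and
+       rounded up; e.g. a requested ctx->mux_rate of 1411200 bit/s yields 3528,
+       the value the VCD standard mandates (2352 * 75 / 50). */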
+
+ if (s->is_vcd) {
+ double overhead_rate;
+
+ /* The VCD standard mandates that the mux_rate field is 3528
+ (see standard p. IV-6).
+ The value is actually "wrong", i.e. if you calculate
+ it using the normal formula and the 75 sectors per second transfer
+ rate you get a different value because the real pack size is 2324,
+ not 2352. But the standard explicitly specifies that the mux_rate
+ field in the header must have this value.*/
+// s->mux_rate=2352 * 75 / 50; /* = 3528*/
+
+ /* The VCD standard states that the muxed stream must be
+ exactly 75 packs / second (the data rate of a single speed cdrom).
+ Since the video bitrate (probably 1150000 bits/sec) will be below
+ the theoretical maximum we have to add some padding packets
+ to make up for the lower data rate.
+ (cf. VCD standard p. IV-6 )*/
+
+ /* Add the header overhead to the data rate.
+ 2279 data bytes per audio pack, 2294 data bytes per video pack*/
+ overhead_rate = ((audio_bitrate / 8.0) / 2279) * (2324 - 2279);
+ overhead_rate += ((video_bitrate / 8.0) / 2294) * (2324 - 2294);
+ overhead_rate *= 8;
+
+ /* Add padding so that the full bitrate is 2324*75 bytes/sec */
+ s->vcd_padding_bitrate = 2324 * 75 * 8 - (bitrate + overhead_rate);
+ }
+
+ if (s->is_vcd || s->is_mpeg2)
+ /* every packet */
+ s->pack_header_freq = 1;
+ else
+ /* every 2 seconds */
+ s->pack_header_freq = 2 * bitrate / s->packet_size / 8;
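+    /* bitrate/8 is bytes/s and dividing by packet_size gives packets/s, so the
+       expression above is the number of packets in a 2 second interval */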
+
+ /* the above seems to make pack_header_freq zero sometimes */
+ if (s->pack_header_freq == 0)
+ s->pack_header_freq = 1;
+
+ if (s->is_mpeg2)
+ /* every 200 packets. Need to look at the spec. */
+ s->system_header_freq = s->pack_header_freq * 40;
+ else if (s->is_vcd)
+ /* the standard mandates that there are only two system headers
+ in the whole file: one in the first packet of each stream.
+ (see standard p. IV-7 and IV-8) */
+ s->system_header_freq = 0x7fffffff;
+ else
+ s->system_header_freq = s->pack_header_freq * 5;
+
+ for(i=0;i<ctx->nb_streams;i++) {
+ stream = ctx->streams[i]->priv_data;
+ stream->packet_number = 0;
+ }
+ s->system_header_size = get_system_header_size(ctx);
+ s->last_scr = 0;
+ return 0;
+ fail:
+ for(i=0;i<ctx->nb_streams;i++) {
+ av_free(ctx->streams[i]->priv_data);
+ }
+ return -ENOMEM;
+}
+
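+/* A 33-bit PES/pack timestamp is stored in 5 bytes: a 4-bit prefix plus bits
+   32..30 in the first byte, then bits 29..15 and bits 14..0 as two 16-bit
+   words, each of the three groups followed by a marker bit set to 1. */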
+static inline void put_timestamp(ByteIOContext *pb, int id, int64_t timestamp)
+{
+ put_byte(pb,
+ (id << 4) |
+ (((timestamp >> 30) & 0x07) << 1) |
+ 1);
+ put_be16(pb, (uint16_t)((((timestamp >> 15) & 0x7fff) << 1) | 1));
+ put_be16(pb, (uint16_t)((((timestamp) & 0x7fff) << 1) | 1));
+}
+
+
+/* return the number of padding bytes that should be inserted into
+ the multiplexed stream.*/
+static int get_vcd_padding_size(AVFormatContext *ctx, int64_t pts)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int pad_bytes = 0;
+
+ if (s->vcd_padding_bitrate > 0 && pts!=AV_NOPTS_VALUE)
+ {
+ int64_t full_pad_bytes;
+
+ full_pad_bytes = (int64_t)((s->vcd_padding_bitrate * (pts / 90000.0)) / 8.0); //FIXME this is wrong
+ pad_bytes = (int) (full_pad_bytes - s->vcd_padding_bytes_written);
+
+ if (pad_bytes<0)
+ /* might happen if we have already padded to a later timestamp. This
+ can occur if another stream has already advanced further.*/
+ pad_bytes=0;
+ }
+
+ return pad_bytes;
+}
+
+
+#if 0 /* unused, remove? */
+/* return the exact available payload size for the next packet for
+ stream 'stream_index'. 'pts' and 'dts' are only used to know if
+ timestamps are needed in the packet header. */
+static int get_packet_payload_size(AVFormatContext *ctx, int stream_index,
+ int64_t pts, int64_t dts)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int buf_index;
+ StreamInfo *stream;
+
+ stream = ctx->streams[stream_index]->priv_data;
+
+ buf_index = 0;
+ if (((s->packet_number % s->pack_header_freq) == 0)) {
+ /* pack header size */
+ if (s->is_mpeg2)
+ buf_index += 14;
+ else
+ buf_index += 12;
+
+ if (s->is_vcd) {
+ /* there is exactly one system header for each stream in a VCD MPEG,
+ One in the very first video packet and one in the very first
+ audio packet (see VCD standard p. IV-7 and IV-8).*/
+
+ if (stream->packet_number==0)
+ /* The system headers refer only to the stream they occur in,
+ so they have a constant size.*/
+ buf_index += 15;
+
+ } else {
+ if ((s->packet_number % s->system_header_freq) == 0)
+ buf_index += s->system_header_size;
+ }
+ }
+
+ if ((s->is_vcd && stream->packet_number==0)
+ || (s->is_svcd && s->packet_number==0))
+ /* the first pack of each stream contains only the pack header,
+ the system header and some padding (see VCD standard p. IV-6)
+ Add the padding size, so that the actual payload becomes 0.*/
+ buf_index += s->packet_size - buf_index;
+ else {
+ /* packet header size */
+ buf_index += 6;
+ if (s->is_mpeg2) {
+ buf_index += 3;
+ if (stream->packet_number==0)
+ buf_index += 3; /* PES extension */
+ buf_index += 1; /* obligatory stuffing byte */
+ }
+ if (pts != AV_NOPTS_VALUE) {
+ if (dts != pts)
+ buf_index += 5 + 5;
+ else
+ buf_index += 5;
+
+ } else {
+ if (!s->is_mpeg2)
+ buf_index++;
+ }
+
+ if (stream->id < 0xc0) {
+ /* AC3/LPCM private data header */
+ buf_index += 4;
+ if (stream->id >= 0xa0) {
+ int n;
+ buf_index += 3;
+ /* NOTE: we round the payload size to an integer number of
+ LPCM samples */
+ n = (s->packet_size - buf_index) % stream->lpcm_align;
+ if (n)
+ buf_index += (stream->lpcm_align - n);
+ }
+ }
+
+ if (s->is_vcd && stream->id == AUDIO_ID)
+ /* The VCD standard demands that 20 zero bytes follow
+ each audio packet (see standard p. IV-8).*/
+ buf_index+=20;
+ }
+ return s->packet_size - buf_index;
+}
+#endif
+
+/* Write an MPEG padding packet header. */
+static void put_padding_packet(AVFormatContext *ctx, ByteIOContext *pb,int packet_bytes)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int i;
+
+ put_be32(pb, PADDING_STREAM);
+ put_be16(pb, packet_bytes - 6);
+ if (!s->is_mpeg2) {
+ put_byte(pb, 0x0f);
+ packet_bytes -= 7;
+ } else
+ packet_bytes -= 6;
+
+ for(i=0;i<packet_bytes;i++)
+ put_byte(pb, 0xff);
+}
+
+static int get_nb_frames(AVFormatContext *ctx, StreamInfo *stream, int len){
+ int nb_frames=0;
+ PacketDesc *pkt_desc= stream->premux_packet;
+
+ while(len>0){
+ if(pkt_desc->size == pkt_desc->unwritten_size)
+ nb_frames++;
+ len -= pkt_desc->unwritten_size;
+ pkt_desc= pkt_desc->next;
+ }
+
+ return nb_frames;
+}
+
+/* flush the packet on stream stream_index */
+static int flush_packet(AVFormatContext *ctx, int stream_index,
+ int64_t pts, int64_t dts, int64_t scr, int trailer_size)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ StreamInfo *stream = ctx->streams[stream_index]->priv_data;
+ uint8_t *buf_ptr;
+ int size, payload_size, startcode, id, stuffing_size, i, header_len;
+ int packet_size;
+ uint8_t buffer[128];
+ int zero_trail_bytes = 0;
+ int pad_packet_bytes = 0;
+ int pes_flags;
+ int general_pack = 0; /*"general" pack without data specific to one stream?*/
+ int nb_frames;
+
+ id = stream->id;
+
+#if 0
+ printf("packet ID=%2x PTS=%0.3f\n",
+ id, pts / 90000.0);
+#endif
+
+ buf_ptr = buffer;
+
+ if ((s->packet_number % s->pack_header_freq) == 0 || s->last_scr != scr) {
+ /* output pack and systems header if needed */
+ size = put_pack_header(ctx, buf_ptr, scr);
+ buf_ptr += size;
+ s->last_scr= scr;
+
+ if (s->is_vcd) {
+ /* there is exactly one system header for each stream in a VCD MPEG,
+ One in the very first video packet and one in the very first
+ audio packet (see VCD standard p. IV-7 and IV-8).*/
+
+ if (stream->packet_number==0) {
+ size = put_system_header(ctx, buf_ptr, id);
+ buf_ptr += size;
+ }
+ } else if (s->is_dvd) {
+ if (stream->align_iframe || s->packet_number == 0){
+ int PES_bytes_to_fill = s->packet_size - size - 10;
+
+ if (pts != AV_NOPTS_VALUE) {
+ if (dts != pts)
+ PES_bytes_to_fill -= 5 + 5;
+ else
+ PES_bytes_to_fill -= 5;
+ }
+
+ if (stream->bytes_to_iframe == 0 || s->packet_number == 0) {
+ size = put_system_header(ctx, buf_ptr, 0);
+ buf_ptr += size;
+ size = buf_ptr - buffer;
+ put_buffer(&ctx->pb, buffer, size);
+
+ put_be32(&ctx->pb, PRIVATE_STREAM_2);
+ put_be16(&ctx->pb, 0x03d4); // length
+ put_byte(&ctx->pb, 0x00); // substream ID, 00=PCI
+ for (i = 0; i < 979; i++)
+ put_byte(&ctx->pb, 0x00);
+
+ put_be32(&ctx->pb, PRIVATE_STREAM_2);
+ put_be16(&ctx->pb, 0x03fa); // length
+ put_byte(&ctx->pb, 0x01); // substream ID, 01=DSI
+ for (i = 0; i < 1017; i++)
+ put_byte(&ctx->pb, 0x00);
+
+ memset(buffer, 0, 128);
+ buf_ptr = buffer;
+ s->packet_number++;
+ stream->align_iframe = 0;
+ scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
+ size = put_pack_header(ctx, buf_ptr, scr);
+ s->last_scr= scr;
+ buf_ptr += size;
+ /* GOP Start */
+ } else if (stream->bytes_to_iframe < PES_bytes_to_fill) {
+ pad_packet_bytes = PES_bytes_to_fill - stream->bytes_to_iframe;
+ }
+ }
+ } else {
+ if ((s->packet_number % s->system_header_freq) == 0) {
+ size = put_system_header(ctx, buf_ptr, 0);
+ buf_ptr += size;
+ }
+ }
+ }
+ size = buf_ptr - buffer;
+ put_buffer(&ctx->pb, buffer, size);
+
+ packet_size = s->packet_size - size;
+
+ if (s->is_vcd && id == AUDIO_ID)
+ /* The VCD standard demands that 20 zero bytes follow
+ each audio pack (see standard p. IV-8).*/
+ zero_trail_bytes += 20;
+
+ if ((s->is_vcd && stream->packet_number==0)
+ || (s->is_svcd && s->packet_number==0)) {
+ /* for VCD the first pack of each stream contains only the pack header,
+ the system header and lots of padding (see VCD standard p. IV-6).
+ In the case of an audio pack, 20 zero bytes are also added at
+ the end.*/
+ /* For SVCD we fill the very first pack to increase compatibility with
+ some DVD players. Not mandated by the standard.*/
+ if (s->is_svcd)
+ general_pack = 1; /* the system header refers to both streams and no stream data*/
+ pad_packet_bytes = packet_size - zero_trail_bytes;
+ }
+
+ packet_size -= pad_packet_bytes + zero_trail_bytes;
+
+ if (packet_size > 0) {
+
+ /* packet header size */
+ packet_size -= 6;
+
+ /* packet header */
+ if (s->is_mpeg2) {
+ header_len = 3;
+ if (stream->packet_number==0)
+ header_len += 3; /* PES extension */
+ header_len += 1; /* obligatory stuffing byte */
+ } else {
+ header_len = 0;
+ }
+ if (pts != AV_NOPTS_VALUE) {
+ if (dts != pts)
+ header_len += 5 + 5;
+ else
+ header_len += 5;
+ } else {
+ if (!s->is_mpeg2)
+ header_len++;
+ }
+
+ payload_size = packet_size - header_len;
+ if (id < 0xc0) {
+ startcode = PRIVATE_STREAM_1;
+ payload_size -= 1;
+ if (id >= 0x40) {
+ payload_size -= 3;
+ if (id >= 0xa0)
+ payload_size -= 3;
+ }
+ } else {
+ startcode = 0x100 + id;
+ }
+
+ stuffing_size = payload_size - av_fifo_size(&stream->fifo);
+
+        // first byte doesn't fit -> reset pts/dts + stuffing
+ if(payload_size <= trailer_size && pts != AV_NOPTS_VALUE){
+ int timestamp_len=0;
+ if(dts != pts)
+ timestamp_len += 5;
+ if(pts != AV_NOPTS_VALUE)
+ timestamp_len += s->is_mpeg2 ? 5 : 4;
+ pts=dts= AV_NOPTS_VALUE;
+ header_len -= timestamp_len;
+ if (s->is_dvd && stream->align_iframe) {
+ pad_packet_bytes += timestamp_len;
+ packet_size -= timestamp_len;
+ } else {
+ payload_size += timestamp_len;
+ }
+ stuffing_size += timestamp_len;
+ if(payload_size > trailer_size)
+ stuffing_size += payload_size - trailer_size;
+ }
+
+ if (pad_packet_bytes > 0 && pad_packet_bytes <= 7) { // can't use padding, so use stuffing
+ packet_size += pad_packet_bytes;
+ payload_size += pad_packet_bytes; // undo the previous adjustment
+ if (stuffing_size < 0) {
+ stuffing_size = pad_packet_bytes;
+ } else {
+ stuffing_size += pad_packet_bytes;
+ }
+ pad_packet_bytes = 0;
+ }
+
+ if (stuffing_size < 0)
+ stuffing_size = 0;
+ if (stuffing_size > 16) { /*<=16 for MPEG-1, <=32 for MPEG-2*/
+ pad_packet_bytes += stuffing_size;
+ packet_size -= stuffing_size;
+ payload_size -= stuffing_size;
+ stuffing_size = 0;
+ }
+
+ nb_frames= get_nb_frames(ctx, stream, payload_size - stuffing_size);
+
+ put_be32(&ctx->pb, startcode);
+
+ put_be16(&ctx->pb, packet_size);
+
+ if (!s->is_mpeg2)
+ for(i=0;i<stuffing_size;i++)
+ put_byte(&ctx->pb, 0xff);
+
+ if (s->is_mpeg2) {
+ put_byte(&ctx->pb, 0x80); /* mpeg2 id */
+
+ pes_flags=0;
+
+ if (pts != AV_NOPTS_VALUE) {
+ pes_flags |= 0x80;
+ if (dts != pts)
+ pes_flags |= 0x40;
+ }
+
+ /* Both the MPEG-2 and the SVCD standards demand that the
+ P-STD_buffer_size field be included in the first packet of
+ every stream. (see SVCD standard p. 26 V.2.3.1 and V.2.3.2
+ and MPEG-2 standard 2.7.7) */
+ if (stream->packet_number == 0)
+ pes_flags |= 0x01;
+
+ put_byte(&ctx->pb, pes_flags); /* flags */
+ put_byte(&ctx->pb, header_len - 3 + stuffing_size);
+
+ if (pes_flags & 0x80) /*write pts*/
+ put_timestamp(&ctx->pb, (pes_flags & 0x40) ? 0x03 : 0x02, pts);
+ if (pes_flags & 0x40) /*write dts*/
+ put_timestamp(&ctx->pb, 0x01, dts);
+
+ if (pes_flags & 0x01) { /*write pes extension*/
+ put_byte(&ctx->pb, 0x10); /* flags */
+
+ /* P-STD buffer info */
+ if (id == AUDIO_ID)
+ put_be16(&ctx->pb, 0x4000 | stream->max_buffer_size/128);
+ else
+ put_be16(&ctx->pb, 0x6000 | stream->max_buffer_size/1024);
+ }
+
+ } else {
+ if (pts != AV_NOPTS_VALUE) {
+ if (dts != pts) {
+ put_timestamp(&ctx->pb, 0x03, pts);
+ put_timestamp(&ctx->pb, 0x01, dts);
+ } else {
+ put_timestamp(&ctx->pb, 0x02, pts);
+ }
+ } else {
+ put_byte(&ctx->pb, 0x0f);
+ }
+ }
+
+ if (s->is_mpeg2) {
+ /* special stuffing byte that is always written
+ to prevent accidental generation of start codes. */
+ put_byte(&ctx->pb, 0xff);
+
+ for(i=0;i<stuffing_size;i++)
+ put_byte(&ctx->pb, 0xff);
+ }
+
+ if (startcode == PRIVATE_STREAM_1) {
+ put_byte(&ctx->pb, id);
+ if (id >= 0xa0) {
+ /* LPCM (XXX: check nb_frames) */
+ put_byte(&ctx->pb, 7);
+ put_be16(&ctx->pb, 4); /* skip 3 header bytes */
+ put_byte(&ctx->pb, stream->lpcm_header[0]);
+ put_byte(&ctx->pb, stream->lpcm_header[1]);
+ put_byte(&ctx->pb, stream->lpcm_header[2]);
+ } else if (id >= 0x40) {
+ /* AC3 */
+ put_byte(&ctx->pb, nb_frames);
+ put_be16(&ctx->pb, trailer_size+1);
+ }
+ }
+
+ /* output data */
+ if(av_fifo_generic_read(&stream->fifo, payload_size - stuffing_size, &put_buffer, &ctx->pb) < 0)
+ return -1;
+ stream->bytes_to_iframe -= payload_size - stuffing_size;
+ }else{
+ payload_size=
+ stuffing_size= 0;
+ }
+
+ if (pad_packet_bytes > 0)
+ put_padding_packet(ctx,&ctx->pb, pad_packet_bytes);
+
+ for(i=0;i<zero_trail_bytes;i++)
+ put_byte(&ctx->pb, 0x00);
+
+ put_flush_packet(&ctx->pb);
+
+ s->packet_number++;
+
+ /* only increase the stream packet number if this pack actually contains
+ something that is specific to this stream! I.e. a dedicated header
+ or some data.*/
+ if (!general_pack)
+ stream->packet_number++;
+
+ return payload_size - stuffing_size;
+}
+
+static void put_vcd_padding_sector(AVFormatContext *ctx)
+{
+ /* There are two ways to do this padding: writing a sector/pack
+ of 0 values, or writing an MPEG padding pack. Both seem to
+ work with most decoders, BUT the VCD standard only allows a 0-sector
+ (see standard p. IV-4, IV-5).
+ So a 0-sector it is...*/
+
+ MpegMuxContext *s = ctx->priv_data;
+ int i;
+
+ for(i=0;i<s->packet_size;i++)
+ put_byte(&ctx->pb, 0);
+
+ s->vcd_padding_bytes_written += s->packet_size;
+
+ put_flush_packet(&ctx->pb);
+
+ /* increasing the packet number is correct. The SCR of the following packs
+ is calculated from the packet_number and it has to include the padding
+ sector (it represents the sector index, not the MPEG pack index)
+ (see VCD standard p. IV-6)*/
+ s->packet_number++;
+}
+
+#if 0 /* unused, remove? */
+static int64_t get_vcd_scr(AVFormatContext *ctx,int stream_index,int64_t pts)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int64_t scr;
+
+ /* Since the data delivery rate is constant, SCR is computed
+ using the formula C + i * 1200 where C is the start constant
+ and i is the pack index.
+ It is recommended that SCR 0 is at the beginning of the VCD front
+ margin (a sequence of empty Form 2 sectors on the CD).
+ It is recommended that the front margin is 30 sectors long, so
+ we use C = 30*1200 = 36000
+ (Note that even if the front margin is not 30 sectors the file
+ will still be correct according to the standard. It just won't have
+ the "recommended" value).*/
+ scr = 36000 + s->packet_number * 1200;
+
+ return scr;
+}
+#endif
+
+static int remove_decoded_packets(AVFormatContext *ctx, int64_t scr){
+// MpegMuxContext *s = ctx->priv_data;
+ int i;
+
+ for(i=0; i<ctx->nb_streams; i++){
+ AVStream *st = ctx->streams[i];
+ StreamInfo *stream = st->priv_data;
+ PacketDesc *pkt_desc= stream->predecode_packet;
+
+ while(pkt_desc && scr > pkt_desc->dts){ //FIXME > vs >=
+ if(stream->buffer_index < pkt_desc->size ||
+ stream->predecode_packet == stream->premux_packet){
+ av_log(ctx, AV_LOG_ERROR, "buffer underflow\n");
+ break;
+ }
+ stream->buffer_index -= pkt_desc->size;
+
+ stream->predecode_packet= pkt_desc->next;
+ av_freep(&pkt_desc);
+ }
+ }
+
+ return 0;
+}
+
+static int output_packet(AVFormatContext *ctx, int flush){
+ MpegMuxContext *s = ctx->priv_data;
+ AVStream *st;
+ StreamInfo *stream;
+ int i, avail_space, es_size, trailer_size;
+ int best_i= -1;
+ int best_score= INT_MIN;
+ int ignore_constraints=0;
+ int64_t scr= s->last_scr;
+ PacketDesc *timestamp_packet;
+ const int64_t max_delay= av_rescale(ctx->max_delay, 90000, AV_TIME_BASE);
+
+retry:
+ for(i=0; i<ctx->nb_streams; i++){
+ AVStream *st = ctx->streams[i];
+ StreamInfo *stream = st->priv_data;
+ const int avail_data= av_fifo_size(&stream->fifo);
+ const int space= stream->max_buffer_size - stream->buffer_index;
+ int rel_space= 1024*space / stream->max_buffer_size;
+ PacketDesc *next_pkt= stream->premux_packet;
+
+ /* for subtitle, a single PES packet must be generated,
+ so we flush after every single subtitle packet */
+ if(s->packet_size > avail_data && !flush
+ && st->codec->codec_type != CODEC_TYPE_SUBTITLE)
+ return 0;
+ if(avail_data==0)
+ continue;
+ assert(avail_data>0);
+
+ if(space < s->packet_size && !ignore_constraints)
+ continue;
+
+ if(next_pkt && next_pkt->dts - scr > max_delay)
+ continue;
+
+ if(rel_space > best_score){
+ best_score= rel_space;
+ best_i = i;
+ avail_space= space;
+ }
+ }
+
+ if(best_i < 0){
+ int64_t best_dts= INT64_MAX;
+
+ for(i=0; i<ctx->nb_streams; i++){
+ AVStream *st = ctx->streams[i];
+ StreamInfo *stream = st->priv_data;
+ PacketDesc *pkt_desc= stream->predecode_packet;
+ if(pkt_desc && pkt_desc->dts < best_dts)
+ best_dts= pkt_desc->dts;
+ }
+
+#if 0
+ av_log(ctx, AV_LOG_DEBUG, "bumping scr, scr:%f, dts:%f\n",
+ scr/90000.0, best_dts/90000.0);
+#endif
+ if(best_dts == INT64_MAX)
+ return 0;
+
+ if(scr >= best_dts+1 && !ignore_constraints){
+ av_log(ctx, AV_LOG_ERROR, "packet too large, ignoring buffer limits to mux it\n");
+ ignore_constraints= 1;
+ }
+ scr= FFMAX(best_dts+1, scr);
+ if(remove_decoded_packets(ctx, scr) < 0)
+ return -1;
+ goto retry;
+ }
+
+ assert(best_i >= 0);
+
+ st = ctx->streams[best_i];
+ stream = st->priv_data;
+
+ assert(av_fifo_size(&stream->fifo) > 0);
+
+ assert(avail_space >= s->packet_size || ignore_constraints);
+
+ timestamp_packet= stream->premux_packet;
+ if(timestamp_packet->unwritten_size == timestamp_packet->size){
+ trailer_size= 0;
+ }else{
+ trailer_size= timestamp_packet->unwritten_size;
+ timestamp_packet= timestamp_packet->next;
+ }
+
+ if(timestamp_packet){
+//av_log(ctx, AV_LOG_DEBUG, "dts:%f pts:%f scr:%f stream:%d\n", timestamp_packet->dts/90000.0, timestamp_packet->pts/90000.0, scr/90000.0, best_i);
+ es_size= flush_packet(ctx, best_i, timestamp_packet->pts, timestamp_packet->dts, scr, trailer_size);
+ }else{
+ assert(av_fifo_size(&stream->fifo) == trailer_size);
+ es_size= flush_packet(ctx, best_i, AV_NOPTS_VALUE, AV_NOPTS_VALUE, scr, trailer_size);
+ }
+
+ if (s->is_vcd) {
+ /* Write one or more padding sectors, if necessary, to reach
+ the constant overall bitrate.*/
+ int vcd_pad_bytes;
+
+ while((vcd_pad_bytes = get_vcd_padding_size(ctx,stream->premux_packet->pts) ) >= s->packet_size){ //FIXME pts cannot be correct here
+ put_vcd_padding_sector(ctx);
+ s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
+ }
+ }
+
+ stream->buffer_index += es_size;
+ s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
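+    /* mux_rate is in units of 50 bytes/s, so packet_size*90000/(mux_rate*50)
+       is the duration of one pack in 90 kHz clock ticks */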
+
+ while(stream->premux_packet && stream->premux_packet->unwritten_size <= es_size){
+ es_size -= stream->premux_packet->unwritten_size;
+ stream->premux_packet= stream->premux_packet->next;
+ }
+ if(es_size)
+ stream->premux_packet->unwritten_size -= es_size;
+
+ if(remove_decoded_packets(ctx, s->last_scr) < 0)
+ return -1;
+
+ return 1;
+}
+
+static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int stream_index= pkt->stream_index;
+ int size= pkt->size;
+ uint8_t *buf= pkt->data;
+ AVStream *st = ctx->streams[stream_index];
+ StreamInfo *stream = st->priv_data;
+ int64_t pts, dts;
+ PacketDesc *pkt_desc;
+ const int preload= av_rescale(ctx->preload, 90000, AV_TIME_BASE);
+ const int is_iframe = st->codec->codec_type == CODEC_TYPE_VIDEO && (pkt->flags & PKT_FLAG_KEY);
+
+ pts= pkt->pts;
+ dts= pkt->dts;
+
+ if(pts != AV_NOPTS_VALUE) pts += preload;
+ if(dts != AV_NOPTS_VALUE) dts += preload;
+
+//av_log(ctx, AV_LOG_DEBUG, "dts:%f pts:%f flags:%d stream:%d nopts:%d\n", dts/90000.0, pts/90000.0, pkt->flags, pkt->stream_index, pts != AV_NOPTS_VALUE);
+ if (!stream->premux_packet)
+ stream->next_packet = &stream->premux_packet;
+ *stream->next_packet=
+ pkt_desc= av_mallocz(sizeof(PacketDesc));
+ pkt_desc->pts= pts;
+ pkt_desc->dts= dts;
+ pkt_desc->unwritten_size=
+ pkt_desc->size= size;
+ if(!stream->predecode_packet)
+ stream->predecode_packet= pkt_desc;
+ stream->next_packet= &pkt_desc->next;
+
+ av_fifo_realloc(&stream->fifo, av_fifo_size(&stream->fifo) + size + 1);
+
+ if (s->is_dvd){
+ if (is_iframe && (s->packet_number == 0 || (pts - stream->vobu_start_pts >= 36000))) { // min VOBU length 0.4 seconds (mpucoder)
+ stream->bytes_to_iframe = av_fifo_size(&stream->fifo);
+ stream->align_iframe = 1;
+ stream->vobu_start_pts = pts;
+ } else {
+ stream->align_iframe = 0;
+ }
+ }
+
+ av_fifo_write(&stream->fifo, buf, size);
+
+ for(;;){
+ int ret= output_packet(ctx, 0);
+ if(ret<=0)
+ return ret;
+ }
+}
+
+static int mpeg_mux_end(AVFormatContext *ctx)
+{
+// MpegMuxContext *s = ctx->priv_data;
+ StreamInfo *stream;
+ int i;
+
+ for(;;){
+ int ret= output_packet(ctx, 1);
+ if(ret<0)
+ return ret;
+ else if(ret==0)
+ break;
+ }
+
+ /* End header according to MPEG1 systems standard. We do not write
+ it as it is usually not needed by decoders and because it
+ complicates MPEG stream concatenation. */
+ //put_be32(&ctx->pb, ISO_11172_END_CODE);
+ //put_flush_packet(&ctx->pb);
+
+ for(i=0;i<ctx->nb_streams;i++) {
+ stream = ctx->streams[i]->priv_data;
+
+ assert(av_fifo_size(&stream->fifo) == 0);
+ av_fifo_free(&stream->fifo);
+ }
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/*********************************************/
+/* demux code */
+
+#define MAX_SYNC_SIZE 100000
+
+static int cdxa_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'R' && p->buf[1] == 'I' &&
+ p->buf[2] == 'F' && p->buf[3] == 'F' &&
+ p->buf[8] == 'C' && p->buf[9] == 'D' &&
+ p->buf[10] == 'X' && p->buf[11] == 'A')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int mpegps_probe(AVProbeData *p)
+{
+ uint32_t code= -1;
+ int sys=0, pspack=0, priv1=0, vid=0, audio=0;
+ int i;
+ int score=0;
+
+ score = cdxa_probe(p);
+ if (score > 0) return score;
+
+ /* Search for MPEG stream */
+ for(i=0; i<p->buf_size; i++){
+ code = (code<<8) + p->buf[i];
+ if ((code & 0xffffff00) == 0x100) {
+ if(code == SYSTEM_HEADER_START_CODE) sys++;
+ else if(code == PRIVATE_STREAM_1) priv1++;
+ else if(code == PACK_START_CODE) pspack++;
+ else if((code & 0xf0) == VIDEO_ID) vid++;
+ else if((code & 0xe0) == AUDIO_ID) audio++;
+ }
+ }
+
+    if(vid || audio) /* invalid VDR files and short PES streams */
+ score= AVPROBE_SCORE_MAX/4;
+
+//av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d\n", sys, priv1, pspack,vid, audio);
+ if(sys && sys*9 <= pspack*10)
+ return AVPROBE_SCORE_MAX/2+2; // +1 for .mpg
+ if((priv1 || vid || audio) && (priv1+vid+audio)*9 <= pspack*10)
+ return AVPROBE_SCORE_MAX/2+2; // +1 for .mpg
+ if((!!vid ^ !!audio) && (audio+vid > 1) && !sys && !pspack) /* PES stream */
+ return AVPROBE_SCORE_MAX/2+2;
+
+ //02-Penguin.flac has sys:0 priv1:0 pspack:0 vid:0 audio:1
+ return score;
+}
+
+
+typedef struct MpegDemuxContext {
+ int32_t header_state;
+ unsigned char psm_es_type[256];
+} MpegDemuxContext;
+
+static int mpegps_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MpegDemuxContext *m = s->priv_data;
+ m->header_state = 0xff;
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+
+ /* no need to do more */
+ return 0;
+}
+
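+/* Read the 5-byte PES timestamp field (the inverse of put_timestamp in the
+   muxer above): 3 bits from the first byte plus two 16-bit words contributing
+   15 bits each, marker bits discarded. */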
+static int64_t get_pts(ByteIOContext *pb, int c)
+{
+ int64_t pts;
+ int val;
+
+ if (c < 0)
+ c = get_byte(pb);
+ pts = (int64_t)((c >> 1) & 0x07) << 30;
+ val = get_be16(pb);
+ pts |= (int64_t)(val >> 1) << 15;
+ val = get_be16(pb);
+ pts |= (int64_t)(val >> 1);
+ return pts;
+}
+
+static int find_next_start_code(ByteIOContext *pb, int *size_ptr,
+ int32_t *header_state)
+{
+ unsigned int state, v;
+ int val, n;
+
+ state = *header_state;
+ n = *size_ptr;
+ while (n > 0) {
+ if (url_feof(pb))
+ break;
+ v = get_byte(pb);
+ n--;
+ if (state == 0x000001) {
+ state = ((state << 8) | v) & 0xffffff;
+ val = state;
+ goto found;
+ }
+ state = ((state << 8) | v) & 0xffffff;
+ }
+ val = -1;
+ found:
+ *header_state = state;
+ *size_ptr = n;
+ return val;
+}
+
+#if 0 /* unused, remove? */
+/* XXX: optimize */
+static int find_prev_start_code(ByteIOContext *pb, int *size_ptr)
+{
+ int64_t pos, pos_start;
+ int max_size, start_code;
+
+ max_size = *size_ptr;
+ pos_start = url_ftell(pb);
+
+ /* in order to go faster, we fill the buffer */
+ pos = pos_start - 16386;
+ if (pos < 0)
+ pos = 0;
+ url_fseek(pb, pos, SEEK_SET);
+ get_byte(pb);
+
+ pos = pos_start;
+ for(;;) {
+ pos--;
+ if (pos < 0 || (pos_start - pos) >= max_size) {
+ start_code = -1;
+ goto the_end;
+ }
+ url_fseek(pb, pos, SEEK_SET);
+ start_code = get_be32(pb);
+ if ((start_code & 0xffffff00) == 0x100)
+ break;
+ }
+ the_end:
+ *size_ptr = pos_start - pos;
+ return start_code;
+}
+#endif
+
+/**
+ * Extracts stream types from a program stream map
+ * according to ISO/IEC 13818-1 ('MPEG-2 Systems') table 2-35.
+ *
+ * @return number of bytes occupied by PSM in the bitstream
+ */
+static long mpegps_psm_parse(MpegDemuxContext *m, ByteIOContext *pb)
+{
+ int psm_length, ps_info_length, es_map_length;
+
+ psm_length = get_be16(pb);
+ get_byte(pb);
+ get_byte(pb);
+ ps_info_length = get_be16(pb);
+
+ /* skip program_stream_info */
+ url_fskip(pb, ps_info_length);
+ es_map_length = get_be16(pb);
+
+ /* at least one es available? */
+ while (es_map_length >= 4){
+ unsigned char type = get_byte(pb);
+ unsigned char es_id = get_byte(pb);
+ uint16_t es_info_length = get_be16(pb);
+ /* remember mapping from stream id to stream type */
+ m->psm_es_type[es_id] = type;
+ /* skip program_stream_info */
+ url_fskip(pb, es_info_length);
+ es_map_length -= 4 + es_info_length;
+ }
+ get_be32(pb); /* crc32 */
+ return 2 + psm_length;
+}
+
+/* read the next PES header. Return its position in ppos
+ (if not NULL), and its start code, pts and dts.
+ */
+static int mpegps_read_pes_header(AVFormatContext *s,
+ int64_t *ppos, int *pstart_code,
+ int64_t *ppts, int64_t *pdts)
+{
+ MpegDemuxContext *m = s->priv_data;
+ int len, size, startcode, c, flags, header_len;
+ int64_t pts, dts, last_pos;
+
+ last_pos = -1;
+ redo:
+ /* next start code (should be immediately after) */
+ m->header_state = 0xff;
+ size = MAX_SYNC_SIZE;
+ startcode = find_next_start_code(&s->pb, &size, &m->header_state);
+ //printf("startcode=%x pos=0x%"PRIx64"\n", startcode, url_ftell(&s->pb));
+ if (startcode < 0)
+ return AVERROR_IO;
+ if (startcode == PACK_START_CODE)
+ goto redo;
+ if (startcode == SYSTEM_HEADER_START_CODE)
+ goto redo;
+ if (startcode == PADDING_STREAM ||
+ startcode == PRIVATE_STREAM_2) {
+ /* skip them */
+ len = get_be16(&s->pb);
+ url_fskip(&s->pb, len);
+ goto redo;
+ }
+ if (startcode == PROGRAM_STREAM_MAP) {
+ mpegps_psm_parse(m, &s->pb);
+ goto redo;
+ }
+
+ /* find matching stream */
+ if (!((startcode >= 0x1c0 && startcode <= 0x1df) ||
+ (startcode >= 0x1e0 && startcode <= 0x1ef) ||
+ (startcode == 0x1bd)))
+ goto redo;
+ if (ppos) {
+ *ppos = url_ftell(&s->pb) - 4;
+ }
+ len = get_be16(&s->pb);
+ pts = AV_NOPTS_VALUE;
+ dts = AV_NOPTS_VALUE;
+ /* stuffing */
+ for(;;) {
+ if (len < 1)
+ goto redo;
+ c = get_byte(&s->pb);
+ len--;
+ /* XXX: for mpeg1, should test only bit 7 */
+ if (c != 0xff)
+ break;
+ }
+ if ((c & 0xc0) == 0x40) {
+ /* buffer scale & size */
+ if (len < 2)
+ goto redo;
+ get_byte(&s->pb);
+ c = get_byte(&s->pb);
+ len -= 2;
+ }
+ if ((c & 0xf0) == 0x20) {
+ if (len < 4)
+ goto redo;
+ dts = pts = get_pts(&s->pb, c);
+ len -= 4;
+ } else if ((c & 0xf0) == 0x30) {
+ if (len < 9)
+ goto redo;
+ pts = get_pts(&s->pb, c);
+ dts = get_pts(&s->pb, -1);
+ len -= 9;
+ } else if ((c & 0xc0) == 0x80) {
+ /* mpeg 2 PES */
+#if 0 /* some streams have this field set for no apparent reason */
+ if ((c & 0x30) != 0) {
+ /* Encrypted multiplex not handled */
+ goto redo;
+ }
+#endif
+ flags = get_byte(&s->pb);
+ header_len = get_byte(&s->pb);
+ len -= 2;
+ if (header_len > len)
+ goto redo;
+ if ((flags & 0xc0) == 0x80) {
+ dts = pts = get_pts(&s->pb, -1);
+ if (header_len < 5)
+ goto redo;
+ header_len -= 5;
+ len -= 5;
+ } else if ((flags & 0xc0) == 0xc0) {
+ pts = get_pts(&s->pb, -1);
+ dts = get_pts(&s->pb, -1);
+ if (header_len < 10)
+ goto redo;
+ header_len -= 10;
+ len -= 10;
+ }
+ len -= header_len;
+ while (header_len > 0) {
+ get_byte(&s->pb);
+ header_len--;
+ }
+ }
+ else if( c!= 0xf )
+ goto redo;
+
+ if (startcode == PRIVATE_STREAM_1 && !m->psm_es_type[startcode & 0xff]) {
+ if (len < 1)
+ goto redo;
+ startcode = get_byte(&s->pb);
+ len--;
+ if (startcode >= 0x80 && startcode <= 0xbf) {
+ /* audio: skip header */
+ if (len < 3)
+ goto redo;
+ get_byte(&s->pb);
+ get_byte(&s->pb);
+ get_byte(&s->pb);
+ len -= 3;
+ }
+ }
+ if(dts != AV_NOPTS_VALUE && ppos){
+ int i;
+ for(i=0; i<s->nb_streams; i++){
+ if(startcode == s->streams[i]->id) {
+ av_add_index_entry(s->streams[i], *ppos, dts, 0, 0, AVINDEX_KEYFRAME /* FIXME keyframe? */);
+ }
+ }
+ }
+
+ *pstart_code = startcode;
+ *ppts = pts;
+ *pdts = dts;
+ return len;
+}
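+
+/* NOTE: after the stuffing (and the optional MPEG-1 buffer scale/size) the
+   next byte selects the timestamp layout: '0010'xxxx = PTS only,
+   '0011'xxxx = PTS and DTS (MPEG-1), 10xxxxxx = MPEG-2 PES where the
+   following flags byte announces PTS/DTS.  The value returned is the
+   number of payload bytes remaining in the PES packet. */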
+
+static int mpegps_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MpegDemuxContext *m = s->priv_data;
+ AVStream *st;
+ int len, startcode, i, type, codec_id = 0, es_type;
+ int64_t pts, dts, dummy_pos; //dummy_pos is needed for the index building to work
+
+ redo:
+ len = mpegps_read_pes_header(s, &dummy_pos, &startcode, &pts, &dts);
+ if (len < 0)
+ return len;
+
+ /* now find stream */
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ if (st->id == startcode)
+ goto found;
+ }
+
+ es_type = m->psm_es_type[startcode & 0xff];
+ if(es_type > 0){
+ if(es_type == STREAM_TYPE_VIDEO_MPEG1){
+ codec_id = CODEC_ID_MPEG2VIDEO;
+ type = CODEC_TYPE_VIDEO;
+ } else if(es_type == STREAM_TYPE_VIDEO_MPEG2){
+ codec_id = CODEC_ID_MPEG2VIDEO;
+ type = CODEC_TYPE_VIDEO;
+ } else if(es_type == STREAM_TYPE_AUDIO_MPEG1 ||
+ es_type == STREAM_TYPE_AUDIO_MPEG2){
+ codec_id = CODEC_ID_MP3;
+ type = CODEC_TYPE_AUDIO;
+ } else if(es_type == STREAM_TYPE_AUDIO_AAC){
+ codec_id = CODEC_ID_AAC;
+ type = CODEC_TYPE_AUDIO;
+ } else if(es_type == STREAM_TYPE_VIDEO_MPEG4){
+ codec_id = CODEC_ID_MPEG4;
+ type = CODEC_TYPE_VIDEO;
+ } else if(es_type == STREAM_TYPE_VIDEO_H264){
+ codec_id = CODEC_ID_H264;
+ type = CODEC_TYPE_VIDEO;
+ } else if(es_type == STREAM_TYPE_AUDIO_AC3){
+ codec_id = CODEC_ID_AC3;
+ type = CODEC_TYPE_AUDIO;
+ } else {
+ goto skip;
+ }
+ } else if (startcode >= 0x1e0 && startcode <= 0x1ef) {
+ static const unsigned char avs_seqh[4] = { 0, 0, 1, 0xb0 };
+ unsigned char buf[8];
+ get_buffer(&s->pb, buf, 8);
+ url_fseek(&s->pb, -8, SEEK_CUR);
+ if(!memcmp(buf, avs_seqh, 4) && (buf[6] != 0 || buf[7] != 1))
+ codec_id = CODEC_ID_CAVS;
+ else
+ codec_id = CODEC_ID_MPEG2VIDEO;
+ type = CODEC_TYPE_VIDEO;
+ } else if (startcode >= 0x1c0 && startcode <= 0x1df) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_MP2;
+ } else if (startcode >= 0x80 && startcode <= 0x87) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AC3;
+ } else if (startcode >= 0x88 && startcode <= 0x9f) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_DTS;
+ } else if (startcode >= 0xa0 && startcode <= 0xbf) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_PCM_S16BE;
+ } else if (startcode >= 0x20 && startcode <= 0x3f) {
+ type = CODEC_TYPE_SUBTITLE;
+ codec_id = CODEC_ID_DVD_SUBTITLE;
+ } else {
+ skip:
+ /* skip packet */
+ url_fskip(&s->pb, len);
+ goto redo;
+ }
+ /* no stream found: add a new stream */
+ st = av_new_stream(s, startcode);
+ if (!st)
+ goto skip;
+ st->codec->codec_type = type;
+ st->codec->codec_id = codec_id;
+ if (codec_id != CODEC_ID_PCM_S16BE)
+ st->need_parsing = 1;
+ found:
+ if(st->discard >= AVDISCARD_ALL)
+ goto skip;
+ if (startcode >= 0xa0 && startcode <= 0xbf) {
+ int b1, freq;
+
+ /* for LPCM, we just skip the header and treat the rest as raw
+ audio data */
+ if (len <= 3)
+ goto skip;
+ get_byte(&s->pb); /* emphasis (1), muse(1), reserved(1), frame number(5) */
+ b1 = get_byte(&s->pb); /* quant (2), freq(2), reserved(1), channels(3) */
+ get_byte(&s->pb); /* dynamic range control (0x80 = off) */
+ len -= 3;
+ freq = (b1 >> 4) & 3;
+ st->codec->sample_rate = lpcm_freq_tab[freq];
+ st->codec->channels = 1 + (b1 & 7);
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * 2;
+ }
+ av_new_packet(pkt, len);
+ get_buffer(&s->pb, pkt->data, pkt->size);
+ pkt->pts = pts;
+ pkt->dts = dts;
+ pkt->stream_index = st->index;
+#if 0
+ av_log(s, AV_LOG_DEBUG, "%d: pts=%0.3f dts=%0.3f size=%d\n",
+ pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0, pkt->size);
+#endif
+
+ return 0;
+}
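+
+/* NOTE: for private_stream_1 the first payload byte is a substream id, and
+   the startcode ranges mapped above correspond to 0x20-0x3f DVD subpictures,
+   0x80-0x87 AC-3, 0x88-0x9f DTS and 0xa0-0xbf LPCM. */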
+
+static int mpegps_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index,
+ int64_t *ppos, int64_t pos_limit)
+{
+ int len, startcode;
+ int64_t pos, pts, dts;
+
+ pos = *ppos;
+#ifdef DEBUG_SEEK
+ printf("read_dts: pos=0x%"PRIx64" next=%d -> ", pos, find_next);
+#endif
+ url_fseek(&s->pb, pos, SEEK_SET);
+ for(;;) {
+ len = mpegps_read_pes_header(s, &pos, &startcode, &pts, &dts);
+ if (len < 0) {
+#ifdef DEBUG_SEEK
+ printf("none (ret=%d)\n", len);
+#endif
+ return AV_NOPTS_VALUE;
+ }
+ if (startcode == s->streams[stream_index]->id &&
+ dts != AV_NOPTS_VALUE) {
+ break;
+ }
+ url_fskip(&s->pb, len);
+ }
+#ifdef DEBUG_SEEK
+ printf("pos=0x%"PRIx64" dts=0x%"PRIx64" %0.3f\n", pos, dts, dts / 90000.0);
+#endif
+ *ppos = pos;
+ return dts;
+}
+
+#ifdef CONFIG_MPEG1SYSTEM_MUXER
+AVOutputFormat mpeg1system_muxer = {
+ "mpeg",
+ "MPEG1 System format",
+ "video/mpeg",
+ "mpg,mpeg",
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG1VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+#ifdef CONFIG_MPEG1VCD_MUXER
+AVOutputFormat mpeg1vcd_muxer = {
+ "vcd",
+ "MPEG1 System format (VCD)",
+ "video/mpeg",
+ NULL,
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG1VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+#ifdef CONFIG_MPEG2VOB_MUXER
+AVOutputFormat mpeg2vob_muxer = {
+ "vob",
+ "MPEG2 PS format (VOB)",
+ "video/mpeg",
+ "vob",
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG2VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+
+/* Same as the mpeg2vob muxer except that the pack size is 2324 */
+#ifdef CONFIG_MPEG2SVCD_MUXER
+AVOutputFormat mpeg2svcd_muxer = {
+ "svcd",
+ "MPEG2 PS format (VOB)",
+ "video/mpeg",
+ "vob",
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG2VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+
+/* Same as the mpeg2vob muxer except that the 'is_dvd' flag is set to produce NAV packets */
+#ifdef CONFIG_MPEG2DVD_MUXER
+AVOutputFormat mpeg2dvd_muxer = {
+ "dvd",
+ "MPEG2 PS format (DVD VOB)",
+ "video/mpeg",
+ "dvd",
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG2VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+
+#ifdef CONFIG_MPEGPS_DEMUXER
+AVInputFormat mpegps_demuxer = {
+ "mpeg",
+ "MPEG PS format",
+ sizeof(MpegDemuxContext),
+ mpegps_probe,
+ mpegps_read_header,
+ mpegps_read_packet,
+ mpegps_read_close,
+ NULL, //mpegps_read_seek,
+ mpegps_read_dts,
+ .flags = AVFMT_SHOW_IDS,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/mpegts.c b/contrib/ffmpeg/libavformat/mpegts.c
new file mode 100644
index 000000000..dd5f0adca
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpegts.c
@@ -0,0 +1,1527 @@
+/*
+ * MPEG2 transport stream (aka DVB) demuxer
+ * Copyright (c) 2002-2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "crc.h"
+#include "mpegts.h"
+
+//#define DEBUG_SI
+//#define DEBUG_SEEK
+
+/* about 2 seconds at 24 Mbit/s (32000 * 188 * 8 bits) */
+#define MAX_SCAN_PACKETS 32000
+
+/* maximum size in which we look for synchronisation if
+ synchronisation is lost */
+#define MAX_RESYNC_SIZE 4096
+
+typedef struct PESContext PESContext;
+
+static PESContext* add_pes_stream(MpegTSContext *ts, int pid, int stream_type);
+static AVStream* new_pes_av_stream(PESContext *pes, uint32_t code);
+
+enum MpegTSFilterType {
+ MPEGTS_PES,
+ MPEGTS_SECTION,
+};
+
+typedef void PESCallback(void *opaque, const uint8_t *buf, int len, int is_start);
+
+typedef struct MpegTSPESFilter {
+ PESCallback *pes_cb;
+ void *opaque;
+} MpegTSPESFilter;
+
+typedef void SectionCallback(void *opaque, const uint8_t *buf, int len);
+
+typedef void SetServiceCallback(void *opaque, int ret);
+
+typedef struct MpegTSSectionFilter {
+ int section_index;
+ int section_h_size;
+ uint8_t *section_buf;
+ int check_crc:1;
+ int end_of_section_reached:1;
+ SectionCallback *section_cb;
+ void *opaque;
+} MpegTSSectionFilter;
+
+typedef struct MpegTSFilter {
+ int pid;
+ int last_cc; /* last cc code (-1 if first packet) */
+ enum MpegTSFilterType type;
+ union {
+ MpegTSPESFilter pes_filter;
+ MpegTSSectionFilter section_filter;
+ } u;
+} MpegTSFilter;
+
+typedef struct MpegTSService {
+ int running:1;
+ int sid;
+ char *provider_name;
+ char *name;
+} MpegTSService;
+
+struct MpegTSContext {
+ /* user data */
+ AVFormatContext *stream;
+ int raw_packet_size; /* raw packet size, including FEC if present */
+ int auto_guess; /* if true, all pids are analyzed to find streams */
+ int set_service_ret;
+
+ int mpeg2ts_raw; /* force raw MPEG2 transport stream output, if possible */
+ int mpeg2ts_compute_pcr; /* compute exact PCR for each transport stream packet */
+
+ /* used to estimate the exact PCR */
+ int64_t cur_pcr;
+ int pcr_incr;
+ int pcr_pid;
+
+ /* data needed to handle file based ts */
+ int stop_parse; /* stop parsing loop */
+ AVPacket *pkt; /* packet containing av data */
+
+ /******************************************/
+ /* private mpegts data */
+ /* scan context */
+ MpegTSFilter *sdt_filter;
+ int nb_services;
+ MpegTSService **services;
+
+ /* set service context (XXX: allocate it?) */
+ SetServiceCallback *set_service_cb;
+ void *set_service_opaque;
+ MpegTSFilter *pat_filter;
+ MpegTSFilter *pmt_filter;
+ int req_sid;
+
+ MpegTSFilter *pids[NB_PID_MAX];
+};
+
+static void write_section_data(AVFormatContext *s, MpegTSFilter *tss1,
+ const uint8_t *buf, int buf_size, int is_start)
+{
+ MpegTSSectionFilter *tss = &tss1->u.section_filter;
+ int len;
+
+ if (is_start) {
+ memcpy(tss->section_buf, buf, buf_size);
+ tss->section_index = buf_size;
+ tss->section_h_size = -1;
+ tss->end_of_section_reached = 0;
+ } else {
+ if (tss->end_of_section_reached)
+ return;
+ len = 4096 - tss->section_index;
+ if (buf_size < len)
+ len = buf_size;
+ memcpy(tss->section_buf + tss->section_index, buf, len);
+ tss->section_index += len;
+ }
+
+ /* compute section length if possible */
+ if (tss->section_h_size == -1 && tss->section_index >= 3) {
+ len = (((tss->section_buf[1] & 0xf) << 8) | tss->section_buf[2]) + 3;
+ if (len > 4096)
+ return;
+ tss->section_h_size = len;
+ }
+
+ if (tss->section_h_size != -1 && tss->section_index >= tss->section_h_size) {
+ tss->end_of_section_reached = 1;
+ if (!tss->check_crc ||
+ av_crc(av_crc04C11DB7, -1, tss->section_buf, tss->section_h_size) == 0)
+ tss->section_cb(tss->opaque, tss->section_buf, tss->section_h_size);
+ }
+}
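+
+/* NOTE: the section length is taken from the standard PSI header:
+   section_length = ((buf[1] & 0x0f) << 8) | buf[2], plus the 3 header
+   bytes, capped at 4096.  The MPEG CRC32 (poly 0x04C11DB7) computed over a
+   whole section, CRC included, is 0, which is what the check above uses. */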
+
+static MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int pid,
+ SectionCallback *section_cb, void *opaque,
+ int check_crc)
+
+{
+ MpegTSFilter *filter;
+ MpegTSSectionFilter *sec;
+
+#ifdef DEBUG_SI
+ printf("Filter: pid=0x%x\n", pid);
+#endif
+ if (pid >= NB_PID_MAX || ts->pids[pid])
+ return NULL;
+ filter = av_mallocz(sizeof(MpegTSFilter));
+ if (!filter)
+ return NULL;
+ ts->pids[pid] = filter;
+ filter->type = MPEGTS_SECTION;
+ filter->pid = pid;
+ filter->last_cc = -1;
+ sec = &filter->u.section_filter;
+ sec->section_cb = section_cb;
+ sec->opaque = opaque;
+ sec->section_buf = av_malloc(MAX_SECTION_SIZE);
+ sec->check_crc = check_crc;
+ if (!sec->section_buf) {
+ av_free(filter);
+ return NULL;
+ }
+ return filter;
+}
+
+static MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
+ PESCallback *pes_cb,
+ void *opaque)
+{
+ MpegTSFilter *filter;
+ MpegTSPESFilter *pes;
+
+ if (pid >= NB_PID_MAX || ts->pids[pid])
+ return NULL;
+ filter = av_mallocz(sizeof(MpegTSFilter));
+ if (!filter)
+ return NULL;
+ ts->pids[pid] = filter;
+ filter->type = MPEGTS_PES;
+ filter->pid = pid;
+ filter->last_cc = -1;
+ pes = &filter->u.pes_filter;
+ pes->pes_cb = pes_cb;
+ pes->opaque = opaque;
+ return filter;
+}
+
+static void mpegts_close_filter(MpegTSContext *ts, MpegTSFilter *filter)
+{
+ int pid;
+
+ pid = filter->pid;
+ if (filter->type == MPEGTS_SECTION)
+ av_freep(&filter->u.section_filter.section_buf);
+ else if (filter->type == MPEGTS_PES)
+ av_freep(&filter->u.pes_filter.opaque);
+
+ av_free(filter);
+ ts->pids[pid] = NULL;
+}
+
+static int analyze(const uint8_t *buf, int size, int packet_size, int *index){
+ int stat[packet_size];
+ int i;
+ int x=0;
+ int best_score=0;
+
+ memset(stat, 0, packet_size*sizeof(int));
+
+ for(x=i=0; i<size; i++){
+ if(buf[i] == 0x47){
+ stat[x]++;
+ if(stat[x] > best_score){
+ best_score= stat[x];
+ if(index) *index= x;
+ }
+ }
+
+ x++;
+ if(x == packet_size) x= 0;
+ }
+
+ return best_score;
+}
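+
+/* analyze() counts 0x47 sync bytes at every offset modulo packet_size and
+   returns the best count; *index receives the winning offset.  A minimal
+   use, as in get_packet_size() and mpegts_probe() below (illustration only): */
+#if 0
+    int idx;
+    int score = analyze(buf, buf_size, TS_PACKET_SIZE, &idx);
+    /* a high score means buf looks like 188-byte packets starting at idx */
+#endif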
+
+/* autodetect FEC presence: needs at least TS_FEC_PACKET_SIZE * 5 + 1 bytes */
+static int get_packet_size(const uint8_t *buf, int size)
+{
+ int score, fec_score, dvhs_score;
+
+ if (size < (TS_FEC_PACKET_SIZE * 5 + 1))
+ return -1;
+
+ score = analyze(buf, size, TS_PACKET_SIZE, NULL);
+ dvhs_score = analyze(buf, size, TS_DVHS_PACKET_SIZE, NULL);
+ fec_score= analyze(buf, size, TS_FEC_PACKET_SIZE, NULL);
+// av_log(NULL, AV_LOG_DEBUG, "score: %d, dvhs_score: %d, fec_score: %d \n", score, dvhs_score, fec_score);
+
+ if (score > fec_score && score > dvhs_score) return TS_PACKET_SIZE;
+ else if(dvhs_score > score && dvhs_score > fec_score) return TS_DVHS_PACKET_SIZE;
+ else if(score < fec_score && dvhs_score < fec_score) return TS_FEC_PACKET_SIZE;
+ else return -1;
+}
+
+typedef struct SectionHeader {
+ uint8_t tid;
+ uint16_t id;
+ uint8_t version;
+ uint8_t sec_num;
+ uint8_t last_sec_num;
+} SectionHeader;
+
+static inline int get8(const uint8_t **pp, const uint8_t *p_end)
+{
+ const uint8_t *p;
+ int c;
+
+ p = *pp;
+ if (p >= p_end)
+ return -1;
+ c = *p++;
+ *pp = p;
+ return c;
+}
+
+static inline int get16(const uint8_t **pp, const uint8_t *p_end)
+{
+ const uint8_t *p;
+ int c;
+
+ p = *pp;
+ if ((p + 1) >= p_end)
+ return -1;
+ c = (p[0] << 8) | p[1];
+ p += 2;
+ *pp = p;
+ return c;
+}
+
+/* read and allocate a DVB string preceded by its length */
+static char *getstr8(const uint8_t **pp, const uint8_t *p_end)
+{
+ int len;
+ const uint8_t *p;
+ char *str;
+
+ p = *pp;
+ len = get8(&p, p_end);
+ if (len < 0)
+ return NULL;
+ if ((p + len) > p_end)
+ return NULL;
+ str = av_malloc(len + 1);
+ if (!str)
+ return NULL;
+ memcpy(str, p, len);
+ str[len] = '\0';
+ p += len;
+ *pp = p;
+ return str;
+}
+
+static int parse_section_header(SectionHeader *h,
+ const uint8_t **pp, const uint8_t *p_end)
+{
+ int val;
+
+ val = get8(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->tid = val;
+ *pp += 2;
+ val = get16(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->id = val;
+ val = get8(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->version = (val >> 1) & 0x1f;
+ val = get8(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->sec_num = val;
+ val = get8(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->last_sec_num = val;
+ return 0;
+}
+
+static MpegTSService *new_service(MpegTSContext *ts, int sid,
+ char *provider_name, char *name)
+{
+ MpegTSService *service;
+
+#ifdef DEBUG_SI
+ printf("new_service: sid=0x%04x provider='%s' name='%s'\n",
+ sid, provider_name, name);
+#endif
+
+ service = av_mallocz(sizeof(MpegTSService));
+ if (!service)
+ return NULL;
+ service->sid = sid;
+ service->provider_name = provider_name;
+ service->name = name;
+ dynarray_add(&ts->services, &ts->nb_services, service);
+ return service;
+}
+
+static void pmt_cb(void *opaque, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = opaque;
+ SectionHeader h1, *h = &h1;
+ PESContext *pes;
+ AVStream *st;
+ const uint8_t *p, *p_end, *desc_list_end, *desc_end;
+ int program_info_length, pcr_pid, pid, stream_type;
+ int desc_list_len, desc_len, desc_tag;
+ int comp_page = 0, anc_page = 0; /* initialize to kill warnings */
+ char language[4];
+
+#ifdef DEBUG_SI
+ printf("PMT:\n");
+ av_hex_dump(stdout, (uint8_t *)section, section_len);
+#endif
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(h, &p, p_end) < 0)
+ return;
+#ifdef DEBUG_SI
+ printf("sid=0x%x sec_num=%d/%d\n", h->id, h->sec_num, h->last_sec_num);
+#endif
+ if (h->tid != PMT_TID || (ts->req_sid >= 0 && h->id != ts->req_sid) )
+ return;
+
+ pcr_pid = get16(&p, p_end) & 0x1fff;
+ if (pcr_pid < 0)
+ return;
+ ts->pcr_pid = pcr_pid;
+#ifdef DEBUG_SI
+ printf("pcr_pid=0x%x\n", pcr_pid);
+#endif
+ program_info_length = get16(&p, p_end) & 0xfff;
+ if (program_info_length < 0)
+ return;
+ p += program_info_length;
+ if (p >= p_end)
+ return;
+ for(;;) {
+ language[0] = 0;
+ st = 0;
+ stream_type = get8(&p, p_end);
+ if (stream_type < 0)
+ break;
+ pid = get16(&p, p_end) & 0x1fff;
+ if (pid < 0)
+ break;
+ desc_list_len = get16(&p, p_end) & 0xfff;
+ if (desc_list_len < 0)
+ break;
+ desc_list_end = p + desc_list_len;
+ if (desc_list_end > p_end)
+ break;
+ for(;;) {
+ desc_tag = get8(&p, desc_list_end);
+ if (desc_tag < 0)
+ break;
+ if (stream_type == STREAM_TYPE_PRIVATE_DATA) {
+ if((desc_tag == 0x6A) || (desc_tag == 0x7A)) {
+ /*assume DVB AC-3 Audio*/
+ stream_type = STREAM_TYPE_AUDIO_AC3;
+ } else if(desc_tag == 0x7B) {
+ /* DVB DTS audio */
+ stream_type = STREAM_TYPE_AUDIO_DTS;
+ }
+ }
+ desc_len = get8(&p, desc_list_end);
+ desc_end = p + desc_len;
+ if (desc_end > desc_list_end)
+ break;
+#ifdef DEBUG_SI
+ printf("tag: 0x%02x len=%d\n", desc_tag, desc_len);
+#endif
+ switch(desc_tag) {
+ case DVB_SUBT_DESCID:
+ if (stream_type == STREAM_TYPE_PRIVATE_DATA)
+ stream_type = STREAM_TYPE_SUBTITLE_DVB;
+
+ language[0] = get8(&p, desc_end);
+ language[1] = get8(&p, desc_end);
+ language[2] = get8(&p, desc_end);
+ language[3] = 0;
+ get8(&p, desc_end);
+ comp_page = get16(&p, desc_end);
+ anc_page = get16(&p, desc_end);
+
+ break;
+ case 0x0a: /* ISO 639 language descriptor */
+ language[0] = get8(&p, desc_end);
+ language[1] = get8(&p, desc_end);
+ language[2] = get8(&p, desc_end);
+ language[3] = 0;
+ break;
+ default:
+ break;
+ }
+ p = desc_end;
+ }
+ p = desc_list_end;
+
+#ifdef DEBUG_SI
+ printf("stream_type=%d pid=0x%x\n", stream_type, pid);
+#endif
+
+ /* now create ffmpeg stream */
+ switch(stream_type) {
+ case STREAM_TYPE_AUDIO_MPEG1:
+ case STREAM_TYPE_AUDIO_MPEG2:
+ case STREAM_TYPE_VIDEO_MPEG1:
+ case STREAM_TYPE_VIDEO_MPEG2:
+ case STREAM_TYPE_VIDEO_MPEG4:
+ case STREAM_TYPE_VIDEO_H264:
+ case STREAM_TYPE_AUDIO_AAC:
+ case STREAM_TYPE_AUDIO_AC3:
+ case STREAM_TYPE_AUDIO_DTS:
+ case STREAM_TYPE_SUBTITLE_DVB:
+ pes = add_pes_stream(ts, pid, stream_type);
+ if (pes)
+ st = new_pes_av_stream(pes, 0);
+ break;
+ default:
+ /* we ignore the other streams */
+ break;
+ }
+
+ if (st) {
+ if (language[0] != 0) {
+ st->language[0] = language[0];
+ st->language[1] = language[1];
+ st->language[2] = language[2];
+ st->language[3] = language[3];
+ }
+
+ if (stream_type == STREAM_TYPE_SUBTITLE_DVB) {
+ st->codec->sub_id = (anc_page << 16) | comp_page;
+ }
+ }
+ }
+ /* all parameters are there */
+ ts->set_service_cb(ts->set_service_opaque, 0);
+ mpegts_close_filter(ts, ts->pmt_filter);
+ ts->pmt_filter = NULL;
+}
+
+static void pat_cb(void *opaque, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = opaque;
+ SectionHeader h1, *h = &h1;
+ const uint8_t *p, *p_end;
+ int sid, pmt_pid;
+
+#ifdef DEBUG_SI
+ printf("PAT:\n");
+ av_hex_dump(stdout, (uint8_t *)section, section_len);
+#endif
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(h, &p, p_end) < 0)
+ return;
+ if (h->tid != PAT_TID)
+ return;
+
+ for(;;) {
+ sid = get16(&p, p_end);
+ if (sid < 0)
+ break;
+ pmt_pid = get16(&p, p_end) & 0x1fff;
+ if (pmt_pid < 0)
+ break;
+#ifdef DEBUG_SI
+ printf("sid=0x%x pid=0x%x\n", sid, pmt_pid);
+#endif
+ if (sid == 0x0000) {
+ /* NIT info */
+ } else {
+ if (ts->req_sid == sid) {
+ ts->pmt_filter = mpegts_open_section_filter(ts, pmt_pid,
+ pmt_cb, ts, 1);
+ goto found;
+ }
+ }
+ }
+ /* not found */
+ ts->set_service_cb(ts->set_service_opaque, -1);
+
+ found:
+ mpegts_close_filter(ts, ts->pat_filter);
+ ts->pat_filter = NULL;
+}
+
+/* add all services found in the PAT */
+static void pat_scan_cb(void *opaque, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = opaque;
+ SectionHeader h1, *h = &h1;
+ const uint8_t *p, *p_end;
+ int sid, pmt_pid;
+ char *provider_name, *name;
+ char buf[256];
+
+#ifdef DEBUG_SI
+ printf("PAT:\n");
+ av_hex_dump(stdout, (uint8_t *)section, section_len);
+#endif
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(h, &p, p_end) < 0)
+ return;
+ if (h->tid != PAT_TID)
+ return;
+
+ for(;;) {
+ sid = get16(&p, p_end);
+ if (sid < 0)
+ break;
+ pmt_pid = get16(&p, p_end) & 0x1fff;
+ if (pmt_pid < 0)
+ break;
+#ifdef DEBUG_SI
+ printf("sid=0x%x pid=0x%x\n", sid, pmt_pid);
+#endif
+ if (sid == 0x0000) {
+ /* NIT info */
+ } else {
+ /* add the service with a dummy name */
+ snprintf(buf, sizeof(buf), "Service %x\n", sid);
+ name = av_strdup(buf);
+ provider_name = av_strdup("");
+ if (name && provider_name) {
+ new_service(ts, sid, provider_name, name);
+ } else {
+ av_freep(&name);
+ av_freep(&provider_name);
+ }
+ }
+ }
+ ts->stop_parse = 1;
+
+ /* remove filter */
+ mpegts_close_filter(ts, ts->pat_filter);
+ ts->pat_filter = NULL;
+}
+
+static void mpegts_set_service(MpegTSContext *ts, int sid,
+ SetServiceCallback *set_service_cb, void *opaque)
+{
+ ts->set_service_cb = set_service_cb;
+ ts->set_service_opaque = opaque;
+ ts->req_sid = sid;
+ ts->pat_filter = mpegts_open_section_filter(ts, PAT_PID,
+ pat_cb, ts, 1);
+}
+
+static void sdt_cb(void *opaque, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = opaque;
+ SectionHeader h1, *h = &h1;
+ const uint8_t *p, *p_end, *desc_list_end, *desc_end;
+ int onid, val, sid, desc_list_len, desc_tag, desc_len, service_type;
+ char *name, *provider_name;
+
+#ifdef DEBUG_SI
+ printf("SDT:\n");
+ av_hex_dump(stdout, (uint8_t *)section, section_len);
+#endif
+
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(h, &p, p_end) < 0)
+ return;
+ if (h->tid != SDT_TID)
+ return;
+ onid = get16(&p, p_end);
+ if (onid < 0)
+ return;
+ val = get8(&p, p_end);
+ if (val < 0)
+ return;
+ for(;;) {
+ sid = get16(&p, p_end);
+ if (sid < 0)
+ break;
+ val = get8(&p, p_end);
+ if (val < 0)
+ break;
+ desc_list_len = get16(&p, p_end) & 0xfff;
+ if (desc_list_len < 0)
+ break;
+ desc_list_end = p + desc_list_len;
+ if (desc_list_end > p_end)
+ break;
+ for(;;) {
+ desc_tag = get8(&p, desc_list_end);
+ if (desc_tag < 0)
+ break;
+ desc_len = get8(&p, desc_list_end);
+ desc_end = p + desc_len;
+ if (desc_end > desc_list_end)
+ break;
+#ifdef DEBUG_SI
+ printf("tag: 0x%02x len=%d\n", desc_tag, desc_len);
+#endif
+ switch(desc_tag) {
+ case 0x48:
+ service_type = get8(&p, p_end);
+ if (service_type < 0)
+ break;
+ provider_name = getstr8(&p, p_end);
+ if (!provider_name)
+ break;
+ name = getstr8(&p, p_end);
+ if (!name)
+ break;
+ new_service(ts, sid, provider_name, name);
+ break;
+ default:
+ break;
+ }
+ p = desc_end;
+ }
+ p = desc_list_end;
+ }
+ ts->stop_parse = 1;
+
+ /* remove filter */
+ mpegts_close_filter(ts, ts->sdt_filter);
+ ts->sdt_filter = NULL;
+}
+
+/* scan services in a transport stream by looking at the SDT */
+static void mpegts_scan_sdt(MpegTSContext *ts)
+{
+ ts->sdt_filter = mpegts_open_section_filter(ts, SDT_PID,
+ sdt_cb, ts, 1);
+}
+
+/* scan services in a transport stream by looking at the PAT (better
+ than nothing!) */
+static void mpegts_scan_pat(MpegTSContext *ts)
+{
+ ts->pat_filter = mpegts_open_section_filter(ts, PAT_PID,
+ pat_scan_cb, ts, 1);
+}
+
+/* TS stream handling */
+
+enum MpegTSState {
+ MPEGTS_HEADER = 0,
+ MPEGTS_PESHEADER_FILL,
+ MPEGTS_PAYLOAD,
+ MPEGTS_SKIP,
+};
+
+/* enough for PES header + length */
+#define PES_START_SIZE 9
+#define MAX_PES_HEADER_SIZE (9 + 255)
+
+struct PESContext {
+ int pid;
+ int stream_type;
+ MpegTSContext *ts;
+ AVFormatContext *stream;
+ AVStream *st;
+ enum MpegTSState state;
+ /* used to get the format */
+ int data_index;
+ int total_size;
+ int pes_header_size;
+ int64_t pts, dts;
+ uint8_t header[MAX_PES_HEADER_SIZE];
+};
+
+static int64_t get_pts(const uint8_t *p)
+{
+ int64_t pts;
+ int val;
+
+ pts = (int64_t)((p[0] >> 1) & 0x07) << 30;
+ val = (p[1] << 8) | p[2];
+ pts |= (int64_t)(val >> 1) << 15;
+ val = (p[3] << 8) | p[4];
+ pts |= (int64_t)(val >> 1);
+ return pts;
+}
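+
+/* NOTE: the 5-byte field decoded above carries a 33-bit timestamp split as
+   3 bits in p[0], 15 bits in p[1..2] and 15 bits in p[3..4], each group
+   followed by a marker bit in the low bit of its last byte, hence the
+   >> 1 shifts. */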
+
+/* feed PES data; when an output packet could be constructed it is stored
+ in ts->pkt and ts->stop_parse is set */
+static void mpegts_push_data(void *opaque,
+ const uint8_t *buf, int buf_size, int is_start)
+{
+ PESContext *pes = opaque;
+ MpegTSContext *ts = pes->ts;
+ const uint8_t *p;
+ int len, code;
+
+ if (is_start) {
+ pes->state = MPEGTS_HEADER;
+ pes->data_index = 0;
+ }
+ p = buf;
+ while (buf_size > 0) {
+ switch(pes->state) {
+ case MPEGTS_HEADER:
+ len = PES_START_SIZE - pes->data_index;
+ if (len > buf_size)
+ len = buf_size;
+ memcpy(pes->header + pes->data_index, p, len);
+ pes->data_index += len;
+ p += len;
+ buf_size -= len;
+ if (pes->data_index == PES_START_SIZE) {
+ /* we have enough of the PES (or section) header to
+ decide which of the two it is */
+#if 0
+ av_hex_dump(pes->header, pes->data_index);
+#endif
+ if (pes->header[0] == 0x00 && pes->header[1] == 0x00 &&
+ pes->header[2] == 0x01) {
+ /* it must be an mpeg2 PES stream */
+ code = pes->header[3] | 0x100;
+ if (!((code >= 0x1c0 && code <= 0x1df) ||
+ (code >= 0x1e0 && code <= 0x1ef) ||
+ (code == 0x1bd)))
+ goto skip;
+ if (!pes->st) {
+ /* allocate stream */
+ new_pes_av_stream(pes, code);
+ }
+ pes->state = MPEGTS_PESHEADER_FILL;
+ pes->total_size = (pes->header[4] << 8) | pes->header[5];
+ /* NOTE: a zero total size means the PES size is
+ unbounded */
+ if (pes->total_size)
+ pes->total_size += 6;
+ pes->pes_header_size = pes->header[8] + 9;
+ } else {
+ /* otherwise, it should be a table */
+ /* skip packet */
+ skip:
+ pes->state = MPEGTS_SKIP;
+ continue;
+ }
+ }
+ break;
+ /**********************************************/
+ /* PES packet parsing */
+ case MPEGTS_PESHEADER_FILL:
+ len = pes->pes_header_size - pes->data_index;
+ if (len > buf_size)
+ len = buf_size;
+ memcpy(pes->header + pes->data_index, p, len);
+ pes->data_index += len;
+ p += len;
+ buf_size -= len;
+ if (pes->data_index == pes->pes_header_size) {
+ const uint8_t *r;
+ unsigned int flags;
+
+ flags = pes->header[7];
+ r = pes->header + 9;
+ pes->pts = AV_NOPTS_VALUE;
+ pes->dts = AV_NOPTS_VALUE;
+ if ((flags & 0xc0) == 0x80) {
+ pes->pts = get_pts(r);
+ r += 5;
+ } else if ((flags & 0xc0) == 0xc0) {
+ pes->pts = get_pts(r);
+ r += 5;
+ pes->dts = get_pts(r);
+ r += 5;
+ }
+ /* we got the full header. We parse it and get the payload */
+ pes->state = MPEGTS_PAYLOAD;
+ }
+ break;
+ case MPEGTS_PAYLOAD:
+ if (pes->total_size) {
+ len = pes->total_size - pes->data_index;
+ if (len > buf_size)
+ len = buf_size;
+ } else {
+ len = buf_size;
+ }
+ if (len > 0) {
+ AVPacket *pkt = ts->pkt;
+ if (pes->st && av_new_packet(pkt, len) == 0) {
+ memcpy(pkt->data, p, len);
+ pkt->stream_index = pes->st->index;
+ pkt->pts = pes->pts;
+ pkt->dts = pes->dts;
+ /* reset pts values */
+ pes->pts = AV_NOPTS_VALUE;
+ pes->dts = AV_NOPTS_VALUE;
+ ts->stop_parse = 1;
+ return;
+ }
+ }
+ buf_size = 0;
+ break;
+ case MPEGTS_SKIP:
+ buf_size = 0;
+ break;
+ }
+ }
+}
+
+static AVStream* new_pes_av_stream(PESContext *pes, uint32_t code)
+{
+ AVStream *st;
+ int codec_type, codec_id;
+
+ switch(pes->stream_type){
+ case STREAM_TYPE_AUDIO_MPEG1:
+ case STREAM_TYPE_AUDIO_MPEG2:
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_MP3;
+ break;
+ case STREAM_TYPE_VIDEO_MPEG1:
+ case STREAM_TYPE_VIDEO_MPEG2:
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_MPEG2VIDEO;
+ break;
+ case STREAM_TYPE_VIDEO_MPEG4:
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_MPEG4;
+ break;
+ case STREAM_TYPE_VIDEO_H264:
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_H264;
+ break;
+ case STREAM_TYPE_AUDIO_AAC:
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AAC;
+ break;
+ case STREAM_TYPE_AUDIO_AC3:
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AC3;
+ break;
+ case STREAM_TYPE_AUDIO_DTS:
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_DTS;
+ break;
+ case STREAM_TYPE_SUBTITLE_DVB:
+ codec_type = CODEC_TYPE_SUBTITLE;
+ codec_id = CODEC_ID_DVB_SUBTITLE;
+ break;
+ default:
+ if (code >= 0x1c0 && code <= 0x1df) {
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_MP2;
+ } else if (code == 0x1bd) {
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AC3;
+ } else {
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_MPEG1VIDEO;
+ }
+ break;
+ }
+ st = av_new_stream(pes->stream, pes->pid);
+ if (st) {
+ av_set_pts_info(st, 33, 1, 90000);
+ st->priv_data = pes;
+ st->codec->codec_type = codec_type;
+ st->codec->codec_id = codec_id;
+ st->need_parsing = 1;
+ pes->st = st;
+ }
+ return st;
+}
+
+
+static PESContext *add_pes_stream(MpegTSContext *ts, int pid, int stream_type)
+{
+ MpegTSFilter *tss;
+ PESContext *pes;
+
+ /* if no pid found, then add a pid context */
+ pes = av_mallocz(sizeof(PESContext));
+ if (!pes)
+ return 0;
+ pes->ts = ts;
+ pes->stream = ts->stream;
+ pes->pid = pid;
+ pes->stream_type = stream_type;
+ tss = mpegts_open_pes_filter(ts, pid, mpegts_push_data, pes);
+ if (!tss) {
+ av_free(pes);
+ return 0;
+ }
+ return pes;
+}
+
+/* handle one TS packet */
+static void handle_packet(MpegTSContext *ts, const uint8_t *packet)
+{
+ AVFormatContext *s = ts->stream;
+ MpegTSFilter *tss;
+ int len, pid, cc, cc_ok, afc, is_start;
+ const uint8_t *p, *p_end;
+
+ pid = ((packet[1] & 0x1f) << 8) | packet[2];
+ is_start = packet[1] & 0x40;
+ tss = ts->pids[pid];
+ if (ts->auto_guess && tss == NULL && is_start) {
+ add_pes_stream(ts, pid, 0);
+ tss = ts->pids[pid];
+ }
+ if (!tss)
+ return;
+
+ /* continuity check (currently not used) */
+ cc = (packet[3] & 0xf);
+ cc_ok = (tss->last_cc < 0) || ((((tss->last_cc + 1) & 0x0f) == cc));
+ tss->last_cc = cc;
+
+ /* skip adaptation field */
+ afc = (packet[3] >> 4) & 3;
+ p = packet + 4;
+ if (afc == 0) /* reserved value */
+ return;
+ if (afc == 2) /* adaptation field only */
+ return;
+ if (afc == 3) {
+ /* skip adaptation field */
+ p += p[0] + 1;
+ }
+ /* if past the end of packet, ignore */
+ p_end = packet + TS_PACKET_SIZE;
+ if (p >= p_end)
+ return;
+
+ if (tss->type == MPEGTS_SECTION) {
+ if (is_start) {
+ /* pointer field present */
+ len = *p++;
+ if (p + len > p_end)
+ return;
+ if (len && cc_ok) {
+ /* write remaining section bytes */
+ write_section_data(s, tss,
+ p, len, 0);
+ /* check whether filter has been closed */
+ if (!ts->pids[pid])
+ return;
+ }
+ p += len;
+ if (p < p_end) {
+ write_section_data(s, tss,
+ p, p_end - p, 1);
+ }
+ } else {
+ if (cc_ok) {
+ write_section_data(s, tss,
+ p, p_end - p, 0);
+ }
+ }
+ } else {
+ tss->u.pes_filter.pes_cb(tss->u.pes_filter.opaque,
+ p, p_end - p, is_start);
+ }
+}
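+
+/* The 4-byte TS header used above:
+     packet[0]  sync byte, always 0x47
+     packet[1]  transport_error(1) payload_unit_start(1) priority(1) PID[12..8]
+     packet[2]  PID[7..0]
+     packet[3]  scrambling(2) adaptation_field_control(2) continuity_counter(4)
+   afc == 2 means adaptation field only (no payload), afc == 3 means the
+   adaptation field is followed by payload. */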
+
+/* XXX: try to find a better synchro over several packets (use
+ get_packet_size() ?) */
+static int mpegts_resync(ByteIOContext *pb)
+{
+ int c, i;
+
+ for(i = 0;i < MAX_RESYNC_SIZE; i++) {
+ c = url_fgetc(pb);
+ if (c < 0)
+ return -1;
+ if (c == 0x47) {
+ url_fseek(pb, -1, SEEK_CUR);
+ return 0;
+ }
+ }
+ /* no sync found */
+ return -1;
+}
+
+/* return 0 if OK, or a negative error code on error or EOF */
+static int read_packet(ByteIOContext *pb, uint8_t *buf, int raw_packet_size)
+{
+ int skip, len;
+
+ for(;;) {
+ len = get_buffer(pb, buf, TS_PACKET_SIZE);
+ if (len != TS_PACKET_SIZE)
+ return AVERROR_IO;
+ /* check packet sync byte */
+ if (buf[0] != 0x47) {
+ /* find a new packet start */
+ url_fseek(pb, -TS_PACKET_SIZE, SEEK_CUR);
+ if (mpegts_resync(pb) < 0)
+ return AVERROR_INVALIDDATA;
+ else
+ continue;
+ } else {
+ skip = raw_packet_size - TS_PACKET_SIZE;
+ if (skip > 0)
+ url_fskip(pb, skip);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int handle_packets(MpegTSContext *ts, int nb_packets)
+{
+ AVFormatContext *s = ts->stream;
+ ByteIOContext *pb = &s->pb;
+ uint8_t packet[TS_PACKET_SIZE];
+ int packet_num, ret;
+
+ ts->stop_parse = 0;
+ packet_num = 0;
+ for(;;) {
+ if (ts->stop_parse)
+ break;
+ packet_num++;
+ if (nb_packets != 0 && packet_num >= nb_packets)
+ break;
+ ret = read_packet(pb, packet, ts->raw_packet_size);
+ if (ret != 0)
+ return ret;
+ handle_packet(ts, packet);
+ }
+ return 0;
+}
+
+static int mpegts_probe(AVProbeData *p)
+{
+#if 1
+ const int size= p->buf_size;
+ int score, fec_score, dvhs_score;
+#define CHECK_COUNT 10
+
+ if (size < (TS_FEC_PACKET_SIZE * CHECK_COUNT))
+ return -1;
+
+ score = analyze(p->buf, TS_PACKET_SIZE *CHECK_COUNT, TS_PACKET_SIZE, NULL);
+ dvhs_score = analyze(p->buf, TS_DVHS_PACKET_SIZE *CHECK_COUNT, TS_DVHS_PACKET_SIZE, NULL);
+ fec_score= analyze(p->buf, TS_FEC_PACKET_SIZE*CHECK_COUNT, TS_FEC_PACKET_SIZE, NULL);
+// av_log(NULL, AV_LOG_DEBUG, "score: %d, dvhs_score: %d, fec_score: %d \n", score, dvhs_score, fec_score);
+
+// we need a clear definition for the returned score otherwise things will become messy sooner or later
+ if (score > fec_score && score > dvhs_score && score > 6) return AVPROBE_SCORE_MAX + score - CHECK_COUNT;
+ else if(dvhs_score > score && dvhs_score > fec_score && dvhs_score > 6) return AVPROBE_SCORE_MAX + dvhs_score - CHECK_COUNT;
+ else if( fec_score > 6) return AVPROBE_SCORE_MAX + fec_score - CHECK_COUNT;
+ else return -1;
+#else
+ /* only use the extension for safer guess */
+ if (match_ext(p->filename, "ts"))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+#endif
+}
+
+static void set_service_cb(void *opaque, int ret)
+{
+ MpegTSContext *ts = opaque;
+ ts->set_service_ret = ret;
+ ts->stop_parse = 1;
+}
+
+/* return the 90 kHz PCR base and the extension for the 27 MHz PCR;
+ return -1 if not available */
+static int parse_pcr(int64_t *ppcr_high, int *ppcr_low,
+ const uint8_t *packet)
+{
+ int afc, len, flags;
+ const uint8_t *p;
+ unsigned int v;
+
+ afc = (packet[3] >> 4) & 3;
+ if (afc <= 1)
+ return -1;
+ p = packet + 4;
+ len = p[0];
+ p++;
+ if (len == 0)
+ return -1;
+ flags = *p++;
+ len--;
+ if (!(flags & 0x10))
+ return -1;
+ if (len < 6)
+ return -1;
+ v = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+ *ppcr_high = ((int64_t)v << 1) | (p[4] >> 7);
+ *ppcr_low = ((p[4] & 1) << 8) | p[5];
+ return 0;
+}
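+
+/* The PCR is a 33-bit base counted at 90 kHz plus a 9-bit extension counted
+   at 27 MHz; callers rebuild the full 27 MHz clock as base * 300 + ext.
+   A small sketch of the conversion (illustration only): */
+#if 0
+    int64_t pcr_h; int pcr_l;
+    if (parse_pcr(&pcr_h, &pcr_l, packet) == 0) {
+        int64_t pcr = pcr_h * 300 + pcr_l;       /* 27 MHz ticks */
+        double seconds = pcr / 27000000.0;       /* wall-clock position */
+    }
+#endif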
+
+static int mpegts_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MpegTSContext *ts = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint8_t buf[1024];
+ int len, sid, i;
+ int64_t pos;
+ MpegTSService *service;
+
+ if (ap) {
+ ts->mpeg2ts_raw = ap->mpeg2ts_raw;
+ ts->mpeg2ts_compute_pcr = ap->mpeg2ts_compute_pcr;
+ }
+
+ /* read the first 1024 bytes to get packet size */
+ pos = url_ftell(pb);
+ len = get_buffer(pb, buf, sizeof(buf));
+ if (len != sizeof(buf))
+ goto fail;
+ ts->raw_packet_size = get_packet_size(buf, sizeof(buf));
+ if (ts->raw_packet_size <= 0)
+ goto fail;
+ ts->stream = s;
+ ts->auto_guess = 0;
+
+goto_auto_guess:
+ if (!ts->mpeg2ts_raw) {
+ /* normal demux */
+
+ if (!ts->auto_guess) {
+ ts->set_service_ret = -1;
+
+ /* first do a scan to get all the services */
+ url_fseek(pb, pos, SEEK_SET);
+ mpegts_scan_sdt(ts);
+
+ handle_packets(ts, s->probesize);
+
+ if (ts->nb_services <= 0) {
+ /* no SDT found, we try to look at the PAT */
+
+ /* First remove the SDT filters from each PID */
+ int i;
+ for (i=0; i < NB_PID_MAX; i++) {
+ if (ts->pids[i])
+ mpegts_close_filter(ts, ts->pids[i]);
+ }
+ url_fseek(pb, pos, SEEK_SET);
+ mpegts_scan_pat(ts);
+
+ handle_packets(ts, s->probesize);
+ }
+
+ if (ts->nb_services <= 0) {
+ /* raw transport stream */
+ ts->auto_guess = 1;
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+ goto do_pcr;
+ }
+
+ /* tune to first service found */
+ for(i=0; i<ts->nb_services && ts->set_service_ret; i++){
+ service = ts->services[i];
+ sid = service->sid;
+#ifdef DEBUG_SI
+ printf("tuning to '%s'\n", service->name);
+#endif
+
+ /* now find the info for the first service if we found any,
+ otherwise try to filter all PATs */
+
+ url_fseek(pb, pos, SEEK_SET);
+ mpegts_set_service(ts, sid, set_service_cb, ts);
+
+ handle_packets(ts, s->probesize);
+ }
+ /* if no service could be tuned, retry with auto_guess set or give up */
+
+ if (ts->set_service_ret != 0) {
+ if(ts->auto_guess)
+ return -1;
+ else {
+ //let's retry with auto_guess set
+ ts->auto_guess = 1;
+ goto goto_auto_guess;
+ }
+ }
+
+#ifdef DEBUG_SI
+ printf("tuning done\n");
+#endif
+ }
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+ } else {
+ AVStream *st;
+ int pcr_pid, pid, nb_packets, nb_pcrs, ret, pcr_l;
+ int64_t pcrs[2], pcr_h;
+ int packet_count[2];
+ uint8_t packet[TS_PACKET_SIZE];
+
+ /* only read packets */
+
+ do_pcr:
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto fail;
+ av_set_pts_info(st, 60, 1, 27000000);
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ st->codec->codec_id = CODEC_ID_MPEG2TS;
+
+ /* we iterate until we find two PCRs to estimate the bitrate */
+ pcr_pid = -1;
+ nb_pcrs = 0;
+ nb_packets = 0;
+ for(;;) {
+ ret = read_packet(&s->pb, packet, ts->raw_packet_size);
+ if (ret < 0)
+ return -1;
+ pid = ((packet[1] & 0x1f) << 8) | packet[2];
+ if ((pcr_pid == -1 || pcr_pid == pid) &&
+ parse_pcr(&pcr_h, &pcr_l, packet) == 0) {
+ pcr_pid = pid;
+ packet_count[nb_pcrs] = nb_packets;
+ pcrs[nb_pcrs] = pcr_h * 300 + pcr_l;
+ nb_pcrs++;
+ if (nb_pcrs >= 2)
+ break;
+ }
+ nb_packets++;
+ }
+ ts->pcr_pid = pcr_pid;
+
+ /* NOTE1: the bitrate is computed without the FEC */
+ /* NOTE2: it is only the bitrate of the start of the stream */
+ ts->pcr_incr = (pcrs[1] - pcrs[0]) / (packet_count[1] - packet_count[0]);
+ ts->cur_pcr = pcrs[0] - ts->pcr_incr * packet_count[0];
+ s->bit_rate = (TS_PACKET_SIZE * 8) * 27e6 / ts->pcr_incr;
+ st->codec->bit_rate = s->bit_rate;
+ st->start_time = ts->cur_pcr;
+#if 0
+ printf("start=%0.3f pcr=%0.3f incr=%d\n",
+ st->start_time / 1000000.0, pcrs[0] / 27e6, ts->pcr_incr);
+#endif
+ }
+
+ url_fseek(pb, pos, SEEK_SET);
+ return 0;
+ fail:
+ return -1;
+}
+
+#define MAX_PACKET_READAHEAD ((128 * 1024) / 188)
+
+static int mpegts_raw_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MpegTSContext *ts = s->priv_data;
+ int ret, i;
+ int64_t pcr_h, next_pcr_h, pos;
+ int pcr_l, next_pcr_l;
+ uint8_t pcr_buf[12];
+
+ if (av_new_packet(pkt, TS_PACKET_SIZE) < 0)
+ return -ENOMEM;
+ pkt->pos= url_ftell(&s->pb);
+ ret = read_packet(&s->pb, pkt->data, ts->raw_packet_size);
+ if (ret < 0) {
+ av_free_packet(pkt);
+ return ret;
+ }
+ if (ts->mpeg2ts_compute_pcr) {
+ /* compute exact PCR for each packet */
+ if (parse_pcr(&pcr_h, &pcr_l, pkt->data) == 0) {
+ /* we read the next PCR (XXX: optimize it by using a bigger buffer) */
+ pos = url_ftell(&s->pb);
+ for(i = 0; i < MAX_PACKET_READAHEAD; i++) {
+ url_fseek(&s->pb, pos + i * ts->raw_packet_size, SEEK_SET);
+ get_buffer(&s->pb, pcr_buf, 12);
+ if (parse_pcr(&next_pcr_h, &next_pcr_l, pcr_buf) == 0) {
+ /* XXX: not precise enough */
+ ts->pcr_incr = ((next_pcr_h - pcr_h) * 300 + (next_pcr_l - pcr_l)) /
+ (i + 1);
+ break;
+ }
+ }
+ url_fseek(&s->pb, pos, SEEK_SET);
+ /* no next PCR found: we use previous increment */
+ ts->cur_pcr = pcr_h * 300 + pcr_l;
+ }
+ pkt->pts = ts->cur_pcr;
+ pkt->duration = ts->pcr_incr;
+ ts->cur_pcr += ts->pcr_incr;
+ }
+ pkt->stream_index = 0;
+ return 0;
+}
+
+static int mpegts_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MpegTSContext *ts = s->priv_data;
+
+ if (!ts->mpeg2ts_raw) {
+ ts->pkt = pkt;
+ return handle_packets(ts, 0);
+ } else {
+ return mpegts_raw_read_packet(s, pkt);
+ }
+}
+
+static int mpegts_read_close(AVFormatContext *s)
+{
+ MpegTSContext *ts = s->priv_data;
+ int i;
+ for(i=0;i<NB_PID_MAX;i++)
+ if (ts->pids[i]) mpegts_close_filter(ts, ts->pids[i]);
+
+ for(i = 0; i < ts->nb_services; i++){
+ av_free(ts->services[i]->provider_name);
+ av_free(ts->services[i]->name);
+ av_free(ts->services[i]);
+ }
+ av_freep(&ts->services);
+
+ return 0;
+}
+
+static int64_t mpegts_get_pcr(AVFormatContext *s, int stream_index,
+ int64_t *ppos, int64_t pos_limit)
+{
+ MpegTSContext *ts = s->priv_data;
+ int64_t pos, timestamp;
+ uint8_t buf[TS_PACKET_SIZE];
+ int pcr_l, pid;
+ const int find_next= 1;
+ pos = ((*ppos + ts->raw_packet_size - 1) / ts->raw_packet_size) * ts->raw_packet_size;
+ if (find_next) {
+ for(;;) {
+ url_fseek(&s->pb, pos, SEEK_SET);
+ if (get_buffer(&s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
+ return AV_NOPTS_VALUE;
+ pid = ((buf[1] & 0x1f) << 8) | buf[2];
+ if (pid == ts->pcr_pid &&
+ parse_pcr(&timestamp, &pcr_l, buf) == 0) {
+ break;
+ }
+ pos += ts->raw_packet_size;
+ }
+ } else {
+ for(;;) {
+ pos -= ts->raw_packet_size;
+ if (pos < 0)
+ return AV_NOPTS_VALUE;
+ url_fseek(&s->pb, pos, SEEK_SET);
+ if (get_buffer(&s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
+ return AV_NOPTS_VALUE;
+ pid = ((buf[1] & 0x1f) << 8) | buf[2];
+ if (pid == ts->pcr_pid &&
+ parse_pcr(&timestamp, &pcr_l, buf) == 0) {
+ break;
+ }
+ }
+ }
+ *ppos = pos;
+
+ return timestamp;
+}
+
+static int read_seek(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
+ MpegTSContext *ts = s->priv_data;
+ uint8_t buf[TS_PACKET_SIZE];
+ int64_t pos;
+
+ if(av_seek_frame_binary(s, stream_index, target_ts, flags) < 0)
+ return -1;
+
+ pos= url_ftell(&s->pb);
+
+ for(;;) {
+ url_fseek(&s->pb, pos, SEEK_SET);
+ if (get_buffer(&s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
+ return -1;
+// pid = ((buf[1] & 0x1f) << 8) | buf[2];
+ if(buf[1] & 0x40) break;
+ pos += ts->raw_packet_size;
+ }
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+ return 0;
+}
+
+/**************************************************************/
+/* parsing functions - called from other demuxers such as RTP */
+
+MpegTSContext *mpegts_parse_open(AVFormatContext *s)
+{
+ MpegTSContext *ts;
+
+ ts = av_mallocz(sizeof(MpegTSContext));
+ if (!ts)
+ return NULL;
+ /* no stream case, currently used by RTP */
+ ts->raw_packet_size = TS_PACKET_SIZE;
+ ts->stream = s;
+ ts->auto_guess = 1;
+ return ts;
+}
+
+/* return the consumed length if a packet was output, or -1 if no
+ packet is output */
+int mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
+ const uint8_t *buf, int len)
+{
+ int len1;
+
+ len1 = len;
+ ts->pkt = pkt;
+ ts->stop_parse = 0;
+ for(;;) {
+ if (ts->stop_parse)
+ break;
+ if (len < TS_PACKET_SIZE)
+ return -1;
+ if (buf[0] != 0x47) {
+ buf++;
+ len--;
+ } else {
+ handle_packet(ts, buf);
+ buf += TS_PACKET_SIZE;
+ len -= TS_PACKET_SIZE;
+ }
+ }
+ return len1 - len;
+}
+
+void mpegts_parse_close(MpegTSContext *ts)
+{
+ int i;
+
+ for(i=0;i<NB_PID_MAX;i++)
+ av_free(ts->pids[i]);
+ av_free(ts);
+}
+
+AVInputFormat mpegts_demuxer = {
+ "mpegts",
+ "MPEG2 transport stream format",
+ sizeof(MpegTSContext),
+ mpegts_probe,
+ mpegts_read_header,
+ mpegts_read_packet,
+ mpegts_read_close,
+ read_seek,
+ mpegts_get_pcr,
+ .flags = AVFMT_SHOW_IDS,
+};
diff --git a/contrib/ffmpeg/libavformat/mpegts.h b/contrib/ffmpeg/libavformat/mpegts.h
new file mode 100644
index 000000000..b3eb3cda7
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpegts.h
@@ -0,0 +1,63 @@
+/*
+ * MPEG2 transport stream defines
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define TS_FEC_PACKET_SIZE 204
+#define TS_DVHS_PACKET_SIZE 192
+#define TS_PACKET_SIZE 188
+#define NB_PID_MAX 8192
+#define MAX_SECTION_SIZE 4096
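+
+/* 188 is the plain MPEG-2 TS packet size; 204 adds the 16 Reed-Solomon
+   parity bytes of DVB FEC; 192 is the D-VHS style variant, a 188-byte
+   packet with a 4-byte extra header per packet. */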
+
+/* pids */
+#define PAT_PID 0x0000
+#define SDT_PID 0x0011
+
+/* table ids */
+#define PAT_TID 0x00
+#define PMT_TID 0x02
+#define SDT_TID 0x42
+
+/* descriptor ids */
+#define DVB_SUBT_DESCID 0x59
+
+#define STREAM_TYPE_VIDEO_MPEG1 0x01
+#define STREAM_TYPE_VIDEO_MPEG2 0x02
+#define STREAM_TYPE_AUDIO_MPEG1 0x03
+#define STREAM_TYPE_AUDIO_MPEG2 0x04
+#define STREAM_TYPE_PRIVATE_SECTION 0x05
+#define STREAM_TYPE_PRIVATE_DATA 0x06
+#define STREAM_TYPE_AUDIO_AAC 0x0f
+#define STREAM_TYPE_VIDEO_MPEG4 0x10
+#define STREAM_TYPE_VIDEO_H264 0x1b
+
+#define STREAM_TYPE_AUDIO_AC3 0x81
+#define STREAM_TYPE_AUDIO_DTS 0x8a
+
+#define STREAM_TYPE_SUBTITLE_DVB 0x100
+
+unsigned int mpegts_crc32(const uint8_t *data, int len);
+extern AVOutputFormat mpegts_muxer;
+
+typedef struct MpegTSContext MpegTSContext;
+
+MpegTSContext *mpegts_parse_open(AVFormatContext *s);
+int mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
+ const uint8_t *buf, int len);
+void mpegts_parse_close(MpegTSContext *ts);
diff --git a/contrib/ffmpeg/libavformat/mpegtsenc.c b/contrib/ffmpeg/libavformat/mpegtsenc.c
new file mode 100644
index 000000000..39868bea4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpegtsenc.c
@@ -0,0 +1,676 @@
+/*
+ * MPEG2 transport stream (aka DVB) muxer
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "crc.h"
+#include "mpegts.h"
+
+/* write DVB SI sections */
+
+/*********************************************/
+/* mpegts section writer */
+
+typedef struct MpegTSSection {
+ int pid;
+ int cc;
+ void (*write_packet)(struct MpegTSSection *s, const uint8_t *packet);
+ void *opaque;
+} MpegTSSection;
+
+/* NOTE: 4 bytes must be left at the end for the crc32 */
+static void mpegts_write_section(MpegTSSection *s, uint8_t *buf, int len)
+{
+ unsigned int crc;
+ unsigned char packet[TS_PACKET_SIZE];
+ const unsigned char *buf_ptr;
+ unsigned char *q;
+ int first, b, len1, left;
+
+ crc = bswap_32(av_crc(av_crc04C11DB7, -1, buf, len - 4));
+ buf[len - 4] = (crc >> 24) & 0xff;
+ buf[len - 3] = (crc >> 16) & 0xff;
+ buf[len - 2] = (crc >> 8) & 0xff;
+ buf[len - 1] = (crc) & 0xff;
+
+ /* send each packet */
+ buf_ptr = buf;
+ while (len > 0) {
+ first = (buf == buf_ptr);
+ q = packet;
+ *q++ = 0x47;
+ b = (s->pid >> 8);
+ if (first)
+ b |= 0x40;
+ *q++ = b;
+ *q++ = s->pid;
+ s->cc = (s->cc + 1) & 0xf;
+ *q++ = 0x10 | s->cc;
+ if (first)
+ *q++ = 0; /* 0 offset */
+ len1 = TS_PACKET_SIZE - (q - packet);
+ if (len1 > len)
+ len1 = len;
+ memcpy(q, buf_ptr, len1);
+ q += len1;
+ /* add known padding data */
+ left = TS_PACKET_SIZE - (q - packet);
+ if (left > 0)
+ memset(q, 0xff, left);
+
+ s->write_packet(s, packet);
+
+ buf_ptr += len1;
+ len -= len1;
+ }
+}
+
+static inline void put16(uint8_t **q_ptr, int val)
+{
+ uint8_t *q;
+ q = *q_ptr;
+ *q++ = val >> 8;
+ *q++ = val;
+ *q_ptr = q;
+}
+
+static int mpegts_write_section1(MpegTSSection *s, int tid, int id,
+ int version, int sec_num, int last_sec_num,
+ uint8_t *buf, int len)
+{
+ uint8_t section[1024], *q;
+ unsigned int tot_len;
+
+ tot_len = 3 + 5 + len + 4;
+ /* check if not too big */
+ if (tot_len > 1024)
+ return -1;
+
+ q = section;
+ *q++ = tid;
+ put16(&q, 0xb000 | (len + 5 + 4)); /* 5 byte header + 4 byte CRC */
+ put16(&q, id);
+ *q++ = 0xc1 | (version << 1); /* current_next_indicator = 1 */
+ *q++ = sec_num;
+ *q++ = last_sec_num;
+ memcpy(q, buf, len);
+
+ mpegts_write_section(s, section, tot_len);
+ return 0;
+}
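+
+/* mpegts_write_section1() prepends the standard long-form section header:
+   table_id, then 0xb000 | length (section_syntax_indicator = 1, length
+   covering the 5 remaining header bytes, the payload and the CRC),
+   table_id_extension, 0xc1 | (version << 1) (current_next_indicator = 1),
+   section_number and last_section_number; mpegts_write_section() then
+   fills in the CRC32 and splits the result into TS packets. */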
+
+/*********************************************/
+/* mpegts writer */
+
+#define DEFAULT_PMT_START_PID 0x1000
+#define DEFAULT_START_PID 0x0100
+#define DEFAULT_PROVIDER_NAME "FFmpeg"
+#define DEFAULT_SERVICE_NAME "Service01"
+
+/* default network id, transport stream and service identifiers */
+#define DEFAULT_ONID 0x0001
+#define DEFAULT_TSID 0x0001
+#define DEFAULT_SID 0x0001
+
+/* a PES packet header is generated every DEFAULT_PES_HEADER_FREQ packets */
+#define DEFAULT_PES_HEADER_FREQ 16
+#define DEFAULT_PES_PAYLOAD_SIZE ((DEFAULT_PES_HEADER_FREQ - 1) * 184 + 170)
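+
+/* A TS packet carries at most 184 payload bytes; presumably the first
+   packet of each PES loses a further 14 bytes to the PES header written in
+   mpegts_write_pes() (6-byte start code/stream id/length plus 8 bytes of
+   flags, header length and PTS), which gives (FREQ - 1) * 184 + 170. */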
+
+/* we retransmit the SI info at this rate, in milliseconds */
+#define SDT_RETRANS_TIME 500
+#define PAT_RETRANS_TIME 100
+#define PCR_RETRANS_TIME 20
+
+typedef struct MpegTSWriteStream {
+ struct MpegTSService *service;
+ int pid; /* stream associated pid */
+ int cc;
+ int payload_index;
+ int64_t payload_pts;
+ uint8_t payload[DEFAULT_PES_PAYLOAD_SIZE];
+} MpegTSWriteStream;
+
+typedef struct MpegTSService {
+ MpegTSSection pmt; /* MPEG2 pmt table context */
+ int sid; /* service ID */
+ char *name;
+ char *provider_name;
+ int pcr_pid;
+ int pcr_packet_count;
+ int pcr_packet_freq;
+} MpegTSService;
+
+typedef struct MpegTSWrite {
+ MpegTSSection pat; /* MPEG2 pat table */
+ MpegTSSection sdt; /* MPEG2 sdt table context */
+ MpegTSService **services;
+ int sdt_packet_count;
+ int sdt_packet_freq;
+ int pat_packet_count;
+ int pat_packet_freq;
+ int nb_services;
+ int onid;
+ int tsid;
+} MpegTSWrite;
+
+static void mpegts_write_pat(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSService *service;
+ uint8_t data[1012], *q;
+ int i;
+
+ q = data;
+ for(i = 0; i < ts->nb_services; i++) {
+ service = ts->services[i];
+ put16(&q, service->sid);
+ put16(&q, 0xe000 | service->pmt.pid);
+ }
+ mpegts_write_section1(&ts->pat, PAT_TID, ts->tsid, 0, 0, 0,
+ data, q - data);
+}
+
+static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
+{
+ // MpegTSWrite *ts = s->priv_data;
+ uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr;
+ int val, stream_type, i;
+
+ q = data;
+ put16(&q, 0xe000 | service->pcr_pid);
+
+ program_info_length_ptr = q;
+ q += 2; /* patched after */
+
+ /* put program info here */
+
+ val = 0xf000 | (q - program_info_length_ptr - 2);
+ program_info_length_ptr[0] = val >> 8;
+ program_info_length_ptr[1] = val;
+
+ for(i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ MpegTSWriteStream *ts_st = st->priv_data;
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MPEG1VIDEO:
+ case CODEC_ID_MPEG2VIDEO:
+ stream_type = STREAM_TYPE_VIDEO_MPEG2;
+ break;
+ case CODEC_ID_MPEG4:
+ stream_type = STREAM_TYPE_VIDEO_MPEG4;
+ break;
+ case CODEC_ID_H264:
+ stream_type = STREAM_TYPE_VIDEO_H264;
+ break;
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ stream_type = STREAM_TYPE_AUDIO_MPEG1;
+ break;
+ case CODEC_ID_AAC:
+ stream_type = STREAM_TYPE_AUDIO_AAC;
+ break;
+ case CODEC_ID_AC3:
+ stream_type = STREAM_TYPE_AUDIO_AC3;
+ break;
+ default:
+ stream_type = STREAM_TYPE_PRIVATE_DATA;
+ break;
+ }
+ *q++ = stream_type;
+ put16(&q, 0xe000 | ts_st->pid);
+ desc_length_ptr = q;
+ q += 2; /* patched after */
+
+ /* write optional descriptors here */
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (strlen(st->language) == 3) {
+ *q++ = 0x0a; /* ISO 639 language descriptor */
+ *q++ = 4;
+ *q++ = st->language[0];
+ *q++ = st->language[1];
+ *q++ = st->language[2];
+ *q++ = 0; /* undefined type */
+ }
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ {
+ const char *language;
+ language = st->language;
+ if (strlen(language) != 3)
+ language = "eng";
+ *q++ = 0x59;
+ *q++ = 8;
+ *q++ = language[0];
+ *q++ = language[1];
+ *q++ = language[2];
+ *q++ = 0x10; /* normal subtitles (0x20 = for the hearing impaired) */
+ put16(&q, 1); /* page id */
+ put16(&q, 1); /* ancillary page id */
+ }
+ break;
+ }
+
+ val = 0xf000 | (q - desc_length_ptr - 2);
+ desc_length_ptr[0] = val >> 8;
+ desc_length_ptr[1] = val;
+ }
+ mpegts_write_section1(&service->pmt, PMT_TID, service->sid, 0, 0, 0,
+ data, q - data);
+}
+
+/* NOTE: str == NULL is accepted for an empty string */
+static void putstr8(uint8_t **q_ptr, const char *str)
+{
+ uint8_t *q;
+ int len;
+
+ q = *q_ptr;
+ if (!str)
+ len = 0;
+ else
+ len = strlen(str);
+ *q++ = len;
+ memcpy(q, str, len);
+ q += len;
+ *q_ptr = q;
+}
+
+static void mpegts_write_sdt(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSService *service;
+ uint8_t data[1012], *q, *desc_list_len_ptr, *desc_len_ptr;
+ int i, running_status, free_ca_mode, val;
+
+ q = data;
+ put16(&q, ts->onid);
+ *q++ = 0xff;
+ for(i = 0; i < ts->nb_services; i++) {
+ service = ts->services[i];
+ put16(&q, service->sid);
+ *q++ = 0xfc | 0x00; /* currently no EIT info */
+ desc_list_len_ptr = q;
+ q += 2;
+ running_status = 4; /* running */
+ free_ca_mode = 0;
+
+ /* write only one descriptor for the service name and provider */
+ *q++ = 0x48;
+ desc_len_ptr = q;
+ q++;
+ *q++ = 0x01; /* digital television service */
+ putstr8(&q, service->provider_name);
+ putstr8(&q, service->name);
+ desc_len_ptr[0] = q - desc_len_ptr - 1;
+
+ /* fill descriptor length */
+ val = (running_status << 13) | (free_ca_mode << 12) |
+ (q - desc_list_len_ptr - 2);
+ desc_list_len_ptr[0] = val >> 8;
+ desc_list_len_ptr[1] = val;
+ }
+ mpegts_write_section1(&ts->sdt, SDT_TID, ts->tsid, 0, 0, 0,
+ data, q - data);
+}
+
+static MpegTSService *mpegts_add_service(MpegTSWrite *ts,
+ int sid,
+ const char *provider_name,
+ const char *name)
+{
+ MpegTSService *service;
+
+ service = av_mallocz(sizeof(MpegTSService));
+ if (!service)
+ return NULL;
+ service->pmt.pid = DEFAULT_PMT_START_PID + ts->nb_services - 1;
+ service->sid = sid;
+ service->provider_name = av_strdup(provider_name);
+ service->name = av_strdup(name);
+ service->pcr_pid = 0x1fff;
+ dynarray_add(&ts->services, &ts->nb_services, service);
+ return service;
+}
+
+static void section_write_packet(MpegTSSection *s, const uint8_t *packet)
+{
+ AVFormatContext *ctx = s->opaque;
+ put_buffer(&ctx->pb, packet, TS_PACKET_SIZE);
+}
+
+static int mpegts_write_header(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSWriteStream *ts_st;
+ MpegTSService *service;
+ AVStream *st;
+ int i, total_bit_rate;
+ const char *service_name;
+
+ ts->tsid = DEFAULT_TSID;
+ ts->onid = DEFAULT_ONID;
+ /* allocate a single DVB service */
+ service_name = s->title;
+ if (service_name[0] == '\0')
+ service_name = DEFAULT_SERVICE_NAME;
+ service = mpegts_add_service(ts, DEFAULT_SID,
+ DEFAULT_PROVIDER_NAME, service_name);
+ service->pmt.write_packet = section_write_packet;
+ service->pmt.opaque = s;
+
+ ts->pat.pid = PAT_PID;
+ ts->pat.cc = 0;
+ ts->pat.write_packet = section_write_packet;
+ ts->pat.opaque = s;
+
+ ts->sdt.pid = SDT_PID;
+ ts->sdt.cc = 0;
+ ts->sdt.write_packet = section_write_packet;
+ ts->sdt.opaque = s;
+
+ /* assign pids to each stream */
+ total_bit_rate = 0;
+ for(i = 0;i < s->nb_streams; i++) {
+ st = s->streams[i];
+ ts_st = av_mallocz(sizeof(MpegTSWriteStream));
+ if (!ts_st)
+ goto fail;
+ st->priv_data = ts_st;
+ ts_st->service = service;
+ ts_st->pid = DEFAULT_START_PID + i;
+ ts_st->payload_pts = AV_NOPTS_VALUE;
+ /* update PCR pid by using the first video stream */
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO &&
+ service->pcr_pid == 0x1fff)
+ service->pcr_pid = ts_st->pid;
+ total_bit_rate += st->codec->bit_rate;
+ }
+
+ /* if no video stream, use the first stream as PCR */
+ if (service->pcr_pid == 0x1fff && s->nb_streams > 0) {
+ ts_st = s->streams[0]->priv_data;
+ service->pcr_pid = ts_st->pid;
+ }
+
+ if (total_bit_rate <= 8 * 1024)
+ total_bit_rate = 8 * 1024;
+ service->pcr_packet_freq = (total_bit_rate * PCR_RETRANS_TIME) /
+ (TS_PACKET_SIZE * 8 * 1000);
+ ts->sdt_packet_freq = (total_bit_rate * SDT_RETRANS_TIME) /
+ (TS_PACKET_SIZE * 8 * 1000);
+ ts->pat_packet_freq = (total_bit_rate * PAT_RETRANS_TIME) /
+ (TS_PACKET_SIZE * 8 * 1000);
+#if 0
+ printf("%d %d %d\n",
+ total_bit_rate, ts->sdt_packet_freq, ts->pat_packet_freq);
+#endif
+
+ /* write the tables at the start of the file, so that they can be
+ found quickly */
+ mpegts_write_sdt(s);
+ mpegts_write_pat(s);
+ for(i = 0; i < ts->nb_services; i++) {
+ mpegts_write_pmt(s, ts->services[i]);
+ }
+ put_flush_packet(&s->pb);
+
+ return 0;
+
+ fail:
+ for(i = 0;i < s->nb_streams; i++) {
+ st = s->streams[i];
+ av_free(st->priv_data);
+ }
+ return -1;
+}
+
+/* send the SDT, PAT and PMT tables regularly */
+static void retransmit_si_info(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ int i;
+
+ if (++ts->sdt_packet_count == ts->sdt_packet_freq) {
+ ts->sdt_packet_count = 0;
+ mpegts_write_sdt(s);
+ }
+ if (++ts->pat_packet_count == ts->pat_packet_freq) {
+ ts->pat_packet_count = 0;
+ mpegts_write_pat(s);
+ for(i = 0; i < ts->nb_services; i++) {
+ mpegts_write_pmt(s, ts->services[i]);
+ }
+ }
+}
+
+/* NOTE: the payload contains the complete PES packet */
+static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
+ const uint8_t *payload, int payload_size,
+ int64_t pts)
+{
+ MpegTSWriteStream *ts_st = st->priv_data;
+ uint8_t buf[TS_PACKET_SIZE];
+ uint8_t *q;
+ int val, is_start, len, header_len, write_pcr, private_code;
+ int afc_len, stuffing_len;
+ int64_t pcr = -1; /* avoid warning */
+
+ is_start = 1;
+ while (payload_size > 0) {
+ retransmit_si_info(s);
+
+ write_pcr = 0;
+ if (ts_st->pid == ts_st->service->pcr_pid) {
+ ts_st->service->pcr_packet_count++;
+ if (ts_st->service->pcr_packet_count >=
+ ts_st->service->pcr_packet_freq) {
+ ts_st->service->pcr_packet_count = 0;
+ write_pcr = 1;
+ /* XXX: this is incorrect, but at least we have a PCR
+ value */
+ pcr = pts;
+ }
+ }
+
+ /* prepare packet header */
+ q = buf;
+ *q++ = 0x47;
+ val = (ts_st->pid >> 8);
+ if (is_start)
+ val |= 0x40;
+ *q++ = val;
+ *q++ = ts_st->pid;
+ *q++ = 0x10 | ts_st->cc | (write_pcr ? 0x20 : 0);
+ ts_st->cc = (ts_st->cc + 1) & 0xf;
+ if (write_pcr) {
+ *q++ = 7; /* AFC length */
+ *q++ = 0x10; /* flags: PCR present */
+ *q++ = pcr >> 25;
+ *q++ = pcr >> 17;
+ *q++ = pcr >> 9;
+ *q++ = pcr >> 1;
+ *q++ = (pcr & 1) << 7;
+ *q++ = 0;
+ }
+ if (is_start) {
+ /* write PES header */
+ *q++ = 0x00;
+ *q++ = 0x00;
+ *q++ = 0x01;
+ private_code = 0;
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ *q++ = 0xe0;
+ } else if (st->codec->codec_type == CODEC_TYPE_AUDIO &&
+ (st->codec->codec_id == CODEC_ID_MP2 ||
+ st->codec->codec_id == CODEC_ID_MP3)) {
+ *q++ = 0xc0;
+ } else {
+ *q++ = 0xbd;
+ if (st->codec->codec_type == CODEC_TYPE_SUBTITLE) {
+ private_code = 0x20;
+ }
+ }
+ if (pts != AV_NOPTS_VALUE)
+ header_len = 8;
+ else
+ header_len = 3;
+ if (private_code != 0)
+ header_len++;
+ len = payload_size + header_len;
+ *q++ = len >> 8;
+ *q++ = len;
+ val = 0x80;
+ /* data alignment indicator is required for subtitle data */
+ if (st->codec->codec_type == CODEC_TYPE_SUBTITLE)
+ val |= 0x04;
+ *q++ = val;
+ if (pts != AV_NOPTS_VALUE) {
+ *q++ = 0x80; /* PTS only */
+ *q++ = 0x05; /* header len */
+ val = (0x02 << 4) |
+ (((pts >> 30) & 0x07) << 1) | 1;
+ *q++ = val;
+ val = (((pts >> 15) & 0x7fff) << 1) | 1;
+ *q++ = val >> 8;
+ *q++ = val;
+ val = (((pts) & 0x7fff) << 1) | 1;
+ *q++ = val >> 8;
+ *q++ = val;
+ } else {
+ *q++ = 0x00;
+ *q++ = 0x00;
+ }
+ if (private_code != 0)
+ *q++ = private_code;
+ is_start = 0;
+ }
+ /* header size */
+ header_len = q - buf;
+ /* data len */
+ len = TS_PACKET_SIZE - header_len;
+ if (len > payload_size)
+ len = payload_size;
+ stuffing_len = TS_PACKET_SIZE - header_len - len;
+ if (stuffing_len > 0) {
+ /* add stuffing with AFC */
+ if (buf[3] & 0x20) {
+ /* stuffing already present: increase its size */
+ afc_len = buf[4] + 1;
+ memmove(buf + 4 + afc_len + stuffing_len,
+ buf + 4 + afc_len,
+ header_len - (4 + afc_len));
+ buf[4] += stuffing_len;
+ memset(buf + 4 + afc_len, 0xff, stuffing_len);
+ } else {
+ /* add stuffing */
+ memmove(buf + 4 + stuffing_len, buf + 4, header_len - 4);
+ buf[3] |= 0x20;
+ buf[4] = stuffing_len - 1;
+ if (stuffing_len >= 2) {
+ buf[5] = 0x00;
+ memset(buf + 6, 0xff, stuffing_len - 2);
+ }
+ }
+ }
+ memcpy(buf + TS_PACKET_SIZE - len, payload, len);
+ payload += len;
+ payload_size -= len;
+ put_buffer(&s->pb, buf, TS_PACKET_SIZE);
+ }
+ put_flush_packet(&s->pb);
+}
+
+static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVStream *st = s->streams[pkt->stream_index];
+ int size= pkt->size;
+ uint8_t *buf= pkt->data;
+ MpegTSWriteStream *ts_st = st->priv_data;
+ int len, max_payload_size;
+
+ if (st->codec->codec_type == CODEC_TYPE_SUBTITLE) {
+ /* for subtitles, a single PES packet must be generated */
+ mpegts_write_pes(s, st, buf, size, pkt->pts);
+ return 0;
+ }
+
+ max_payload_size = DEFAULT_PES_PAYLOAD_SIZE;
+ while (size > 0) {
+ len = max_payload_size - ts_st->payload_index;
+ if (len > size)
+ len = size;
+ memcpy(ts_st->payload + ts_st->payload_index, buf, len);
+ buf += len;
+ size -= len;
+ ts_st->payload_index += len;
+ if (ts_st->payload_pts == AV_NOPTS_VALUE)
+ ts_st->payload_pts = pkt->pts;
+ if (ts_st->payload_index >= max_payload_size) {
+ mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
+ ts_st->payload_pts);
+ ts_st->payload_pts = AV_NOPTS_VALUE;
+ ts_st->payload_index = 0;
+ }
+ }
+ return 0;
+}
+
+static int mpegts_write_end(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSWriteStream *ts_st;
+ MpegTSService *service;
+ AVStream *st;
+ int i;
+
+ /* flush current packets */
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ ts_st = st->priv_data;
+ if (ts_st->payload_index > 0) {
+ mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
+ ts_st->payload_pts);
+ }
+ }
+ put_flush_packet(&s->pb);
+
+ for(i = 0; i < ts->nb_services; i++) {
+ service = ts->services[i];
+ av_freep(&service->provider_name);
+ av_freep(&service->name);
+ av_free(service);
+ }
+ av_free(ts->services);
+
+ return 0;
+}
+
+AVOutputFormat mpegts_muxer = {
+ "mpegts",
+ "MPEG2 transport stream format",
+ "video/x-mpegts",
+ "ts",
+ sizeof(MpegTSWrite),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG2VIDEO,
+ mpegts_write_header,
+ mpegts_write_packet,
+ mpegts_write_end,
+};
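The PES header written in mpegts_write_pes() above packs the 33-bit PTS into five bytes: a '0010' prefix, the timestamp split into 3 + 15 + 15 bit groups, and a marker bit of 1 closing each group. A minimal standalone sketch of that packing, mirroring the byte layout above (the helper name is illustrative only):

#include <stdint.h>

/* Pack a 33-bit PTS into the 5-byte PES timestamp field. */
static void write_pts(uint8_t *q, int64_t pts)
{
    int val;

    val = (0x02 << 4) | (((pts >> 30) & 0x07) << 1) | 1; /* '0010' + PTS[32..30] + marker */
    *q++ = val;
    val = (((pts >> 15) & 0x7fff) << 1) | 1;             /* PTS[29..15] + marker */
    *q++ = val >> 8;
    *q++ = val;
    val = ((pts & 0x7fff) << 1) | 1;                     /* PTS[14..0] + marker */
    *q++ = val >> 8;
    *q++ = val;
}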
diff --git a/contrib/ffmpeg/libavformat/mpjpeg.c b/contrib/ffmpeg/libavformat/mpjpeg.c
new file mode 100644
index 000000000..937917313
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpjpeg.c
@@ -0,0 +1,67 @@
+/*
+ * Multipart JPEG format
+ * Copyright (c) 2000, 2001, 2002, 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+/* Multipart JPEG */
+
+#define BOUNDARY_TAG "ffserver"
+
+static int mpjpeg_write_header(AVFormatContext *s)
+{
+ uint8_t buf1[256];
+
+ snprintf(buf1, sizeof(buf1), "--%s\n", BOUNDARY_TAG);
+ put_buffer(&s->pb, buf1, strlen(buf1));
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int mpjpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ uint8_t buf1[256];
+
+ snprintf(buf1, sizeof(buf1), "Content-type: image/jpeg\n\n");
+ put_buffer(&s->pb, buf1, strlen(buf1));
+ put_buffer(&s->pb, pkt->data, pkt->size);
+
+ snprintf(buf1, sizeof(buf1), "\n--%s\n", BOUNDARY_TAG);
+ put_buffer(&s->pb, buf1, strlen(buf1));
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int mpjpeg_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVOutputFormat mpjpeg_muxer = {
+ "mpjpeg",
+ "Mime multipart JPEG format",
+ "multipart/x-mixed-replace;boundary=" BOUNDARY_TAG,
+ "mjpg",
+ 0,
+ CODEC_ID_NONE,
+ CODEC_ID_MJPEG,
+ mpjpeg_write_header,
+ mpjpeg_write_packet,
+ mpjpeg_write_trailer,
+};
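For reference, the byte stream the muxer above produces is a plain multipart sequence with the boundary tag fixed to "ffserver": one boundary written by the header, then one part per packet, each part being a Content-type line, a blank line, the raw JPEG data and a trailing boundary (frame bytes elided here):

--ffserver
Content-type: image/jpeg

<JPEG data for frame 1>
--ffserver
Content-type: image/jpeg

<JPEG data for frame 2>
--ffserver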
diff --git a/contrib/ffmpeg/libavformat/mtv.c b/contrib/ffmpeg/libavformat/mtv.c
new file mode 100644
index 000000000..7a68ea97f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mtv.c
@@ -0,0 +1,187 @@
+/*
+ * mtv demuxer
+ * Copyright (c) 2006 Reynaldo H. Verdejo Pinochet
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mtv.c
+ * MTV demuxer.
+ */
+
+#include "avformat.h"
+#include "bswap.h"
+
+#define MTV_ASUBCHUNK_DATA_SIZE 500
+#define MTV_HEADER_SIZE 512
+#define MTV_AUDIO_PADDING_SIZE 12
+#define AUDIO_SAMPLING_RATE 44100
+#define VIDEO_SID 0
+#define AUDIO_SID 1
+
+typedef struct MTVDemuxContext {
+
+ unsigned int file_size; ///< filesize, not always right
+ unsigned int segments; ///< number of 512 byte segments
+ unsigned int audio_identifier; ///< 'MP3' on all files I have seen
+ unsigned int audio_br; ///< bitrate of the audio channel (mp3)
+ unsigned int img_colorfmt; ///< frame colorfmt rgb 565/555
+ unsigned int img_bpp; ///< frame bits per pixel
+ unsigned int img_width; //
+ unsigned int img_height; //
+ unsigned int img_segment_size; ///< size of image segment
+ unsigned int video_fps; //
+ unsigned int audio_subsegments; ///< audio subsegments on one segment
+
+ uint8_t audio_packet_count;
+
+} MTVDemuxContext;
+
+static int mtv_probe(AVProbeData *p)
+{
+ if(p->buf_size < 3)
+ return 0;
+
+ /* Magic is 'AMV' */
+
+ if(*(p->buf) != 'A' || *(p->buf+1) != 'M' || *(p->buf+2) != 'V')
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int mtv_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ MTVDemuxContext *mtv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+
+
+ url_fskip(pb, 3);
+ mtv->file_size = get_le32(pb);
+ mtv->segments = get_le32(pb);
+ url_fskip(pb, 32);
+ mtv->audio_identifier = get_le24(pb);
+ mtv->audio_br = get_le16(pb);
+ mtv->img_colorfmt = get_le24(pb);
+ mtv->img_bpp = get_byte(pb);
+ mtv->img_width = get_le16(pb);
+ mtv->img_height = get_le16(pb);
+ mtv->img_segment_size = get_le16(pb);
+ url_fskip(pb, 4);
+ mtv->audio_subsegments = get_le16(pb);
+ mtv->video_fps = (mtv->audio_br / 4) / mtv->audio_subsegments;
+
+ /* FIXME Add sanity check here */
+
+ /* first packet is always audio */
+
+ mtv->audio_packet_count = 1;
+
+ /* all systems go! init decoders */
+
+ /* video - raw rgb565 */
+
+ st = av_new_stream(s, VIDEO_SID);
+ if(!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 64, 1, mtv->video_fps);
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->codec_tag = MKTAG('R', 'G', 'B', mtv->img_bpp);
+ st->codec->width = mtv->img_width;
+ st->codec->height = mtv->img_height;
+ st->codec->bits_per_sample = mtv->img_bpp;
+ st->codec->sample_rate = mtv->video_fps;
+
+ /* audio - mp3 */
+
+ st = av_new_stream(s, AUDIO_SID);
+ if(!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 64, 1, AUDIO_SAMPLING_RATE);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_MP3;
+ st->codec->bit_rate = mtv->audio_br;
+ st->need_parsing=1;
+
+ /* Jump over header */
+
+ if(url_fseek(pb, MTV_HEADER_SIZE, SEEK_SET) != MTV_HEADER_SIZE)
+ return AVERROR_IO;
+
+ return(0);
+
+}
+
+static int mtv_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ MTVDemuxContext *mtv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret;
+#ifndef WORDS_BIGENDIAN
+ int i;
+#endif
+
+ ret = 0;
+
+ if(mtv->audio_subsegments >= mtv->audio_packet_count)
+ {
+ url_fskip(pb, MTV_AUDIO_PADDING_SIZE);
+
+ ret = av_get_packet(pb, pkt, MTV_ASUBCHUNK_DATA_SIZE);
+ if(ret != MTV_ASUBCHUNK_DATA_SIZE)
+ return AVERROR_IO;
+
+ mtv->audio_packet_count++;
+ pkt->stream_index = AUDIO_SID;
+
+ }else
+ {
+ ret = av_get_packet(pb, pkt, mtv->img_segment_size);
+ if(ret != mtv->img_segment_size)
+ return AVERROR_IO;
+
+#ifndef WORDS_BIGENDIAN
+
+ /* pkt->data holds each RGB565 pixel in the opposite byte order
+ * from what PIX_FMT_RGB565 expects on this little-endian host,
+ * so just swap the two bytes of every 16-bit pixel as they come
+ */
+
+ for(i=0;i<mtv->img_segment_size/2;i++)
+ *((uint16_t *)pkt->data+i) = bswap_16(*((uint16_t *)pkt->data+i));
+#endif
+ mtv->audio_packet_count = 1;
+ pkt->stream_index = VIDEO_SID;
+ }
+
+ return(ret);
+}
+
+AVInputFormat mtv_demuxer = {
+ "MTV",
+ "MTV format",
+ sizeof(MTVDemuxContext),
+ mtv_probe,
+ mtv_read_header,
+ mtv_read_packet,
+};
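The reads and skips in mtv_read_header() above imply the following layout for the fixed 512-byte header (offsets in bytes, multi-byte fields little-endian; the field names are those of MTVDemuxContext):

offset  size  field
     0     3  magic 'AMV'
     3     4  file_size
     7     4  segments
    11    32  (skipped)
    43     3  audio_identifier
    46     2  audio_br
    48     3  img_colorfmt
    51     1  img_bpp
    52     2  img_width
    54     2  img_height
    56     2  img_segment_size
    58     4  (skipped)
    62     2  audio_subsegments
    64   448  rest of the header (skipped; demuxing starts at offset 512)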
diff --git a/contrib/ffmpeg/libavformat/mxf.c b/contrib/ffmpeg/libavformat/mxf.c
new file mode 100644
index 000000000..b20679943
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mxf.c
@@ -0,0 +1,1082 @@
+/*
+ * MXF demuxer.
+ * Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * References
+ * SMPTE 336M KLV Data Encoding Protocol Using Key-Length-Value
+ * SMPTE 377M MXF File Format Specifications
+ * SMPTE 378M Operational Pattern 1a
+ * SMPTE 379M MXF Generic Container
+ * SMPTE 381M Mapping MPEG Streams into the MXF Generic Container
+ * SMPTE 382M Mapping AES3 and Broadcast Wave Audio into the MXF Generic Container
+ * SMPTE 383M Mapping DV-DIF Data to the MXF Generic Container
+ *
+ * Principle
+ * Search for Track numbers which will identify essence element KLV packets.
+ * Search for SourcePackages which define the tracks that carry those Track numbers.
+ * The Material Package contains tracks with references to SourcePackage tracks.
+ * Search for Descriptors (Picture, Sound) which contain codec info and parameters.
+ * Assign Descriptors to the correct Tracks.
+ *
+ * Metadata reading functions read Local Tags, get InstanceUID(0x3C0A) then add MetaDataSet to MXFContext.
+ * Metadata parsing resolves Strong References to objects.
+ *
+ * Simple demuxer, only OP1A supported and some files might not work at all.
+ * Only tracks with associated descriptors will be decoded. "Highly Desirable" SMPTE 377M D.1
+ */
+
+//#define DEBUG
+
+#include "avformat.h"
+
+typedef uint8_t UID[16];
+
+enum MXFMetadataSetType {
+ MaterialPackage,
+ SourcePackage,
+ SourceClip,
+ TimecodeComponent,
+ Sequence,
+ MultipleDescriptor,
+ Descriptor,
+ Track,
+ EssenceContainerData,
+};
+
+typedef struct MXFStructuralComponent {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID source_package_uid;
+ UID data_definition_ul;
+ int64_t duration;
+ int64_t start_position;
+ int source_track_id;
+} MXFStructuralComponent;
+
+typedef struct MXFSequence {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID data_definition_ul;
+ UID *structural_components_refs;
+ int structural_components_count;
+ int64_t duration;
+} MXFSequence;
+
+typedef struct MXFTrack {
+ UID uid;
+ enum MXFMetadataSetType type;
+ MXFSequence *sequence; /* mandatory, and only one */
+ UID sequence_ref;
+ int track_id;
+ uint8_t track_number[4];
+ AVRational edit_rate;
+} MXFTrack;
+
+typedef struct MXFDescriptor {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID essence_container_ul;
+ UID essence_codec_ul;
+ AVRational sample_rate;
+ AVRational aspect_ratio;
+ int width;
+ int height;
+ int channels;
+ int bits_per_sample;
+ UID *sub_descriptors_refs;
+ int sub_descriptors_count;
+ int linked_track_id;
+ uint8_t *extradata;
+ int extradata_size;
+} MXFDescriptor;
+
+typedef struct MXFPackage {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID package_uid;
+ UID *tracks_refs;
+ int tracks_count;
+ MXFDescriptor *descriptor; /* only one */
+ UID descriptor_ref;
+} MXFPackage;
+
+typedef struct MXFEssenceContainerData {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID linked_package_uid;
+} MXFEssenceContainerData;
+
+typedef struct {
+ UID uid;
+ enum MXFMetadataSetType type;
+} MXFMetadataSet;
+
+typedef struct MXFContext {
+ UID *packages_refs;
+ int packages_count;
+ UID *essence_container_data_sets_refs;
+ int essence_container_data_sets_count;
+ UID *essence_containers_uls; /* Universal Labels SMPTE RP224 */
+ int essence_containers_uls_count;
+ UID operational_pattern_ul;
+ UID content_storage_uid;
+ MXFMetadataSet **metadata_sets;
+ int metadata_sets_count;
+ AVFormatContext *fc;
+} MXFContext;
+
+typedef struct KLVPacket {
+ UID key;
+ offset_t offset;
+ uint64_t length;
+} KLVPacket;
+
+enum MXFWrappingScheme {
+ Frame,
+ Clip,
+};
+
+typedef struct MXFCodecUL {
+ UID uid;
+ enum CodecID id;
+ enum MXFWrappingScheme wrapping;
+} MXFCodecUL;
+
+typedef struct MXFDataDefinitionUL {
+ UID uid;
+ enum CodecType type;
+} MXFDataDefinitionUL;
+
+typedef struct MXFMetadataReadTableEntry {
+ const UID key;
+ int (*read)(MXFContext *mxf, KLVPacket *klv);
+} MXFMetadataReadTableEntry;
+
+/* partial keys to match */
+static const uint8_t mxf_header_partition_pack_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02 };
+static const uint8_t mxf_essence_element_key[] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01 };
+
+#define IS_KLV_KEY(x, y) (!memcmp(x, y, sizeof(y)))
+
+#define PRINT_KEY(s, x) dprintf("%s %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", s, \
+ (x)[0], (x)[1], (x)[2], (x)[3], (x)[4], (x)[5], (x)[6], (x)[7], (x)[8], (x)[9], (x)[10], (x)[11], (x)[12], (x)[13], (x)[14], (x)[15])
+
+static int64_t klv_decode_ber_length(ByteIOContext *pb)
+{
+ int64_t size = 0;
+ uint8_t length = get_byte(pb);
+ int type = length >> 7;
+
+ if (type) { /* long form */
+ int bytes_num = length & 0x7f;
+ /* SMPTE 379M 5.3.4 guarantees that bytes_num must not exceed 8 bytes */
+ if (bytes_num > 8)
+ return -1;
+ while (bytes_num--)
+ size = size << 8 | get_byte(pb);
+ } else {
+ size = length & 0x7f;
+ }
+ return size;
+}
+
+static int klv_read_packet(KLVPacket *klv, ByteIOContext *pb)
+{
+ klv->offset = url_ftell(pb);
+ get_buffer(pb, klv->key, 16);
+ klv->length = klv_decode_ber_length(pb);
+ return klv->length == -1 ? -1 : 0;
+}
+
+static int mxf_get_stream_index(AVFormatContext *s, KLVPacket *klv)
+{
+ int i;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ MXFTrack *track = s->streams[i]->priv_data;
+ /* SMPTE 379M 7.3 */
+ if (!memcmp(klv->key + sizeof(mxf_essence_element_key), track->track_number, sizeof(track->track_number)))
+ return i;
+ }
+ /* return 0 if only one stream, for OP Atom files with 0 as track number */
+ return s->nb_streams == 1 ? 0 : -1;
+}
+
+/* XXX: use AVBitStreamFilter */
+static int mxf_get_d10_aes3_packet(ByteIOContext *pb, AVStream *st, AVPacket *pkt, int64_t length)
+{
+ uint8_t buffer[61444];
+ uint8_t *buf_ptr, *end_ptr, *data_ptr;
+
+ if (length > 61444) /* worst case PAL 1920 samples 8 channels */
+ return -1;
+ get_buffer(pb, buffer, length);
+ av_new_packet(pkt, length);
+ data_ptr = pkt->data;
+ end_ptr = buffer + length;
+ buf_ptr = buffer + 4; /* skip SMPTE 331M header */
+ for (; buf_ptr < end_ptr; buf_ptr += 4) {
+ if (st->codec->bits_per_sample == 24) {
+ data_ptr[0] = (buf_ptr[2] >> 4) | ((buf_ptr[3] & 0x0f) << 4);
+ data_ptr[1] = (buf_ptr[1] >> 4) | ((buf_ptr[2] & 0x0f) << 4);
+ data_ptr[2] = (buf_ptr[0] >> 4) | ((buf_ptr[1] & 0x0f) << 4);
+ data_ptr += 3;
+ } else {
+ data_ptr[0] = (buf_ptr[2] >> 4) | ((buf_ptr[3] & 0x0f) << 4);
+ data_ptr[1] = (buf_ptr[1] >> 4) | ((buf_ptr[2] & 0x0f) << 4);
+ data_ptr += 2;
+ }
+ }
+ pkt->size = data_ptr - pkt->data;
+ return 0;
+}
+
+static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ KLVPacket klv;
+
+ while (!url_feof(&s->pb)) {
+ if (klv_read_packet(&klv, &s->pb) < 0) {
+ av_log(s, AV_LOG_ERROR, "error reading KLV packet\n");
+ return -1;
+ }
+#ifdef DEBUG
+ PRINT_KEY("read packet", klv.key);
+#endif
+ if (IS_KLV_KEY(klv.key, mxf_essence_element_key)) {
+ int index = mxf_get_stream_index(s, &klv);
+ if (index < 0) {
+ av_log(s, AV_LOG_ERROR, "error getting stream index\n");
+ url_fskip(&s->pb, klv.length);
+ return -1;
+ }
+ /* check for 8 channels AES3 element */
+ if (klv.key[12] == 0x06 && klv.key[13] == 0x01 && klv.key[14] == 0x10) {
+ if (mxf_get_d10_aes3_packet(&s->pb, s->streams[index], pkt, klv.length) < 0) {
+ av_log(s, AV_LOG_ERROR, "error reading D-10 aes3 frame\n");
+ return -1;
+ }
+ } else
+ av_get_packet(&s->pb, pkt, klv.length);
+ pkt->stream_index = index;
+ return 0;
+ } else
+ url_fskip(&s->pb, klv.length);
+ }
+ return AVERROR_IO;
+}
+
+static int mxf_add_metadata_set(MXFContext *mxf, void *metadata_set)
+{
+ mxf->metadata_sets = av_realloc(mxf->metadata_sets, (mxf->metadata_sets_count + 1) * sizeof(*mxf->metadata_sets));
+ mxf->metadata_sets[mxf->metadata_sets_count] = metadata_set;
+ mxf->metadata_sets_count++;
+ return 0;
+}
+
+static int mxf_read_metadata_preface(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
+
+ switch (tag) {
+ case 0x3B03:
+ get_buffer(pb, mxf->content_storage_uid, 16);
+ break;
+ case 0x3B09:
+ get_buffer(pb, mxf->operational_pattern_ul, 16);
+ break;
+ case 0x3B0A:
+ mxf->essence_containers_uls_count = get_be32(pb);
+ if (mxf->essence_containers_uls_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ mxf->essence_containers_uls = av_malloc(mxf->essence_containers_uls_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)mxf->essence_containers_uls, mxf->essence_containers_uls_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ return 0;
+}
+
+static int mxf_read_metadata_content_storage(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x1901:
+ mxf->packages_count = get_be32(pb);
+ if (mxf->packages_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ mxf->packages_refs = av_malloc(mxf->packages_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)mxf->packages_refs, mxf->packages_count * sizeof(UID));
+ break;
+ case 0x1902:
+ mxf->essence_container_data_sets_count = get_be32(pb);
+ if (mxf->essence_container_data_sets_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ mxf->essence_container_data_sets_refs = av_malloc(mxf->essence_container_data_sets_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)mxf->essence_container_data_sets_refs, mxf->essence_container_data_sets_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ return 0;
+}
+
+static int mxf_read_metadata_source_clip(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFStructuralComponent *source_clip = av_mallocz(sizeof(*source_clip));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
+
+ bytes_read += size + 4;
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ if (!size) { /* ignore empty tag, needed for some files with empty UMID tag */
+ av_log(mxf->fc, AV_LOG_ERROR, "local tag 0x%04X with 0 size\n", tag);
+ continue;
+ }
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, source_clip->uid, 16);
+ break;
+ case 0x0202:
+ source_clip->duration = get_be64(pb);
+ break;
+ case 0x1201:
+ source_clip->start_position = get_be64(pb);
+ break;
+ case 0x1101:
+ /* UMID, only get last 16 bytes */
+ url_fskip(pb, 16);
+ get_buffer(pb, source_clip->source_package_uid, 16);
+ break;
+ case 0x1102:
+ source_clip->source_track_id = get_be32(pb);
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ }
+ source_clip->type = SourceClip;
+ return mxf_add_metadata_set(mxf, source_clip);
+}
+
+static int mxf_read_metadata_material_package(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFPackage *package = av_mallocz(sizeof(*package));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, package->uid, 16);
+ break;
+ case 0x4403:
+ package->tracks_count = get_be32(pb);
+ if (package->tracks_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ package->type = MaterialPackage;
+ return mxf_add_metadata_set(mxf, package);
+}
+
+static int mxf_read_metadata_track(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFTrack *track = av_mallocz(sizeof(*track));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, track->uid, 16);
+ break;
+ case 0x4801:
+ track->track_id = get_be32(pb);
+ break;
+ case 0x4804:
+ get_buffer(pb, track->track_number, 4);
+ break;
+ case 0x4B01:
+ track->edit_rate.den = get_be32(pb);
+ track->edit_rate.num = get_be32(pb);
+ break;
+ case 0x4803:
+ get_buffer(pb, track->sequence_ref, 16);
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ track->type = Track;
+ return mxf_add_metadata_set(mxf, track);
+}
+
+static int mxf_read_metadata_sequence(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFSequence *sequence = av_mallocz(sizeof(*sequence));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, sequence->uid, 16);
+ break;
+ case 0x0202:
+ sequence->duration = get_be64(pb);
+ break;
+ case 0x0201:
+ get_buffer(pb, sequence->data_definition_ul, 16);
+ break;
+ case 0x1001:
+ sequence->structural_components_count = get_be32(pb);
+ if (sequence->structural_components_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ sequence->structural_components_refs = av_malloc(sequence->structural_components_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)sequence->structural_components_refs, sequence->structural_components_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ sequence->type = Sequence;
+ return mxf_add_metadata_set(mxf, sequence);
+}
+
+static int mxf_read_metadata_source_package(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFPackage *package = av_mallocz(sizeof(*package));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, package->uid, 16);
+ break;
+ case 0x4403:
+ package->tracks_count = get_be32(pb);
+ if (package->tracks_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID));
+ break;
+ case 0x4401:
+ /* UMID, only get last 16 bytes */
+ url_fskip(pb, 16);
+ get_buffer(pb, package->package_uid, 16);
+ break;
+ case 0x4701:
+ get_buffer(pb, package->descriptor_ref, 16);
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ package->type = SourcePackage;
+ return mxf_add_metadata_set(mxf, package);
+}
+
+static int mxf_read_metadata_multiple_descriptor(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFDescriptor *descriptor = av_mallocz(sizeof(*descriptor));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, descriptor->uid, 16);
+ break;
+ case 0x3F01:
+ descriptor->sub_descriptors_count = get_be32(pb);
+ if (descriptor->sub_descriptors_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ descriptor->sub_descriptors_refs = av_malloc(descriptor->sub_descriptors_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)descriptor->sub_descriptors_refs, descriptor->sub_descriptors_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ descriptor->type = MultipleDescriptor;
+ return mxf_add_metadata_set(mxf, descriptor);
+}
+
+static void mxf_read_metadata_pixel_layout(ByteIOContext *pb, MXFDescriptor *descriptor)
+{
+ int code;
+
+ do {
+ code = get_byte(pb);
+ dprintf("pixel layout: code 0x%x\n", code);
+ switch (code) {
+ case 0x52: /* R */
+ descriptor->bits_per_sample += get_byte(pb);
+ break;
+ case 0x47: /* G */
+ descriptor->bits_per_sample += get_byte(pb);
+ break;
+ case 0x42: /* B */
+ descriptor->bits_per_sample += get_byte(pb);
+ break;
+ default:
+ get_byte(pb);
+ }
+ } while (code != 0); /* SMPTE 377M E.2.46 */
+}
+
+static int mxf_read_metadata_generic_descriptor(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFDescriptor *descriptor = av_mallocz(sizeof(*descriptor));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, descriptor->uid, 16);
+ break;
+ case 0x3004:
+ get_buffer(pb, descriptor->essence_container_ul, 16);
+ break;
+ case 0x3006:
+ descriptor->linked_track_id = get_be32(pb);
+ break;
+ case 0x3201: /* PictureEssenceCoding */
+ get_buffer(pb, descriptor->essence_codec_ul, 16);
+ break;
+ case 0x3203:
+ descriptor->width = get_be32(pb);
+ break;
+ case 0x3202:
+ descriptor->height = get_be32(pb);
+ break;
+ case 0x320E:
+ descriptor->aspect_ratio.num = get_be32(pb);
+ descriptor->aspect_ratio.den = get_be32(pb);
+ break;
+ case 0x3D03:
+ descriptor->sample_rate.num = get_be32(pb);
+ descriptor->sample_rate.den = get_be32(pb);
+ break;
+ case 0x3D06: /* SoundEssenceCompression */
+ get_buffer(pb, descriptor->essence_codec_ul, 16);
+ break;
+ case 0x3D07:
+ descriptor->channels = get_be32(pb);
+ break;
+ case 0x3D01:
+ descriptor->bits_per_sample = get_be32(pb);
+ break;
+ case 0x3401:
+ mxf_read_metadata_pixel_layout(pb, descriptor);
+ break;
+ case 0x8201: /* Private tag used by SONY C0023S01.mxf */
+ descriptor->extradata = av_malloc(size);
+ descriptor->extradata_size = size;
+ get_buffer(pb, descriptor->extradata, size);
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ descriptor->type = Descriptor;
+ return mxf_add_metadata_set(mxf, descriptor);
+}
+
+/* SMPTE RP224 http://www.smpte-ra.org/mdd/index.html */
+static const MXFDataDefinitionUL mxf_data_definition_uls[] = {
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x02,0x01,0x00,0x00,0x00 }, CODEC_TYPE_VIDEO },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x02,0x02,0x00,0x00,0x00 }, CODEC_TYPE_AUDIO },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x05,0x01,0x03,0x02,0x02,0x02,0x02,0x00,0x00 }, CODEC_TYPE_AUDIO },
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, CODEC_TYPE_DATA },
+};
+
+static const MXFCodecUL mxf_codec_uls[] = {
+ /* PictureEssenceCoding */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x02,0x02,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* 422P@ML I-Frame */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x02,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* 422P@HL I-Frame */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x03,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* 422P@HL Long GoP */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x01,0x11,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* MP@ML Long GoP */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x02,0x03,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* 422P@ML Long GoP */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x03,0x03,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* MP@HL Long GoP */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x20,0x02,0x03 }, CODEC_ID_MPEG4, Frame }, /* XDCAM proxy_pal030926.mxf */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x20,0x02,0x04 }, CODEC_ID_MPEG4, Frame }, /* XDCAM Proxy C0023S01.mxf */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x05 }, CODEC_ID_MPEG2VIDEO, Frame }, /* D-10 30Mbps PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x01 }, CODEC_ID_MPEG2VIDEO, Frame }, /* D-10 50Mbps PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x04,0x00 }, CODEC_ID_DVVIDEO, Frame }, /* DVCPRO50 PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x02,0x00 }, CODEC_ID_DVVIDEO, Frame }, /* DVCPRO25 PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x01,0x02,0x00 }, CODEC_ID_DVVIDEO, Frame }, /* DV25 IEC PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x01,0x02,0x02,0x03,0x01,0x01,0x00 }, CODEC_ID_JPEG2000, Frame }, /* JPEG2000 Codestream */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x01,0x7F,0x00,0x00,0x00 }, CODEC_ID_RAWVIDEO, Frame }, /* Uncompressed */
+ /* SoundEssenceCompression */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 }, CODEC_ID_PCM_S16LE, Frame }, /* Uncompressed */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x7F,0x00,0x00,0x00 }, CODEC_ID_PCM_S16LE, Frame },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x02,0x02,0x01,0x7E,0x00,0x00,0x00 }, CODEC_ID_PCM_S16BE, Frame }, /* From Omneon MXF file */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x01,0x01,0x00 }, CODEC_ID_PCM_ALAW, Frame },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x04,0x04,0x02,0x02,0x02,0x03,0x01,0x01,0x00 }, CODEC_ID_PCM_ALAW, Frame }, /* XDCAM Proxy C0023S01.mxf */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x01,0x00 }, CODEC_ID_AC3, Frame },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x05,0x00 }, CODEC_ID_MP2, Frame }, /* MP2 or MP3 */
+ //{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x1C,0x00 }, CODEC_ID_DOLBY_E, Frame }, /* Dolby-E */
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, CODEC_ID_NONE, Frame },
+};
+
+static const MXFCodecUL mxf_picture_essence_container_uls[] = {
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x60,0x01 }, CODEC_ID_MPEG2VIDEO, Frame }, /* MPEG-ES Frame wrapped */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0xe0,0x02 }, CODEC_ID_MPEG2VIDEO, Clip }, /* MPEG-ES Clip wrapped, 0xe0 MPV stream id */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x04,0x61,0x07 }, CODEC_ID_MPEG2VIDEO, Clip }, /* MPEG-ES Custom wrapped, 0x61 ??? stream id */
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, CODEC_ID_NONE, Frame },
+};
+
+static const MXFCodecUL mxf_sound_essence_container_uls[] = {
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x01,0x00 }, CODEC_ID_PCM_S16LE, Frame }, /* BWF Frame wrapped */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x03,0x00 }, CODEC_ID_PCM_S16LE, Frame }, /* AES Frame wrapped */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x40,0x01 }, CODEC_ID_MP2, Frame }, /* MPEG-ES Frame wrapped, 0x40 ??? stream id */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0xc0,0x01 }, CODEC_ID_MP2, Frame }, /* MPEG-ES Frame wrapped, 0xc0 MPA stream id */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0xc0,0x02 }, CODEC_ID_MP2, Clip }, /* MPEG-ES Clip wrapped, 0xc0 MPA stream id */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x05,0x01 }, CODEC_ID_PCM_S16BE, Frame }, /* D-10 Mapping 30Mbps PAL Extended Template */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x01,0x01 }, CODEC_ID_PCM_S16BE, Frame }, /* D-10 Mapping 50Mbps PAL Extended Template */
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, CODEC_ID_NONE, Frame },
+};
+
+static const MXFCodecUL *mxf_get_codec_ul(const MXFCodecUL *uls, UID *uid)
+{
+ while (uls->id != CODEC_ID_NONE) {
+ if(!memcmp(uls->uid, *uid, 16))
+ break;
+ uls++;
+ }
+ return uls;
+}
+
+static enum CodecType mxf_get_codec_type(const MXFDataDefinitionUL *uls, UID *uid)
+{
+ while (uls->type != CODEC_TYPE_DATA) {
+ if(!memcmp(uls->uid, *uid, 16))
+ break;
+ uls++;
+ }
+ return uls->type;
+}
+
+static void *mxf_resolve_strong_ref(MXFContext *mxf, UID *strong_ref)
+{
+ int i;
+
+ if (!strong_ref)
+ return NULL;
+ for (i = 0; i < mxf->metadata_sets_count; i++) {
+ if (!memcmp(*strong_ref, mxf->metadata_sets[i]->uid, 16)) {
+ return mxf->metadata_sets[i];
+ }
+ }
+ return NULL;
+}
+
+static int mxf_parse_structural_metadata(MXFContext *mxf)
+{
+ MXFPackage *material_package = NULL;
+ MXFPackage *temp_package = NULL;
+ int i, j, k;
+
+ dprintf("metadata sets count %d\n", mxf->metadata_sets_count);
+ /* TODO: handle multiple material packages (OP3x) */
+ for (i = 0; i < mxf->packages_count; i++) {
+ if (!(temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i]))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve package strong ref\n");
+ return -1;
+ }
+ if (temp_package->type == MaterialPackage) {
+ material_package = temp_package;
+ break;
+ }
+ }
+ if (!material_package) {
+ av_log(mxf->fc, AV_LOG_ERROR, "no material package found\n");
+ return -1;
+ }
+
+ for (i = 0; i < material_package->tracks_count; i++) {
+ MXFPackage *source_package = NULL;
+ MXFTrack *material_track = NULL;
+ MXFTrack *source_track = NULL;
+ MXFTrack *temp_track = NULL;
+ MXFDescriptor *descriptor = NULL;
+ MXFStructuralComponent *component = NULL;
+ const MXFCodecUL *codec_ul = NULL;
+ const MXFCodecUL *container_ul = NULL;
+ AVStream *st;
+
+ if (!(material_track = mxf_resolve_strong_ref(mxf, &material_package->tracks_refs[i]))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track strong ref\n");
+ continue;
+ }
+
+ if (!(material_track->sequence = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track sequence strong ref\n");
+ return -1;
+ }
+
+ /* TODO: handle multiple source clips */
+ for (j = 0; j < material_track->sequence->structural_components_count; j++) {
+ /* TODO: handle timecode component */
+ component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j]);
+ if (!component || component->type != SourceClip)
+ continue;
+
+ for (k = 0; k < mxf->packages_count; k++) {
+ if (!(temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[k]))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
+ return -1;
+ }
+ if (!memcmp(temp_package->package_uid, component->source_package_uid, 16)) {
+ source_package = temp_package;
+ break;
+ }
+ }
+ if (!source_package) {
+ av_log(mxf->fc, AV_LOG_ERROR, "material track %d: no corresponding source package found\n", material_track->track_id);
+ break;
+ }
+ for (k = 0; k < source_package->tracks_count; k++) {
+ if (!(temp_track = mxf_resolve_strong_ref(mxf, &source_package->tracks_refs[k]))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
+ return -1;
+ }
+ if (temp_track->track_id == component->source_track_id) {
+ source_track = temp_track;
+ break;
+ }
+ }
+ if (!source_track) {
+ av_log(mxf->fc, AV_LOG_ERROR, "material track %d: no corresponding source track found\n", material_track->track_id);
+ break;
+ }
+ }
+ if (!source_track)
+ continue;
+
+ st = av_new_stream(mxf->fc, source_track->track_id);
+ st->priv_data = source_track;
+ st->duration = component->duration;
+ if (st->duration == -1)
+ st->duration = AV_NOPTS_VALUE;
+ st->start_time = component->start_position;
+ av_set_pts_info(st, 64, material_track->edit_rate.num, material_track->edit_rate.den);
+
+ if (!(source_track->sequence = mxf_resolve_strong_ref(mxf, &source_track->sequence_ref))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track sequence strong ref\n");
+ return -1;
+ }
+
+#ifdef DEBUG
+ PRINT_KEY("data definition ul", source_track->sequence->data_definition_ul);
+#endif
+ st->codec->codec_type = mxf_get_codec_type(mxf_data_definition_uls, &source_track->sequence->data_definition_ul);
+
+ source_package->descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor_ref);
+ if (source_package->descriptor) {
+ if (source_package->descriptor->type == MultipleDescriptor) {
+ for (j = 0; j < source_package->descriptor->sub_descriptors_count; j++) {
+ MXFDescriptor *sub_descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor->sub_descriptors_refs[j]);
+
+ if (!sub_descriptor) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve sub descriptor strong ref\n");
+ continue;
+ }
+ if (sub_descriptor->linked_track_id == source_track->track_id) {
+ descriptor = sub_descriptor;
+ break;
+ }
+ }
+ } else
+ descriptor = source_package->descriptor;
+ }
+ if (!descriptor) {
+ av_log(mxf->fc, AV_LOG_INFO, "source track %d: stream %d, no descriptor found\n", source_track->track_id, st->index);
+ continue;
+ }
+#ifdef DEBUG
+ PRINT_KEY("essence codec ul", descriptor->essence_codec_ul);
+ PRINT_KEY("essence container ul", descriptor->essence_container_ul);
+#endif
+ /* TODO: drop PictureEssenceCoding and SoundEssenceCompression, only check EssenceContainer */
+ codec_ul = mxf_get_codec_ul(mxf_codec_uls, &descriptor->essence_codec_ul);
+ st->codec->codec_id = codec_ul->id;
+ if (descriptor->extradata) {
+ st->codec->extradata = descriptor->extradata;
+ st->codec->extradata_size = descriptor->extradata_size;
+ }
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ container_ul = mxf_get_codec_ul(mxf_picture_essence_container_uls, &descriptor->essence_container_ul);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ st->codec->codec_id = container_ul->id;
+ st->codec->width = descriptor->width;
+ st->codec->height = descriptor->height;
+ st->codec->bits_per_sample = descriptor->bits_per_sample; /* Uncompressed */
+ st->need_parsing = 2; /* only parse headers */
+ } else if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ container_ul = mxf_get_codec_ul(mxf_sound_essence_container_uls, &descriptor->essence_container_ul);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ st->codec->codec_id = container_ul->id;
+ st->codec->channels = descriptor->channels;
+ st->codec->bits_per_sample = descriptor->bits_per_sample;
+ st->codec->sample_rate = descriptor->sample_rate.num / descriptor->sample_rate.den;
+ /* TODO: implement CODEC_ID_RAWAUDIO */
+ if (st->codec->codec_id == CODEC_ID_PCM_S16LE) {
+ if (descriptor->bits_per_sample == 24)
+ st->codec->codec_id = CODEC_ID_PCM_S24LE;
+ else if (descriptor->bits_per_sample == 32)
+ st->codec->codec_id = CODEC_ID_PCM_S32LE;
+ } else if (st->codec->codec_id == CODEC_ID_PCM_S16BE) {
+ if (descriptor->bits_per_sample == 24)
+ st->codec->codec_id = CODEC_ID_PCM_S24BE;
+ else if (descriptor->bits_per_sample == 32)
+ st->codec->codec_id = CODEC_ID_PCM_S32BE;
+ if (descriptor->essence_container_ul[13] == 0x01) /* D-10 Mapping */
+ st->codec->channels = 8; /* force channels to 8 */
+ } else if (st->codec->codec_id == CODEC_ID_MP2) {
+ st->need_parsing = 1;
+ }
+ }
+ if (container_ul && container_ul->wrapping == Clip) {
+ dprintf("stream %d: clip wrapped essence\n", st->index);
+ st->need_parsing = 1;
+ }
+ }
+ return 0;
+}
+
+static const MXFMetadataReadTableEntry mxf_metadata_read_table[] = {
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x2F,0x00 }, mxf_read_metadata_preface },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x18,0x00 }, mxf_read_metadata_content_storage },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x37,0x00 }, mxf_read_metadata_source_package },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x36,0x00 }, mxf_read_metadata_material_package },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x0F,0x00 }, mxf_read_metadata_sequence },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x11,0x00 }, mxf_read_metadata_source_clip },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x44,0x00 }, mxf_read_metadata_multiple_descriptor },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x42,0x00 }, mxf_read_metadata_generic_descriptor }, /* Generic Sound */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x28,0x00 }, mxf_read_metadata_generic_descriptor }, /* CDCI */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x29,0x00 }, mxf_read_metadata_generic_descriptor }, /* RGBA */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x51,0x00 }, mxf_read_metadata_generic_descriptor }, /* MPEG 2 Video */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x48,0x00 }, mxf_read_metadata_generic_descriptor }, /* Wave */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x47,0x00 }, mxf_read_metadata_generic_descriptor }, /* AES3 */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3A,0x00 }, mxf_read_metadata_track }, /* Static Track */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3B,0x00 }, mxf_read_metadata_track }, /* Generic Track */
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, NULL },
+};
+
+static int mxf_read_sync(ByteIOContext *pb, const uint8_t *key, unsigned size)
+{
+ int i, b;
+ for (i = 0; i < size && !url_feof(pb); i++) {
+ b = get_byte(pb);
+ if (b == key[0])
+ i = 0;
+ else if (b != key[i])
+ i = -1;
+ }
+ return i == size;
+}
+
+static int mxf_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ MXFContext *mxf = s->priv_data;
+ KLVPacket klv;
+
+ if (!mxf_read_sync(&s->pb, mxf_header_partition_pack_key, 14)) {
+ av_log(s, AV_LOG_ERROR, "could not find header partition pack key\n");
+ return -1;
+ }
+ url_fseek(&s->pb, -14, SEEK_CUR);
+ mxf->fc = s;
+ while (!url_feof(&s->pb)) {
+ const MXFMetadataReadTableEntry *function;
+
+ if (klv_read_packet(&klv, &s->pb) < 0) {
+ av_log(s, AV_LOG_ERROR, "error reading KLV packet\n");
+ return -1;
+ }
+#ifdef DEBUG
+ PRINT_KEY("read header", klv.key);
+#endif
+ if (IS_KLV_KEY(klv.key, mxf_essence_element_key)) {
+ /* FIXME avoid seek */
+ url_fseek(&s->pb, klv.offset, SEEK_SET);
+ break;
+ }
+
+ for (function = mxf_metadata_read_table; function->read; function++) {
+ if (IS_KLV_KEY(klv.key, function->key)) {
+ if (function->read(mxf, &klv) < 0) {
+ av_log(s, AV_LOG_ERROR, "error reading header metadata\n");
+ return -1;
+ }
+ break;
+ }
+ }
+ if (!function->read)
+ url_fskip(&s->pb, klv.length);
+ }
+ return mxf_parse_structural_metadata(mxf);
+}
+
+static int mxf_read_close(AVFormatContext *s)
+{
+ MXFContext *mxf = s->priv_data;
+ int i;
+
+ av_freep(&mxf->packages_refs);
+ av_freep(&mxf->essence_container_data_sets_refs);
+ av_freep(&mxf->essence_containers_uls);
+ for (i = 0; i < mxf->metadata_sets_count; i++) {
+ switch (mxf->metadata_sets[i]->type) {
+ case MultipleDescriptor:
+ av_freep(&((MXFDescriptor *)mxf->metadata_sets[i])->sub_descriptors_refs);
+ break;
+ case Sequence:
+ av_freep(&((MXFSequence *)mxf->metadata_sets[i])->structural_components_refs);
+ break;
+ case SourcePackage:
+ case MaterialPackage:
+ av_freep(&((MXFPackage *)mxf->metadata_sets[i])->tracks_refs);
+ break;
+ default:
+ break;
+ }
+ av_freep(&mxf->metadata_sets[i]);
+ }
+ av_freep(&mxf->metadata_sets);
+ return 0;
+}
+
+static int mxf_probe(AVProbeData *p) {
+ uint8_t *bufp = p->buf;
+ uint8_t *end = p->buf + p->buf_size;
+
+ if (p->buf_size < sizeof(mxf_header_partition_pack_key))
+ return 0;
+
+ /* Must skip Run-In Sequence and search for MXF header partition pack key SMPTE 377M 5.5 */
+ end -= sizeof(mxf_header_partition_pack_key);
+ for (; bufp < end; bufp++) {
+ if (IS_KLV_KEY(bufp, mxf_header_partition_pack_key))
+ return AVPROBE_SCORE_MAX;
+ }
+ return 0;
+}
+
+/* rudimentary binary seek */
+/* XXX: use MXF Index */
+static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
+{
+ AVStream *st = s->streams[stream_index];
+ int64_t seconds;
+
+ if (!s->bit_rate)
+ return -1;
+ if (sample_time < 0)
+ sample_time = 0;
+ seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den);
+ url_fseek(&s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET);
+ if (!mxf_read_sync(&s->pb, mxf_essence_element_key, 12))
+ return -1;
+
+ /* found KLV key */
+ url_fseek(&s->pb, -12, SEEK_CUR);
+ av_update_cur_dts(s, st, sample_time);
+ return 0;
+}
+
+AVInputFormat mxf_demuxer = {
+ "mxf",
+ "MXF format",
+ sizeof(MXFContext),
+ mxf_probe,
+ mxf_read_header,
+ mxf_read_packet,
+ mxf_read_close,
+ mxf_read_seek,
+};
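Everything in the demuxer above is driven by KLV packets whose length field uses BER encoding, as read by klv_decode_ber_length(). A minimal buffer-based sketch of the same decoding, for illustration only (it reads from memory rather than through a ByteIOContext):

#include <stdint.h>

/* Decode a BER length: the short form is a single byte below 0x80; the long
 * form uses the low 7 bits as a count of following big-endian length bytes,
 * which SMPTE 379M limits to 8. Returns -1 on a spec violation. */
static int64_t ber_length(const uint8_t **p)
{
    const uint8_t *buf = *p;
    uint8_t first = *buf++;
    int64_t size = 0;

    if (first & 0x80) {                  /* long form */
        int bytes_num = first & 0x7f;
        if (bytes_num > 8)
            return -1;
        while (bytes_num--)
            size = size << 8 | *buf++;
    } else {                             /* short form */
        size = first;
    }
    *p = buf;
    return size;
}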
diff --git a/contrib/ffmpeg/libavformat/nsvdec.c b/contrib/ffmpeg/libavformat/nsvdec.c
new file mode 100644
index 000000000..9a5fe97f8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nsvdec.c
@@ -0,0 +1,763 @@
+/*
+ * NSV demuxer
+ * Copyright (c) 2004 The FFmpeg Project.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+
+//#define DEBUG
+//#define DEBUG_DUMP_INDEX // XXX dumbdriving-271.nsv breaks with it commented!!
+//#define DEBUG_SEEK
+#define CHECK_SUBSEQUENT_NSVS
+//#define DISABLE_AUDIO
+
+/* max bytes to crawl for trying to resync
+ * stupid streaming servers don't start at chunk boundaries...
+ */
+#define NSV_MAX_RESYNC (500*1024)
+#define NSV_MAX_RESYNC_TRIES 300
+
+/*
+ * First version by Francois Revol - revol@free.fr
+ * References:
+ * (1) http://www.multimedia.cx/nsv-format.txt
+ * seems someone came to the same conclusions as me, and updated it:
+ * (2) http://www.stud.ktu.lt/~vitslav/nsv/nsv-format.txt
+ * http://www.stud.ktu.lt/~vitslav/nsv/
+ * official docs
+ * (3) http://ultravox.aol.com/NSVFormat.rtf
+ * Sample files:
+ * (S1) http://www.nullsoft.com/nsv/samples/
+ * http://www.nullsoft.com/nsv/samples/faster.nsv
+ * http://streamripper.sourceforge.net/openbb/read.php?TID=492&page=4
+ */
+
+/*
+ * notes on the header (Francois Revol):
+ *
+ * It is followed by strings, then a table, but nothing tells
+ * where the table begins according to (1). After checking faster.nsv,
+ * I believe NSVf[16-19] gives the size of the strings data
+ * (that is the offset of the data table after the header).
+ * Checking all the samples from (S1) confirms this.
+ *
+ * Then, about NSVf[12-15], faster.nsv has 179700. When viewing it in VLC,
+ * I noticed there was about 1 NSVs chunk/s, so I ran
+ * strings faster.nsv | grep NSVs | wc -l
+ * which gave me 180. That leads me to think that NSVf[12-15] might be the
+ * file length in milliseconds.
+ * Let's try that:
+ * for f in *.nsv; do HTIME="$(od -t x4 "$f" | head -1 | sed 's/.* //')"; echo "'$f' $((0x$HTIME))s = $((0x$HTIME/1000/60)):$((0x$HTIME/1000%60))"; done
+ * except for nsvtrailer.nsv (which doesn't have an NSVf header), it reports the correct time.
+ *
+ * nsvtrailer.nsv (S1) does not have any NSVf header, only NSVs chunks,
+ * so the header does not seem to be mandatory (for streaming).
+ *
+ * index slice duration check (except nsvtrailer.nsv):
+ * for f in [^n]*.nsv; do DUR="$(ffmpeg -i "$f" 2>/dev/null | grep 'NSVf duration' | cut -d ' ' -f 4)"; IC="$(ffmpeg -i "$f" 2>/dev/null | grep 'INDEX ENTRIES' | cut -d ' ' -f 2)"; echo "duration $DUR, slice time $(($DUR/$IC))"; done
+ */
+
+/*
+ * TODO:
+ * - handle timestamps !!!
+ * - use index
+ * - mime-type in probe()
+ * - seek
+ */
+
+#ifdef DEBUG
+#define PRINT(_v) printf _v
+#else
+#define PRINT(_v)
+#endif
+
+#if 0
+struct NSVf_header {
+ uint32_t chunk_tag; /* 'NSVf' */
+ uint32_t chunk_size;
+ uint32_t file_size; /* max 4GB ??? no one learns anything it seems :^) */
+ uint32_t file_length; //unknown1; /* what about MSB of file_size ? */
+ uint32_t info_strings_size; /* size of the info strings */ //unknown2;
+ uint32_t table_entries;
+ uint32_t table_entries_used; /* the left ones should be -1 */
+};
+
+struct NSVs_header {
+ uint32_t chunk_tag; /* 'NSVs' */
+ uint32_t v4cc; /* or 'NONE' */
+ uint32_t a4cc; /* or 'NONE' */
+ uint16_t vwidth; /* assert(vwidth%16==0) */
+ uint16_t vheight; /* assert(vheight%16==0) */
+ uint8_t framerate; /* value = (framerate&0x80)?frtable[framerate&0x7f]:framerate */
+ uint16_t unknown;
+};
+
+struct nsv_avchunk_header {
+ uint8_t vchunk_size_lsb;
+ uint16_t vchunk_size_msb; /* value = (vchunk_size_msb << 4) | (vchunk_size_lsb >> 4) */
+ uint16_t achunk_size;
+};
+
+struct nsv_pcm_header {
+ uint8_t bits_per_sample;
+ uint8_t channel_count;
+ uint16_t sample_rate;
+};
+#endif
+
+/* variation from avi.h */
+/*typedef struct CodecTag {
+ int id;
+ unsigned int tag;
+} CodecTag;*/
+
+/* tags */
+
+#define T_NSVF MKTAG('N', 'S', 'V', 'f') /* file header */
+#define T_NSVS MKTAG('N', 'S', 'V', 's') /* chunk header */
+#define T_TOC2 MKTAG('T', 'O', 'C', '2') /* extra index marker */
+#define T_NONE MKTAG('N', 'O', 'N', 'E') /* null a/v 4CC */
+#define T_SUBT MKTAG('S', 'U', 'B', 'T') /* subtitle aux data */
+#define T_ASYN MKTAG('A', 'S', 'Y', 'N') /* async a/v aux marker */
+#define T_KEYF MKTAG('K', 'E', 'Y', 'F') /* video keyframe aux marker (addition) */
+
+#define TB_NSVF MKBETAG('N', 'S', 'V', 'f')
+#define TB_NSVS MKBETAG('N', 'S', 'V', 's')
+
+/* hardcoded stream indices */
+#define NSV_ST_VIDEO 0
+#define NSV_ST_AUDIO 1
+#define NSV_ST_SUBT 2
+
+enum NSVStatus {
+ NSV_UNSYNC,
+ NSV_FOUND_NSVF,
+ NSV_HAS_READ_NSVF,
+ NSV_FOUND_NSVS,
+ NSV_HAS_READ_NSVS,
+ NSV_FOUND_BEEF,
+ NSV_GOT_VIDEO,
+ NSV_GOT_AUDIO,
+};
+
+typedef struct NSVStream {
+ int frame_offset; /* current frame (video) or byte (audio) counter
+ (used to compute the pts) */
+ int scale;
+ int rate;
+ int sample_size; /* audio only data */
+ int start;
+
+ int new_frame_offset; /* temporary storage (used during seek) */
+ int cum_len; /* temporary storage (used during seek) */
+} NSVStream;
+
+typedef struct {
+ int base_offset;
+ int NSVf_end;
+ uint32_t *nsvf_index_data;
+ int index_entries;
+ enum NSVStatus state;
+    AVPacket ahead[2]; /* [v, a]: if .data is non-NULL, a cached packet */
+                       /* is waiting to be returned */
+ int64_t duration;
+ uint32_t vtag, atag;
+ uint16_t vwidth, vheight;
+ int16_t avsync;
+ //DVDemuxContext* dv_demux;
+} NSVContext;
+
+static const CodecTag nsv_codec_video_tags[] = {
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', ' ') },
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
+ { CODEC_ID_VP5, MKTAG('V', 'P', '5', ' ') },
+ { CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
+/*
+ { CODEC_ID_VP4, MKTAG('V', 'P', '4', ' ') },
+ { CODEC_ID_VP4, MKTAG('V', 'P', '4', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', ' ') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
+*/
+ { CODEC_ID_XVID, MKTAG('X', 'V', 'I', 'D') }, /* cf sample xvid decoder from nsv_codec_sdk.zip */
+ { CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', '3') },
+ { 0, 0 },
+};
+
+static const CodecTag nsv_codec_audio_tags[] = {
+ { CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') },
+ { CODEC_ID_AAC, MKTAG('A', 'A', 'C', ' ') },
+ { CODEC_ID_AAC, MKTAG('A', 'A', 'C', 'P') }, /* _CUTTED__MUXED_2 Heads - Out Of The City.nsv */
+ { CODEC_ID_PCM_U16LE, MKTAG('P', 'C', 'M', ' ') },
+ { 0, 0 },
+};
+
+//static int nsv_load_index(AVFormatContext *s);
+static int nsv_read_chunk(AVFormatContext *s, int fill_header);
+
+#ifdef DEBUG
+static void print_tag(const char *str, unsigned int tag, int size)
+{
+ printf("%s: tag=%c%c%c%c\n",
+ str, tag & 0xff,
+ (tag >> 8) & 0xff,
+ (tag >> 16) & 0xff,
+ (tag >> 24) & 0xff);
+}
+#endif
+
+/* try to find something we recognize, and set the state accordingly */
+static int nsv_resync(AVFormatContext *s)
+{
+ NSVContext *nsv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t v = 0;
+ int i;
+
+ PRINT(("%s(), offset = %"PRId64", state = %d\n", __FUNCTION__, url_ftell(pb), nsv->state));
+
+ //nsv->state = NSV_UNSYNC;
+
+ for (i = 0; i < NSV_MAX_RESYNC; i++) {
+ if (url_feof(pb)) {
+ PRINT(("NSV EOF\n"));
+ nsv->state = NSV_UNSYNC;
+ return -1;
+ }
+ v <<= 8;
+ v |= get_byte(pb);
+/*
+ if (i < 8) {
+ PRINT(("NSV resync: [%d] = %02x\n", i, v & 0x0FF));
+ }
+*/
+
+ if ((v & 0x0000ffff) == 0xefbe) { /* BEEF */
+ PRINT(("NSV resynced on BEEF after %d bytes\n", i+1));
+ nsv->state = NSV_FOUND_BEEF;
+ return 0;
+ }
+ /* we read as big endian, thus the MK*BE* */
+ if (v == TB_NSVF) { /* NSVf */
+ PRINT(("NSV resynced on NSVf after %d bytes\n", i+1));
+ nsv->state = NSV_FOUND_NSVF;
+ return 0;
+ }
+ if (v == MKBETAG('N', 'S', 'V', 's')) { /* NSVs */
+ PRINT(("NSV resynced on NSVs after %d bytes\n", i+1));
+ nsv->state = NSV_FOUND_NSVS;
+ return 0;
+ }
+
+ }
+ PRINT(("NSV sync lost\n"));
+ return -1;
+}
+
+static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NSVContext *nsv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned int file_size, size;
+ int64_t duration;
+ int strings_size;
+ int table_entries;
+ int table_entries_used;
+
+ PRINT(("%s()\n", __FUNCTION__));
+
+ nsv->state = NSV_UNSYNC; /* in case we fail */
+
+ size = get_le32(pb);
+ if (size < 28)
+ return -1;
+ nsv->NSVf_end = size;
+
+ //s->file_size = (uint32_t)get_le32(pb);
+ file_size = (uint32_t)get_le32(pb);
+ PRINT(("NSV NSVf chunk_size %u\n", size));
+ PRINT(("NSV NSVf file_size %u\n", file_size));
+
+ nsv->duration = duration = get_le32(pb); /* in ms */
+ PRINT(("NSV NSVf duration %"PRId64" ms\n", duration));
+ // XXX: store it in AVStreams
+
+ strings_size = get_le32(pb);
+ table_entries = get_le32(pb);
+ table_entries_used = get_le32(pb);
+    PRINT(("NSV NSVf info-strings size: %d, table entries: %d, used: %d\n",
+ strings_size, table_entries, table_entries_used));
+ if (url_feof(pb))
+ return -1;
+
+ PRINT(("NSV got header; filepos %"PRId64"\n", url_ftell(pb)));
+
+ if (strings_size > 0) {
+ char *strings; /* last byte will be '\0' to play safe with str*() */
+ char *p, *endp;
+ char *token, *value;
+ char quote;
+
+ p = strings = av_mallocz(strings_size + 1);
+ endp = strings + strings_size;
+ get_buffer(pb, strings, strings_size);
+ while (p < endp) {
+ while (*p == ' ')
+ p++; /* strip out spaces */
+ if (p >= endp-2)
+ break;
+ token = p;
+ p = strchr(p, '=');
+ if (!p || p >= endp-2)
+ break;
+ *p++ = '\0';
+ quote = *p++;
+ value = p;
+ p = strchr(p, quote);
+ if (!p || p >= endp)
+ break;
+ *p++ = '\0';
+ PRINT(("NSV NSVf INFO: %s='%s'\n", token, value));
+ if (!strcmp(token, "ASPECT")) {
+ /* don't care */
+ } else if (!strcmp(token, "CREATOR") || !strcmp(token, "Author")) {
+ strncpy(s->author, value, 512-1);
+ } else if (!strcmp(token, "Copyright")) {
+ strncpy(s->copyright, value, 512-1);
+ } else if (!strcmp(token, "TITLE") || !strcmp(token, "Title")) {
+ strncpy(s->title, value, 512-1);
+ }
+ }
+ av_free(strings);
+ }
+ if (url_feof(pb))
+ return -1;
+
+ PRINT(("NSV got infos; filepos %"PRId64"\n", url_ftell(pb)));
+
+ if (table_entries_used > 0) {
+ nsv->index_entries = table_entries_used;
+ if((unsigned)table_entries >= UINT_MAX / sizeof(uint32_t))
+ return -1;
+ nsv->nsvf_index_data = av_malloc(table_entries * sizeof(uint32_t));
+#warning "FIXME: Byteswap buffer as needed"
+ get_buffer(pb, (unsigned char *)nsv->nsvf_index_data, table_entries * sizeof(uint32_t));
+ }
+
+ PRINT(("NSV got index; filepos %"PRId64"\n", url_ftell(pb)));
+
+#ifdef DEBUG_DUMP_INDEX
+#define V(v) ((v<0x20 || v > 127)?'.':v)
+ /* dump index */
+ PRINT(("NSV %d INDEX ENTRIES:\n", table_entries));
+    PRINT(("NSV [dataoffset][fileoffset]\n"));
+    for (int i = 0; i < table_entries; i++) {
+ unsigned char b[8];
+ url_fseek(pb, size + nsv->nsvf_index_data[i], SEEK_SET);
+ get_buffer(pb, b, 8);
+ PRINT(("NSV [0x%08lx][0x%08lx]: %02x %02x %02x %02x %02x %02x %02x %02x"
+ "%c%c%c%c%c%c%c%c\n",
+ nsv->nsvf_index_data[i], size + nsv->nsvf_index_data[i],
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+ V(b[0]), V(b[1]), V(b[2]), V(b[3]), V(b[4]), V(b[5]), V(b[6]), V(b[7]) ));
+ }
+ //url_fseek(pb, size, SEEK_SET); /* go back to end of header */
+#undef V
+#endif
+
+ url_fseek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */
+
+ if (url_feof(pb))
+ return -1;
+ nsv->state = NSV_HAS_READ_NSVF;
+ return 0;
+}
+
+static int nsv_parse_NSVs_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NSVContext *nsv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t vtag, atag;
+ uint16_t vwidth, vheight;
+ AVRational framerate;
+ int i;
+ AVStream *st;
+ NSVStream *nst;
+ PRINT(("%s()\n", __FUNCTION__));
+
+ vtag = get_le32(pb);
+ atag = get_le32(pb);
+ vwidth = get_le16(pb);
+ vheight = get_le16(pb);
+ i = get_byte(pb);
+
+ PRINT(("NSV NSVs framerate code %2x\n", i));
+ if(i&0x80) { /* odd way of giving native framerates from docs */
+ int t=(i & 0x7F)>>2;
+ if(t<16) framerate = (AVRational){1, t+1};
+ else framerate = (AVRational){t-15, 1};
+
+ if(i&1){
+ framerate.num *= 1000;
+ framerate.den *= 1001;
+ }
+
+ if((i&3)==3) framerate.num *= 24;
+ else if((i&3)==2) framerate.num *= 25;
+ else framerate.num *= 30;
+ }
+ else
+ framerate= (AVRational){i, 1};
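+    /* worked example (hypothetical code byte, following the decoding above):
+     * 0xC1 -> t = (0x41 >> 2) = 16, so the base rate is (16-15)/1 = 1/1;
+     * bit 0 is set -> scale by 1000/1001; (i&3) == 1 -> multiply num by 30;
+     * result: 30000/1001, i.e. ~29.97 fps */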
+
+ nsv->avsync = get_le16(pb);
+#ifdef DEBUG
+ print_tag("NSV NSVs vtag", vtag, 0);
+ print_tag("NSV NSVs atag", atag, 0);
+ PRINT(("NSV NSVs vsize %dx%d\n", vwidth, vheight));
+#endif
+
+ /* XXX change to ap != NULL ? */
+ if (s->nb_streams == 0) { /* streams not yet published, let's do that */
+ nsv->vtag = vtag;
+ nsv->atag = atag;
+ nsv->vwidth = vwidth;
+        nsv->vheight = vheight;
+ if (vtag != T_NONE) {
+ st = av_new_stream(s, NSV_ST_VIDEO);
+ if (!st)
+ goto fail;
+
+ nst = av_mallocz(sizeof(NSVStream));
+ if (!nst)
+ goto fail;
+ st->priv_data = nst;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_tag = vtag;
+ st->codec->codec_id = codec_get_id(nsv_codec_video_tags, vtag);
+ st->codec->width = vwidth;
+ st->codec->height = vheight;
+ st->codec->bits_per_sample = 24; /* depth XXX */
+
+ av_set_pts_info(st, 64, framerate.den, framerate.num);
+ st->start_time = 0;
+ st->duration = av_rescale(nsv->duration, framerate.num, 1000*framerate.den);
+ }
+ if (atag != T_NONE) {
+#ifndef DISABLE_AUDIO
+ st = av_new_stream(s, NSV_ST_AUDIO);
+ if (!st)
+ goto fail;
+
+ nst = av_mallocz(sizeof(NSVStream));
+ if (!nst)
+ goto fail;
+ st->priv_data = nst;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = atag;
+ st->codec->codec_id = codec_get_id(nsv_codec_audio_tags, atag);
+
+ st->need_parsing = 1; /* for PCM we will read a chunk later and put correct info */
+
+ /* set timebase to common denominator of ms and framerate */
+ av_set_pts_info(st, 64, 1, framerate.num*1000);
+ st->start_time = 0;
+ st->duration = (int64_t)nsv->duration * framerate.num;
+#endif
+ }
+#ifdef CHECK_SUBSEQUENT_NSVS
+ } else {
+        if (nsv->vtag != vtag || nsv->atag != atag || nsv->vwidth != vwidth || nsv->vheight != vheight) {
+ PRINT(("NSV NSVs header values differ from the first one!!!\n"));
+ //return -1;
+ }
+#endif /* CHECK_SUBSEQUENT_NSVS */
+ }
+
+ nsv->state = NSV_HAS_READ_NSVS;
+ return 0;
+fail:
+ /* XXX */
+ nsv->state = NSV_UNSYNC;
+ return -1;
+}
+
+static int nsv_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NSVContext *nsv = s->priv_data;
+ int i, err;
+
+ PRINT(("%s()\n", __FUNCTION__));
+ PRINT(("filename '%s'\n", s->filename));
+
+ nsv->state = NSV_UNSYNC;
+ nsv->ahead[0].data = nsv->ahead[1].data = NULL;
+
+ for (i = 0; i < NSV_MAX_RESYNC_TRIES; i++) {
+ if (nsv_resync(s) < 0)
+ return -1;
+ if (nsv->state == NSV_FOUND_NSVF)
+ err = nsv_parse_NSVf_header(s, ap);
+ /* we need the first NSVs also... */
+ if (nsv->state == NSV_FOUND_NSVS) {
+ err = nsv_parse_NSVs_header(s, ap);
+ break; /* we just want the first one */
+ }
+ }
+ if (s->nb_streams < 1) /* no luck so far */
+ return -1;
+ /* now read the first chunk, so we can attempt to decode more info */
+ err = nsv_read_chunk(s, 1);
+
+ PRINT(("parsed header\n"));
+ return 0;
+}
+
+static int nsv_read_chunk(AVFormatContext *s, int fill_header)
+{
+ NSVContext *nsv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st[2] = {NULL, NULL};
+ NSVStream *nst;
+ AVPacket *pkt;
+ int i, err = 0;
+ uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */
+ uint32_t vsize;
+ uint16_t asize;
+ uint16_t auxsize;
+ uint32_t auxtag;
+
+ PRINT(("%s(%d)\n", __FUNCTION__, fill_header));
+
+ if (nsv->ahead[0].data || nsv->ahead[1].data)
+        return 0; //-1; /* hey! eat what you've got on your plate first! */
+
+null_chunk_retry:
+ if (url_feof(pb))
+ return -1;
+
+ for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++)
+ err = nsv_resync(s);
+ if (err < 0)
+ return err;
+ if (nsv->state == NSV_FOUND_NSVS)
+ err = nsv_parse_NSVs_header(s, NULL);
+ if (err < 0)
+ return err;
+ if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF)
+ return -1;
+
+ auxcount = get_byte(pb);
+ vsize = get_le16(pb);
+ asize = get_le16(pb);
+ vsize = (vsize << 4) | (auxcount >> 4);
+ auxcount &= 0x0f;
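+    /* illustration with made-up bytes: first byte 0x25 and 16-bit field 0x0123
+     * give vsize = (0x0123 << 4) | (0x25 >> 4) = 0x1232 and auxcount = 5;
+     * the high nibble of the first byte supplies the 4 low bits of the
+     * 20-bit video chunk size */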
+ PRINT(("NSV CHUNK %d aux, %u bytes video, %d bytes audio\n", auxcount, vsize, asize));
+ /* skip aux stuff */
+ for (i = 0; i < auxcount; i++) {
+ auxsize = get_le16(pb);
+ auxtag = get_le32(pb);
+ PRINT(("NSV aux data: '%c%c%c%c', %d bytes\n",
+ (auxtag & 0x0ff),
+ ((auxtag >> 8) & 0x0ff),
+ ((auxtag >> 16) & 0x0ff),
+ ((auxtag >> 24) & 0x0ff),
+ auxsize));
+ url_fskip(pb, auxsize);
+ vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming braindead */
+ }
+
+ if (url_feof(pb))
+ return -1;
+ if (!vsize && !asize) {
+ nsv->state = NSV_UNSYNC;
+ goto null_chunk_retry;
+ }
+
+ /* map back streams to v,a */
+ if (s->streams[0])
+ st[s->streams[0]->id] = s->streams[0];
+ if (s->streams[1])
+ st[s->streams[1]->id] = s->streams[1];
+
+ if (vsize/* && st[NSV_ST_VIDEO]*/) {
+ nst = st[NSV_ST_VIDEO]->priv_data;
+ pkt = &nsv->ahead[NSV_ST_VIDEO];
+ av_get_packet(pb, pkt, vsize);
+ pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO;
+ pkt->dts = nst->frame_offset++;
+ pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
+/*
+ for (i = 0; i < MIN(8, vsize); i++)
+ PRINT(("NSV video: [%d] = %02x\n", i, pkt->data[i]));
+*/
+ }
+ if (asize/*st[NSV_ST_AUDIO]*/) {
+ nst = st[NSV_ST_AUDIO]->priv_data;
+ pkt = &nsv->ahead[NSV_ST_AUDIO];
+ /* read raw audio specific header on the first audio chunk... */
+ /* on ALL audio chunks ?? seems so! */
+ if (asize && st[NSV_ST_AUDIO]->codec->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) {
+ uint8_t bps;
+ uint8_t channels;
+ uint16_t samplerate;
+ bps = get_byte(pb);
+ channels = get_byte(pb);
+ samplerate = get_le16(pb);
+ asize-=4;
+ PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate));
+ if (fill_header) {
+ st[NSV_ST_AUDIO]->need_parsing = 0; /* we know everything */
+ if (bps != 16) {
+ PRINT(("NSV AUDIO bit/sample != 16 (%d)!!!\n", bps));
+ }
+ bps /= channels; // ???
+ if (bps == 8)
+ st[NSV_ST_AUDIO]->codec->codec_id = CODEC_ID_PCM_U8;
+ samplerate /= 4;/* UGH ??? XXX */
+ channels = 1;
+ st[NSV_ST_AUDIO]->codec->channels = channels;
+ st[NSV_ST_AUDIO]->codec->sample_rate = samplerate;
+ PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate));
+ }
+ }
+ av_get_packet(pb, pkt, asize);
+ pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO;
+ pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
+ if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) {
+ /* on a nsvs frame we have new information on a/v sync */
+ pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1);
+ pkt->dts *= (int64_t)1000 * st[NSV_ST_VIDEO]->time_base.num;
+ pkt->dts += (int64_t)nsv->avsync * st[NSV_ST_VIDEO]->time_base.den;
+ PRINT(("NSV AUDIO: sync:%d, dts:%"PRId64, nsv->avsync, pkt->dts));
+ }
+ nst->frame_offset++;
+ }
+
+ nsv->state = NSV_UNSYNC;
+ return 0;
+}
+
+
+static int nsv_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ NSVContext *nsv = s->priv_data;
+ int i, err = 0;
+
+ PRINT(("%s()\n", __FUNCTION__));
+
+ /* in case we don't already have something to eat ... */
+ if (nsv->ahead[0].data == NULL && nsv->ahead[1].data == NULL)
+ err = nsv_read_chunk(s, 0);
+ if (err < 0)
+ return err;
+
+ /* now pick one of the plates */
+ for (i = 0; i < 2; i++) {
+ if (nsv->ahead[i].data) {
+ PRINT(("%s: using cached packet[%d]\n", __FUNCTION__, i));
+ /* avoid the cost of new_packet + memcpy(->data) */
+ memcpy(pkt, &nsv->ahead[i], sizeof(AVPacket));
+ nsv->ahead[i].data = NULL; /* we ate that one */
+ return pkt->size;
+ }
+ }
+
+ /* this restaurant is not approvisionned :^] */
+ return -1;
+}
+
+static int nsv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+#if 0
+ NSVContext *avi = s->priv_data;
+ AVStream *st;
+ NSVStream *ast;
+ int frame_number, i;
+ int64_t pos;
+#endif
+
+ return -1;
+}
+
+static int nsv_read_close(AVFormatContext *s)
+{
+/* int i; */
+ NSVContext *nsv = s->priv_data;
+
+ if (nsv->index_entries)
+ av_free(nsv->nsvf_index_data);
+
+#if 0
+
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st = s->streams[i];
+ NSVStream *ast = st->priv_data;
+ if(ast){
+ av_free(ast->index_entries);
+ av_free(ast);
+ }
+ av_free(st->codec->palctrl);
+ }
+
+#endif
+ return 0;
+}
+
+static int nsv_probe(AVProbeData *p)
+{
+ int i;
+// PRINT(("nsv_probe(), buf_size %d\n", p->buf_size));
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'N' && p->buf[1] == 'S' &&
+ p->buf[2] == 'V' && p->buf[3] == 'f')
+ return AVPROBE_SCORE_MAX;
+ /* streamed files might not have any header */
+ if (p->buf[0] == 'N' && p->buf[1] == 'S' &&
+ p->buf[2] == 'V' && p->buf[3] == 's')
+ return AVPROBE_SCORE_MAX;
+ /* XXX: do streamed files always start at chunk boundary ?? */
+ /* or do we need to search NSVs in the byte stream ? */
+ /* seems the servers don't bother starting clean chunks... */
+ /* sometimes even the first header is at 9KB or something :^) */
+ for (i = 1; i < p->buf_size - 3; i++) {
+ if (p->buf[i+0] == 'N' && p->buf[i+1] == 'S' &&
+ p->buf[i+2] == 'V' && p->buf[i+3] == 's')
+ return AVPROBE_SCORE_MAX-20;
+ }
+ /* so we'll have more luck on extension... */
+ if (match_ext(p->filename, "nsv"))
+ return AVPROBE_SCORE_MAX-20;
+ /* FIXME: add mime-type check */
+ return 0;
+}
+
+AVInputFormat nsv_demuxer = {
+ "nsv",
+ "NullSoft Video format",
+ sizeof(NSVContext),
+ nsv_probe,
+ nsv_read_header,
+ nsv_read_packet,
+ nsv_read_close,
+ nsv_read_seek,
+};
diff --git a/contrib/ffmpeg/libavformat/nut.c b/contrib/ffmpeg/libavformat/nut.c
new file mode 100644
index 000000000..df64caf15
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nut.c
@@ -0,0 +1,1457 @@
+/*
+ * "NUT" Container Format muxer and demuxer (DRAFT-200403??)
+ * Copyright (c) 2003 Alex Beregszaszi
+ * Copyright (c) 2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ * Visit the official site at http://www.nut.hu/
+ *
+ */
+
+/*
+ * TODO:
+ * - index writing
+ * - index packet reading support
+*/
+
+//#define DEBUG 1
+
+#include <limits.h>
+#include "avformat.h"
+#include "mpegaudio.h"
+#include "riff.h"
+#include "adler32.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+//#define TRACE
+
+//from /dev/random
+
+#define MAIN_STARTCODE (0x7A561F5F04ADULL + (((uint64_t)('N'<<8) + 'M')<<48))
+#define STREAM_STARTCODE (0x11405BF2F9DBULL + (((uint64_t)('N'<<8) + 'S')<<48))
+#define KEYFRAME_STARTCODE (0xE4ADEECA4569ULL + (((uint64_t)('N'<<8) + 'K')<<48))
+#define INDEX_STARTCODE (0xDD672F23E64EULL + (((uint64_t)('N'<<8) + 'X')<<48))
+#define INFO_STARTCODE (0xAB68B596BA78ULL + (((uint64_t)('N'<<8) + 'I')<<48))
+
+#define ID_STRING "nut/multimedia container\0"
+
+#define MAX_DISTANCE (1024*16-1)
+#define MAX_SHORT_DISTANCE (1024*4-1)
+
+#define FLAG_DATA_SIZE 1
+#define FLAG_KEY_FRAME 2
+#define FLAG_INVALID 4
+
+typedef struct {
+ uint8_t flags;
+ uint8_t stream_id_plus1;
+ uint16_t size_mul;
+ uint16_t size_lsb;
+ int16_t timestamp_delta;
+ uint8_t reserved_count;
+} FrameCode;
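+
+/* A frame's byte size is reconstructed as size_lsb plus size_mul times a
+ * per-frame count that is only coded in the stream when FLAG_DATA_SIZE is
+ * set; see decode_frame_header() and nut_write_packet() below. */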
+
+typedef struct {
+ int last_key_frame;
+ int msb_timestamp_shift;
+ int rate_num;
+ int rate_den;
+ int64_t last_pts;
+ int64_t last_sync_pos; ///<pos of last 1/2 type frame
+ int decode_delay;
+} StreamContext;
+
+typedef struct {
+ AVFormatContext *avf;
+ int written_packet_size;
+    int64_t packet_start[3]; //0 -> startcode-less, 1 -> short startcode, 2 -> long startcode
+    FrameCode frame_code[256];
+    unsigned int stream_count;
+    uint64_t next_startcode;     ///< stores the next startcode if it has already been parsed but the stream isn't seekable
+ StreamContext *stream;
+ int max_distance;
+ int max_short_distance;
+ int rate_num;
+ int rate_den;
+ int short_startcode;
+} NUTContext;
+
+static char *info_table[][2]={
+ {NULL , NULL }, // end
+ {NULL , NULL },
+ {NULL , "UTF8"},
+ {NULL , "v"},
+ {NULL , "s"},
+ {"StreamId" , "v"},
+ {"SegmentId" , "v"},
+ {"StartTimestamp" , "v"},
+ {"EndTimestamp" , "v"},
+ {"Author" , "UTF8"},
+ {"Title" , "UTF8"},
+ {"Description" , "UTF8"},
+ {"Copyright" , "UTF8"},
+ {"Encoder" , "UTF8"},
+ {"Keyword" , "UTF8"},
+ {"Cover" , "JPEG"},
+ {"Cover" , "PNG"},
+};
+
+static void update(NUTContext *nut, int stream_index, int64_t frame_start, int frame_type, int frame_code, int key_frame, int size, int64_t pts){
+ StreamContext *stream= &nut->stream[stream_index];
+
+ stream->last_key_frame= key_frame;
+ nut->packet_start[ frame_type ]= frame_start;
+ stream->last_pts= pts;
+}
+
+static void reset(AVFormatContext *s, int64_t global_ts){
+ NUTContext *nut = s->priv_data;
+ int i;
+
+ for(i=0; i<s->nb_streams; i++){
+ StreamContext *stream= &nut->stream[i];
+
+ stream->last_key_frame= 1;
+
+ stream->last_pts= av_rescale(global_ts, stream->rate_num*(int64_t)nut->rate_den, stream->rate_den*(int64_t)nut->rate_num);
+ }
+}
+
+static void build_frame_code(AVFormatContext *s){
+ NUTContext *nut = s->priv_data;
+ int key_frame, index, pred, stream_id;
+ int start=0;
+ int end= 255;
+ int keyframe_0_esc= s->nb_streams > 2;
+ int pred_table[10];
+
+ if(keyframe_0_esc){
+ /* keyframe = 0 escape */
+ FrameCode *ft= &nut->frame_code[start];
+ ft->flags= FLAG_DATA_SIZE;
+ ft->stream_id_plus1= 0;
+ ft->size_mul=1;
+ ft->timestamp_delta=0;
+ start++;
+ }
+
+ for(stream_id= 0; stream_id<s->nb_streams; stream_id++){
+ int start2= start + (end-start)*stream_id / s->nb_streams;
+ int end2 = start + (end-start)*(stream_id+1) / s->nb_streams;
+ AVCodecContext *codec = s->streams[stream_id]->codec;
+ int is_audio= codec->codec_type == CODEC_TYPE_AUDIO;
+ int intra_only= /*codec->intra_only || */is_audio;
+ int pred_count;
+
+ for(key_frame=0; key_frame<2; key_frame++){
+ if(intra_only && keyframe_0_esc && key_frame==0)
+ continue;
+
+ {
+ FrameCode *ft= &nut->frame_code[start2];
+ ft->flags= FLAG_KEY_FRAME*key_frame;
+ ft->flags|= FLAG_DATA_SIZE;
+ ft->stream_id_plus1= stream_id + 1;
+ ft->size_mul=1;
+ ft->timestamp_delta=0;
+ start2++;
+ }
+ }
+
+ key_frame= intra_only;
+#if 1
+ if(is_audio){
+ int frame_bytes= codec->frame_size*(int64_t)codec->bit_rate / (8*codec->sample_rate);
+ int pts;
+ for(pts=0; pts<2; pts++){
+ for(pred=0; pred<2; pred++){
+ FrameCode *ft= &nut->frame_code[start2];
+ ft->flags= FLAG_KEY_FRAME*key_frame;
+ ft->stream_id_plus1= stream_id + 1;
+ ft->size_mul=frame_bytes + 2;
+ ft->size_lsb=frame_bytes + pred;
+ ft->timestamp_delta=pts;
+ start2++;
+ }
+ }
+ }else{
+ FrameCode *ft= &nut->frame_code[start2];
+ ft->flags= FLAG_KEY_FRAME | FLAG_DATA_SIZE;
+ ft->stream_id_plus1= stream_id + 1;
+ ft->size_mul=1;
+ ft->timestamp_delta=1;
+ start2++;
+ }
+#endif
+
+ if(codec->has_b_frames){
+ pred_count=5;
+ pred_table[0]=-2;
+ pred_table[1]=-1;
+ pred_table[2]=1;
+ pred_table[3]=3;
+ pred_table[4]=4;
+ }else if(codec->codec_id == CODEC_ID_VORBIS){
+ pred_count=3;
+ pred_table[0]=2;
+ pred_table[1]=9;
+ pred_table[2]=16;
+ }else{
+ pred_count=1;
+ pred_table[0]=1;
+ }
+
+ for(pred=0; pred<pred_count; pred++){
+ int start3= start2 + (end2-start2)*pred / pred_count;
+ int end3 = start2 + (end2-start2)*(pred+1) / pred_count;
+
+ for(index=start3; index<end3; index++){
+ FrameCode *ft= &nut->frame_code[index];
+ ft->flags= FLAG_KEY_FRAME*key_frame;
+ ft->flags|= FLAG_DATA_SIZE;
+ ft->stream_id_plus1= stream_id + 1;
+//FIXME use single byte size and pred from last
+ ft->size_mul= end3-start3;
+ ft->size_lsb= index - start3;
+ ft->timestamp_delta= pred_table[pred];
+ }
+ }
+ }
+ memmove(&nut->frame_code['N'+1], &nut->frame_code['N'], sizeof(FrameCode)*(255-'N'));
+ nut->frame_code['N'].flags= FLAG_INVALID;
+}
+
+static uint64_t get_v(ByteIOContext *bc)
+{
+ uint64_t val = 0;
+
+ for(;;)
+ {
+ int tmp = get_byte(bc);
+
+ if (tmp&0x80)
+ val= (val<<7) + tmp - 0x80;
+ else{
+//av_log(NULL, AV_LOG_DEBUG, "get_v()= %"PRId64"\n", (val<<7) + tmp);
+ return (val<<7) + tmp;
+ }
+ }
+ return -1;
+}
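+
+/* e.g. (made-up value) 300 is coded on two bytes as 0x82 0x2C:
+ * 0x82 has the continuation bit -> val = 0x82 - 0x80 = 2,
+ * 0x2C does not -> return (2 << 7) + 0x2C = 300.
+ * put_v() on the muxer side emits the matching byte sequence. */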
+
+static int get_str(ByteIOContext *bc, char *string, unsigned int maxlen){
+ unsigned int len= get_v(bc);
+
+ if(len && maxlen)
+ get_buffer(bc, string, FFMIN(len, maxlen));
+ while(len > maxlen){
+ get_byte(bc);
+ len--;
+ }
+
+ if(maxlen)
+ string[FFMIN(len, maxlen-1)]= 0;
+
+ if(maxlen == len)
+ return -1;
+ else
+ return 0;
+}
+
+static int64_t get_s(ByteIOContext *bc){
+ int64_t v = get_v(bc) + 1;
+
+ if (v&1) return -(v>>1);
+ else return (v>>1);
+}
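+
+/* inverse of the signed mapping used by put_s(): n > 0 is stored as 2n-1 and
+ * n <= 0 as -2n, so e.g. a stored 5 decodes to +3, a stored 4 to -2,
+ * and a stored 0 to 0 */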
+
+static uint64_t get_vb(ByteIOContext *bc){
+ uint64_t val=0;
+ unsigned int i= get_v(bc);
+
+ if(i>8)
+ return UINT64_MAX;
+
+ while(i--)
+ val = (val<<8) + get_byte(bc);
+
+//av_log(NULL, AV_LOG_DEBUG, "get_vb()= %"PRId64"\n", val);
+ return val;
+}
+
+#ifdef TRACE
+static inline uint64_t get_v_trace(ByteIOContext *bc, char *file, char *func, int line){
+ uint64_t v= get_v(bc);
+
+ printf("get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+
+static inline int64_t get_s_trace(ByteIOContext *bc, char *file, char *func, int line){
+ int64_t v= get_s(bc);
+
+ printf("get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+
+static inline uint64_t get_vb_trace(ByteIOContext *bc, char *file, char *func, int line){
+ uint64_t v= get_vb(bc);
+
+ printf("get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+#define get_v(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_vb(bc) get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#endif
+
+
+static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int calculate_checksum)
+{
+ int64_t start, size;
+ start= url_ftell(bc) - 8;
+
+ size= get_v(bc);
+
+ init_checksum(bc, calculate_checksum ? av_adler32_update : NULL, 1);
+
+ nut->packet_start[2] = start;
+ nut->written_packet_size= size;
+
+ return size;
+}
+
+static int check_checksum(ByteIOContext *bc){
+ unsigned long checksum= get_checksum(bc);
+ return checksum != get_be32(bc);
+}
+
+/**
+ * Get the number of bits (always a multiple of 7) that put_v() would need
+ * to code val; divide by 7 to get the byte count.
+ */
+static int get_length(uint64_t val){
+ int i;
+
+ for (i=7; val>>i; i+=7);
+
+ return i;
+}
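+
+/* e.g. get_length(300) = 14, i.e. two 7-bit groups, matching the two bytes
+ * that get_v()/put_v() use for that value */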
+
+static uint64_t find_any_startcode(ByteIOContext *bc, int64_t pos){
+ uint64_t state=0;
+
+ if(pos >= 0)
+        url_fseek(bc, pos, SEEK_SET); //note, this may fail if the stream isn't seekable, but that shouldn't matter, as in this case we simply start where we are currently
+
+ while(!url_feof(bc)){
+ state= (state<<8) | get_byte(bc);
+ if((state>>56) != 'N')
+ continue;
+ switch(state){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case KEYFRAME_STARTCODE:
+ case INFO_STARTCODE:
+ case INDEX_STARTCODE:
+ return state;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * find the given startcode.
+ * @param code the startcode
+ * @param pos the start position of the search, or -1 to search from the current position
+ * @returns the position of the startcode or -1 if not found
+ */
+static int64_t find_startcode(ByteIOContext *bc, uint64_t code, int64_t pos){
+ for(;;){
+ uint64_t startcode= find_any_startcode(bc, pos);
+ if(startcode == code)
+ return url_ftell(bc) - 8;
+ else if(startcode == 0)
+ return -1;
+ pos=-1;
+ }
+}
+
+static int64_t lsb2full(StreamContext *stream, int64_t lsb){
+ int64_t mask = (1<<stream->msb_timestamp_shift)-1;
+ int64_t delta= stream->last_pts - mask/2;
+ return ((lsb - delta)&mask) + delta;
+}
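+
+/* example with made-up numbers: with msb_timestamp_shift = 7 (mask = 127) and
+ * last_pts = 1000, delta = 937, so an lsb of 10 expands to
+ * ((10 - 937) & 127) + 937 = 97 + 937 = 1034: the only timestamp in the
+ * window [937, 1064] whose low 7 bits are 10 */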
+
+#ifdef CONFIG_MUXERS
+
+static void put_v(ByteIOContext *bc, uint64_t val)
+{
+ int i;
+
+//av_log(NULL, AV_LOG_DEBUG, "put_v()= %"PRId64"\n", val);
+    val &= 0x7FFFFFFFFFFFFFFFULL; // FIXME can only encode up to 63 bits currently
+ i= get_length(val);
+
+ for (i-=7; i>0; i-=7){
+ put_byte(bc, 0x80 | (val>>i));
+ }
+
+ put_byte(bc, val&0x7f);
+}
+
+/**
+ * Store a string as vb: its length coded with put_v(), followed by the raw bytes.
+ */
+static void put_str(ByteIOContext *bc, const char *string){
+ int len= strlen(string);
+
+ put_v(bc, len);
+ put_buffer(bc, string, len);
+}
+
+static void put_s(ByteIOContext *bc, int64_t val){
+ if (val<=0) put_v(bc, -2*val );
+ else put_v(bc, 2*val-1);
+}
+
+static void put_vb(ByteIOContext *bc, uint64_t val){
+ int i;
+
+ for (i=8; val>>i; i+=8);
+
+ put_v(bc, i>>3);
+ for(i-=8; i>=0; i-=8)
+ put_byte(bc, (val>>i)&0xFF);
+}
+
+#ifdef TRACE
+static inline void put_v_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
+ printf("get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+
+ put_v(bc, v);
+}
+
+static inline void put_s_trace(ByteIOContext *bc, int64_t v, char *file, char *func, int line){
+ printf("get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+
+ put_s(bc, v);
+}
+
+static inline void put_vb_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
+ printf("get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+
+ put_vb(bc, v);
+}
+#define put_v(bc, v) put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define put_s(bc, v) put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define put_vb(bc, v) put_vb_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#endif
+
+static int put_packetheader(NUTContext *nut, ByteIOContext *bc, int max_size, int calculate_checksum)
+{
+ put_flush_packet(bc);
+ nut->packet_start[2]= url_ftell(bc) - 8;
+ nut->written_packet_size = max_size;
+
+ /* packet header */
+ put_v(bc, nut->written_packet_size); /* forward ptr */
+
+ if(calculate_checksum)
+ init_checksum(bc, av_adler32_update, 1);
+
+ return 0;
+}
+
+/**
+ * Rewrite the forward pointer of the current packet once its real size is known.
+ * Must not be called more than once per packet.
+ */
+static int update_packetheader(NUTContext *nut, ByteIOContext *bc, int additional_size, int calculate_checksum){
+ int64_t start= nut->packet_start[2];
+ int64_t cur= url_ftell(bc);
+ int size= cur - start - get_length(nut->written_packet_size)/7 - 8;
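+    // i.e. the payload written since the header: everything between start and
+    // cur minus the 8-byte startcode and the forward-pointer field
+    // (get_length(...)/7 bytes) that put_packetheader() already emitted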
+
+ if(calculate_checksum)
+ size += 4;
+
+ if(size != nut->written_packet_size){
+ int i;
+
+ assert( size <= nut->written_packet_size );
+
+ url_fseek(bc, start + 8, SEEK_SET);
+ for(i=get_length(size); i < get_length(nut->written_packet_size); i+=7)
+ put_byte(bc, 0x80);
+ put_v(bc, size);
+
+ url_fseek(bc, cur, SEEK_SET);
+ nut->written_packet_size= size; //FIXME may fail if multiple updates with differing sizes, as get_length may differ
+
+ if(calculate_checksum)
+ put_be32(bc, get_checksum(bc));
+ }
+
+ return 0;
+}
+
+static int nut_write_header(AVFormatContext *s)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ AVCodecContext *codec;
+ int i, j, tmp_time, tmp_flags,tmp_stream, tmp_mul, tmp_size, tmp_fields;
+
+ if (strcmp(s->filename, "./data/b-libav.nut")) {
+ av_log(s, AV_LOG_ERROR, " libavformat NUT is non-compliant and disabled\n");
+ return -1;
+ }
+
+ nut->avf= s;
+
+ nut->stream =
+ av_mallocz(sizeof(StreamContext)*s->nb_streams);
+
+
+ put_buffer(bc, ID_STRING, strlen(ID_STRING));
+ put_byte(bc, 0);
+ nut->packet_start[2]= url_ftell(bc);
+
+ /* main header */
+ put_be64(bc, MAIN_STARTCODE);
+ put_packetheader(nut, bc, 120+5*256, 1);
+ put_v(bc, 2); /* version */
+ put_v(bc, s->nb_streams);
+ put_v(bc, MAX_DISTANCE);
+ put_v(bc, MAX_SHORT_DISTANCE);
+
+ put_v(bc, nut->rate_num=1);
+ put_v(bc, nut->rate_den=2);
+ put_v(bc, nut->short_startcode=0x4EFE79);
+
+ build_frame_code(s);
+ assert(nut->frame_code['N'].flags == FLAG_INVALID);
+
+ tmp_time= tmp_flags= tmp_stream= tmp_mul= tmp_size= /*tmp_res=*/ INT_MAX;
+ for(i=0; i<256;){
+ tmp_fields=0;
+ tmp_size= 0;
+ if(tmp_time != nut->frame_code[i].timestamp_delta) tmp_fields=1;
+ if(tmp_mul != nut->frame_code[i].size_mul ) tmp_fields=2;
+ if(tmp_stream != nut->frame_code[i].stream_id_plus1) tmp_fields=3;
+ if(tmp_size != nut->frame_code[i].size_lsb ) tmp_fields=4;
+// if(tmp_res != nut->frame_code[i].res ) tmp_fields=5;
+
+ tmp_time = nut->frame_code[i].timestamp_delta;
+ tmp_flags = nut->frame_code[i].flags;
+ tmp_stream= nut->frame_code[i].stream_id_plus1;
+ tmp_mul = nut->frame_code[i].size_mul;
+ tmp_size = nut->frame_code[i].size_lsb;
+// tmp_res = nut->frame_code[i].res;
+
+ for(j=0; i<256; j++,i++){
+ if(nut->frame_code[i].timestamp_delta != tmp_time ) break;
+ if(nut->frame_code[i].flags != tmp_flags ) break;
+ if(nut->frame_code[i].stream_id_plus1 != tmp_stream) break;
+ if(nut->frame_code[i].size_mul != tmp_mul ) break;
+ if(nut->frame_code[i].size_lsb != tmp_size+j) break;
+// if(nut->frame_code[i].res != tmp_res ) break;
+ }
+ if(j != tmp_mul - tmp_size) tmp_fields=6;
+
+ put_v(bc, tmp_flags);
+ put_v(bc, tmp_fields);
+ if(tmp_fields>0) put_s(bc, tmp_time);
+ if(tmp_fields>1) put_v(bc, tmp_mul);
+ if(tmp_fields>2) put_v(bc, tmp_stream);
+ if(tmp_fields>3) put_v(bc, tmp_size);
+ if(tmp_fields>4) put_v(bc, 0 /*tmp_res*/);
+ if(tmp_fields>5) put_v(bc, j);
+ }
+
+ update_packetheader(nut, bc, 0, 1);
+
+ /* stream headers */
+ for (i = 0; i < s->nb_streams; i++)
+ {
+ int nom, denom, ssize;
+
+ codec = s->streams[i]->codec;
+
+ put_be64(bc, STREAM_STARTCODE);
+ put_packetheader(nut, bc, 120 + codec->extradata_size, 1);
+ put_v(bc, i /*s->streams[i]->index*/);
+ switch(codec->codec_type){
+ case CODEC_TYPE_VIDEO: put_v(bc, 0); break;
+ case CODEC_TYPE_AUDIO: put_v(bc, 1); break;
+// case CODEC_TYPE_TEXT : put_v(bc, 2); break;
+ case CODEC_TYPE_DATA : put_v(bc, 3); break;
+ default: return -1;
+ }
+ if (codec->codec_tag)
+ put_vb(bc, codec->codec_tag);
+ else if (codec->codec_type == CODEC_TYPE_VIDEO)
+ {
+ put_vb(bc, codec_get_bmp_tag(codec->codec_id));
+ }
+ else if (codec->codec_type == CODEC_TYPE_AUDIO)
+ {
+ put_vb(bc, codec_get_wav_tag(codec->codec_id));
+ }
+ else
+ put_vb(bc, 0);
+
+ ff_parse_specific_params(codec, &nom, &ssize, &denom);
+
+ nut->stream[i].rate_num= nom;
+ nut->stream[i].rate_den= denom;
+ av_set_pts_info(s->streams[i], 60, denom, nom);
+
+ put_v(bc, codec->bit_rate);
+ put_vb(bc, 0); /* no language code */
+ put_v(bc, nom);
+ put_v(bc, denom);
+ if(nom / denom < 1000)
+ nut->stream[i].msb_timestamp_shift = 7;
+ else
+ nut->stream[i].msb_timestamp_shift = 14;
+ put_v(bc, nut->stream[i].msb_timestamp_shift);
+ put_v(bc, codec->has_b_frames);
+ put_byte(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */
+
+ if(codec->extradata_size){
+ put_v(bc, 1);
+ put_v(bc, codec->extradata_size);
+ put_buffer(bc, codec->extradata, codec->extradata_size);
+ }
+ put_v(bc, 0); /* end of codec specific headers */
+
+ switch(codec->codec_type)
+ {
+ case CODEC_TYPE_AUDIO:
+ put_v(bc, codec->sample_rate);
+ put_v(bc, 1);
+ put_v(bc, codec->channels);
+ break;
+ case CODEC_TYPE_VIDEO:
+ put_v(bc, codec->width);
+ put_v(bc, codec->height);
+ put_v(bc, codec->sample_aspect_ratio.num);
+ put_v(bc, codec->sample_aspect_ratio.den);
+ put_v(bc, 0); /* csp type -- unknown */
+ break;
+ default:
+ break;
+ }
+ update_packetheader(nut, bc, 0, 1);
+ }
+
+ /* info header */
+ put_be64(bc, INFO_STARTCODE);
+ put_packetheader(nut, bc, 30+strlen(s->author)+strlen(s->title)+
+ strlen(s->comment)+strlen(s->copyright)+strlen(LIBAVFORMAT_IDENT), 1);
+ if (s->author[0])
+ {
+ put_v(bc, 9); /* type */
+ put_str(bc, s->author);
+ }
+ if (s->title[0])
+ {
+ put_v(bc, 10); /* type */
+ put_str(bc, s->title);
+ }
+ if (s->comment[0])
+ {
+ put_v(bc, 11); /* type */
+ put_str(bc, s->comment);
+ }
+ if (s->copyright[0])
+ {
+ put_v(bc, 12); /* type */
+ put_str(bc, s->copyright);
+ }
+ /* encoder */
+ if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)){
+ put_v(bc, 13); /* type */
+ put_str(bc, LIBAVFORMAT_IDENT);
+ }
+
+ put_v(bc, 0); /* eof info */
+ update_packetheader(nut, bc, 0, 1);
+
+ put_flush_packet(bc);
+
+ return 0;
+}
+
+static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ NUTContext *nut = s->priv_data;
+ StreamContext *stream= &nut->stream[pkt->stream_index];
+ ByteIOContext *bc = &s->pb;
+ int key_frame = 0, full_pts=0;
+ AVCodecContext *enc;
+ int64_t coded_pts;
+ int frame_type, best_length, frame_code, flags, i, size_mul, size_lsb, time_delta;
+ const int64_t frame_start= url_ftell(bc);
+ int64_t pts= pkt->pts;
+ int size= pkt->size;
+ int stream_index= pkt->stream_index;
+
+ enc = s->streams[stream_index]->codec;
+ key_frame = !!(pkt->flags & PKT_FLAG_KEY);
+
+ frame_type=0;
+ if(frame_start + size + 20 - FFMAX(nut->packet_start[1], nut->packet_start[2]) > MAX_DISTANCE)
+ frame_type=2;
+ if(key_frame && !stream->last_key_frame)
+ frame_type=2;
+
+ if(frame_type>1){
+ int64_t global_ts= av_rescale(pts, stream->rate_den*(int64_t)nut->rate_num, stream->rate_num*(int64_t)nut->rate_den);
+ reset(s, global_ts);
+ put_be64(bc, KEYFRAME_STARTCODE);
+ put_v(bc, global_ts);
+ }
+ assert(stream->last_pts != AV_NOPTS_VALUE);
+ coded_pts = pts & ((1<<stream->msb_timestamp_shift)-1);
+ if(lsb2full(stream, coded_pts) != pts)
+ full_pts=1;
+
+ if(full_pts)
+ coded_pts= pts + (1<<stream->msb_timestamp_shift);
+
+ best_length=INT_MAX;
+ frame_code= -1;
+ for(i=0; i<256; i++){
+ int stream_id_plus1= nut->frame_code[i].stream_id_plus1;
+ int fc_key_frame;
+ int length=0;
+ size_mul= nut->frame_code[i].size_mul;
+ size_lsb= nut->frame_code[i].size_lsb;
+ time_delta= nut->frame_code[i].timestamp_delta;
+ flags= nut->frame_code[i].flags;
+
+ assert(size_mul > size_lsb);
+
+ if(stream_id_plus1 == 0) length+= get_length(stream_index);
+ else if(stream_id_plus1 - 1 != stream_index)
+ continue;
+ fc_key_frame= !!(flags & FLAG_KEY_FRAME);
+
+ assert(key_frame==0 || key_frame==1);
+ if(fc_key_frame != key_frame)
+ continue;
+
+ if(flags & FLAG_DATA_SIZE){
+ if(size % size_mul != size_lsb)
+ continue;
+ length += get_length(size / size_mul);
+ }else if(size != size_lsb)
+ continue;
+
+ if(full_pts && time_delta)
+ continue;
+
+ if(!time_delta){
+ length += get_length(coded_pts);
+ }else{
+ if(time_delta != pts - stream->last_pts)
+ continue;
+ }
+
+ if(length < best_length){
+ best_length= length;
+ frame_code=i;
+ }
+// av_log(s, AV_LOG_DEBUG, "%d %d %d %d %d %d %d %d %d %d\n", key_frame, frame_type, full_pts, size, stream_index, flags, size_mul, size_lsb, stream_id_plus1, length);
+ }
+
+ assert(frame_code != -1);
+ flags= nut->frame_code[frame_code].flags;
+ size_mul= nut->frame_code[frame_code].size_mul;
+ size_lsb= nut->frame_code[frame_code].size_lsb;
+ time_delta= nut->frame_code[frame_code].timestamp_delta;
+#ifdef TRACE
+ best_length /= 7;
+ best_length ++; //frame_code
+ if(frame_type==2){
+ best_length += 8; // startcode
+ }
+ av_log(s, AV_LOG_DEBUG, "kf:%d ft:%d pt:%d fc:%2X len:%2d size:%d stream:%d flag:%d mul:%d lsb:%d s+1:%d pts_delta:%d pts:%"PRId64" fs:%"PRId64"\n", key_frame, frame_type, full_pts ? 1 : 0, frame_code, best_length, size, stream_index, flags, size_mul, size_lsb, nut->frame_code[frame_code].stream_id_plus1,(int)(pts - stream->last_pts), pts, frame_start);
+// av_log(s, AV_LOG_DEBUG, "%d %d %d\n", stream->lru_pts_delta[0], stream->lru_pts_delta[1], stream->lru_pts_delta[2]);
+#endif
+
+ assert(frame_type != 1); //short startcode not implemented yet
+ put_byte(bc, frame_code);
+
+ if(nut->frame_code[frame_code].stream_id_plus1 == 0)
+ put_v(bc, stream_index);
+ if (!time_delta){
+ put_v(bc, coded_pts);
+ }
+ if(flags & FLAG_DATA_SIZE)
+ put_v(bc, size / size_mul);
+ else
+ assert(size == size_lsb);
+ if(size > MAX_DISTANCE){
+ assert(frame_type > 1);
+ }
+
+ put_buffer(bc, pkt->data, size);
+
+ update(nut, stream_index, frame_start, frame_type, frame_code, key_frame, size, pts);
+
+ return 0;
+}
+
+static int nut_write_trailer(AVFormatContext *s)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+
+#if 0
+ int i;
+
+ /* WRITE INDEX */
+
+    for (i = 0; i < s->nb_streams; i++)
+ {
+ put_be64(bc, INDEX_STARTCODE);
+ put_packetheader(nut, bc, 64, 1);
+ put_v(bc, s->streams[i]->id);
+ put_v(bc, ...);
+ update_packetheader(nut, bc, 0, 1);
+ }
+#endif
+
+ put_flush_packet(bc);
+
+ av_freep(&nut->stream);
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+static int nut_probe(AVProbeData *p)
+{
+ int i;
+ uint64_t code= 0xff;
+
+ for (i = 0; i < p->buf_size; i++) {
+ code = (code << 8) | p->buf[i];
+ if (code == MAIN_STARTCODE)
+ return AVPROBE_SCORE_MAX;
+ }
+ return 0;
+}
+
+static int decode_main_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ uint64_t tmp;
+ int i, j, tmp_stream, tmp_mul, tmp_time, tmp_size, count, tmp_res;
+
+ get_packetheader(nut, bc, 1);
+
+ tmp = get_v(bc);
+ if (tmp != 2){
+ av_log(s, AV_LOG_ERROR, "bad version (%"PRId64")\n", tmp);
+ return -1;
+ }
+
+ nut->stream_count = get_v(bc);
+ if(nut->stream_count > MAX_STREAMS){
+ av_log(s, AV_LOG_ERROR, "too many streams\n");
+ return -1;
+ }
+ nut->max_distance = get_v(bc);
+ nut->max_short_distance = get_v(bc);
+ nut->rate_num= get_v(bc);
+ nut->rate_den= get_v(bc);
+ nut->short_startcode= get_v(bc);
+ if(nut->short_startcode>>16 != 'N'){
+ av_log(s, AV_LOG_ERROR, "invalid short startcode %X\n", nut->short_startcode);
+ return -1;
+ }
+
+ for(i=0; i<256;){
+ int tmp_flags = get_v(bc);
+ int tmp_fields= get_v(bc);
+ if(tmp_fields>0) tmp_time = get_s(bc);
+ if(tmp_fields>1) tmp_mul = get_v(bc);
+ if(tmp_fields>2) tmp_stream= get_v(bc);
+ if(tmp_fields>3) tmp_size = get_v(bc);
+ else tmp_size = 0;
+ if(tmp_fields>4) tmp_res = get_v(bc);
+ else tmp_res = 0;
+ if(tmp_fields>5) count = get_v(bc);
+ else count = tmp_mul - tmp_size;
+
+ while(tmp_fields-- > 6)
+ get_v(bc);
+
+ if(count == 0 || i+count > 256){
+ av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i);
+ return -1;
+ }
+ if(tmp_stream > nut->stream_count + 1){
+ av_log(s, AV_LOG_ERROR, "illegal stream number\n");
+ return -1;
+ }
+
+ for(j=0; j<count; j++,i++){
+ nut->frame_code[i].flags = tmp_flags ;
+ nut->frame_code[i].timestamp_delta = tmp_time ;
+ nut->frame_code[i].stream_id_plus1 = tmp_stream;
+ nut->frame_code[i].size_mul = tmp_mul ;
+ nut->frame_code[i].size_lsb = tmp_size+j;
+ nut->frame_code[i].reserved_count = tmp_res ;
+ }
+ }
+ if(nut->frame_code['N'].flags != FLAG_INVALID){
+ av_log(s, AV_LOG_ERROR, "illegal frame_code table\n");
+ return -1;
+ }
+
+ if(check_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Main header checksum mismatch\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int decode_stream_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ int class, nom, denom, stream_id;
+ uint64_t tmp;
+ AVStream *st;
+
+ get_packetheader(nut, bc, 1);
+ stream_id= get_v(bc);
+ if(stream_id >= nut->stream_count || s->streams[stream_id])
+ return -1;
+
+ st = av_new_stream(s, stream_id);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ class = get_v(bc);
+ tmp = get_vb(bc);
+ st->codec->codec_tag= tmp;
+ switch(class)
+ {
+ case 0:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = codec_get_bmp_id(tmp);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR, "Unknown codec?!\n");
+ break;
+ case 1:
+ case 32: //compatibility
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = codec_get_wav_id(tmp);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR, "Unknown codec?!\n");
+ break;
+ case 2:
+// st->codec->codec_type = CODEC_TYPE_TEXT;
+// break;
+ case 3:
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "Unknown stream class (%d)\n", class);
+ return -1;
+ }
+ s->bit_rate += get_v(bc);
+ get_vb(bc); /* language code */
+ nom = get_v(bc);
+ denom = get_v(bc);
+ nut->stream[stream_id].msb_timestamp_shift = get_v(bc);
+ st->codec->has_b_frames=
+ nut->stream[stream_id].decode_delay= get_v(bc);
+ get_byte(bc); /* flags */
+
+ /* codec specific data headers */
+ while(get_v(bc) != 0){
+ st->codec->extradata_size= get_v(bc);
+ if((unsigned)st->codec->extradata_size > (1<<30))
+ return -1;
+ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(bc, st->codec->extradata, st->codec->extradata_size);
+// url_fskip(bc, get_v(bc));
+ }
+
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) /* VIDEO */
+ {
+ st->codec->width = get_v(bc);
+ st->codec->height = get_v(bc);
+ st->codec->sample_aspect_ratio.num= get_v(bc);
+ st->codec->sample_aspect_ratio.den= get_v(bc);
+ get_v(bc); /* csp type */
+ }
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) /* AUDIO */
+ {
+ st->codec->sample_rate = get_v(bc);
+ get_v(bc); // samplerate_den
+ st->codec->channels = get_v(bc);
+ }
+ if(check_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Stream header %d checksum mismatch\n", stream_id);
+ return -1;
+ }
+ av_set_pts_info(s->streams[stream_id], 60, denom, nom);
+ nut->stream[stream_id].rate_num= nom;
+ nut->stream[stream_id].rate_den= denom;
+ return 0;
+}
+
+static int decode_info_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+
+ get_packetheader(nut, bc, 1);
+
+ for(;;){
+ int id= get_v(bc);
+ char *name, *type, custom_name[256], custom_type[256];
+
+ if(!id)
+ break;
+ else if(id >= sizeof(info_table)/sizeof(info_table[0])){
+ av_log(s, AV_LOG_ERROR, "info id is too large %d %zd\n", id, sizeof(info_table)/sizeof(info_table[0]));
+ return -1;
+ }
+
+ type= info_table[id][1];
+ name= info_table[id][0];
+//av_log(s, AV_LOG_DEBUG, "%d %s %s\n", id, type, name);
+
+ if(!type){
+ get_str(bc, custom_type, sizeof(custom_type));
+ type= custom_type;
+ }
+ if(!name){
+ get_str(bc, custom_name, sizeof(custom_name));
+ name= custom_name;
+ }
+
+ if(!strcmp(type, "v")){
+ get_v(bc);
+ }else{
+ if(!strcmp(name, "Author"))
+ get_str(bc, s->author, sizeof(s->author));
+ else if(!strcmp(name, "Title"))
+ get_str(bc, s->title, sizeof(s->title));
+ else if(!strcmp(name, "Copyright"))
+ get_str(bc, s->copyright, sizeof(s->copyright));
+ else if(!strcmp(name, "Description"))
+ get_str(bc, s->comment, sizeof(s->comment));
+ else
+ get_str(bc, NULL, 0);
+ }
+ }
+ if(check_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Info header checksum mismatch\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int nut_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos;
+ int inited_stream_count;
+
+ nut->avf= s;
+
+ /* main header */
+ pos=0;
+ for(;;){
+ pos= find_startcode(bc, MAIN_STARTCODE, pos)+1;
+ if (pos<0){
+ av_log(s, AV_LOG_ERROR, "no main startcode found\n");
+ return -1;
+ }
+ if(decode_main_header(nut) >= 0)
+ break;
+ }
+
+
+ s->bit_rate = 0;
+
+ nut->stream = av_malloc(sizeof(StreamContext)*nut->stream_count);
+
+ /* stream headers */
+ pos=0;
+ for(inited_stream_count=0; inited_stream_count < nut->stream_count;){
+ pos= find_startcode(bc, STREAM_STARTCODE, pos)+1;
+ if (pos<0+1){
+ av_log(s, AV_LOG_ERROR, "not all stream headers found\n");
+ return -1;
+ }
+ if(decode_stream_header(nut) >= 0)
+ inited_stream_count++;
+ }
+
+ /* info headers */
+ pos=0;
+ for(;;){
+ uint64_t startcode= find_any_startcode(bc, pos);
+ pos= url_ftell(bc);
+
+ if(startcode==0){
+ av_log(s, AV_LOG_ERROR, "EOF before video frames\n");
+ return -1;
+ }else if(startcode == KEYFRAME_STARTCODE){
+ nut->next_startcode= startcode;
+ break;
+ }else if(startcode != INFO_STARTCODE){
+ continue;
+ }
+
+ decode_info_header(nut);
+ }
+
+ return 0;
+}
+
+static int decode_frame_header(NUTContext *nut, int *key_frame_ret, int64_t *pts_ret, int *stream_id_ret, int frame_code, int frame_type, int64_t frame_start){
+ AVFormatContext *s= nut->avf;
+ StreamContext *stream;
+ ByteIOContext *bc = &s->pb;
+ int size, flags, size_mul, size_lsb, stream_id, time_delta;
+ int64_t pts = 0;
+
+ if(frame_type < 2 && frame_start - nut->packet_start[2] > nut->max_distance){
+ av_log(s, AV_LOG_ERROR, "last frame must have been damaged\n");
+ return -1;
+ }
+
+ if(frame_type)
+ nut->packet_start[ frame_type ]= frame_start; //otherwise 1 goto 1 may happen
+
+ flags= nut->frame_code[frame_code].flags;
+ size_mul= nut->frame_code[frame_code].size_mul;
+ size_lsb= nut->frame_code[frame_code].size_lsb;
+ stream_id= nut->frame_code[frame_code].stream_id_plus1 - 1;
+ time_delta= nut->frame_code[frame_code].timestamp_delta;
+
+ if(stream_id==-1)
+ stream_id= get_v(bc);
+ if(stream_id >= s->nb_streams){
+ av_log(s, AV_LOG_ERROR, "illegal stream_id\n");
+ return -1;
+ }
+ stream= &nut->stream[stream_id];
+
+// av_log(s, AV_LOG_DEBUG, "ft:%d ppts:%d %d %d\n", frame_type, stream->lru_pts_delta[0], stream->lru_pts_delta[1], stream->lru_pts_delta[2]);
+
+ *key_frame_ret= !!(flags & FLAG_KEY_FRAME);
+
+ if(!time_delta){
+ int64_t mask = (1<<stream->msb_timestamp_shift)-1;
+ pts= get_v(bc);
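+        /* a coded value above the lsb range carries a full pts, offset by
+         * mask+1 on the muxer side (cf. coded_pts in nut_write_packet());
+         * smaller values are lsbs expanded relative to last_pts */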
+ if(pts > mask){
+ pts -= mask+1;
+ }else{
+ if(stream->last_pts == AV_NOPTS_VALUE){
+ av_log(s, AV_LOG_ERROR, "no reference pts available\n");
+ return -1;
+ }
+ pts= lsb2full(stream, pts);
+ }
+ }else{
+ if(stream->last_pts == AV_NOPTS_VALUE){
+ av_log(s, AV_LOG_ERROR, "no reference pts available\n");
+ return -1;
+ }
+ pts= stream->last_pts + time_delta;
+ }
+
+ if(*key_frame_ret){
+// av_log(s, AV_LOG_DEBUG, "stream:%d start:%"PRId64" pts:%"PRId64" length:%"PRId64"\n",stream_id, frame_start, av_pts, frame_start - nut->stream[stream_id].last_sync_pos);
+ av_add_index_entry(
+ s->streams[stream_id],
+ frame_start,
+ pts,
+ 0,
+ frame_start - nut->stream[stream_id].last_sync_pos,
+ AVINDEX_KEYFRAME);
+ nut->stream[stream_id].last_sync_pos= frame_start;
+// assert(nut->packet_start == frame_start);
+ }
+
+ assert(size_mul > size_lsb);
+ size= size_lsb;
+ if(flags & FLAG_DATA_SIZE)
+ size+= size_mul*get_v(bc);
+
+#ifdef TRACE
+av_log(s, AV_LOG_DEBUG, "fs:%"PRId64" fc:%d ft:%d kf:%d pts:%"PRId64" size:%d mul:%d lsb:%d flags:%d delta:%d\n", frame_start, frame_code, frame_type, *key_frame_ret, pts, size, size_mul, size_lsb, flags, time_delta);
+#endif
+
+ if(frame_type==0 && url_ftell(bc) - nut->packet_start[2] + size > nut->max_distance){
+ av_log(s, AV_LOG_ERROR, "frame size too large\n");
+ return -1;
+ }
+
+ *stream_id_ret = stream_id;
+ *pts_ret = pts;
+
+ update(nut, stream_id, frame_start, frame_type, frame_code, *key_frame_ret, size, pts);
+
+ return size;
+}
+
+static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code, int frame_type, int64_t frame_start){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ int size, stream_id, key_frame, discard;
+ int64_t pts, last_IP_pts;
+
+ size= decode_frame_header(nut, &key_frame, &pts, &stream_id, frame_code, frame_type, frame_start);
+ if(size < 0)
+ return -1;
+
+ discard= s->streams[ stream_id ]->discard;
+ last_IP_pts= s->streams[ stream_id ]->last_IP_pts;
+ if( (discard >= AVDISCARD_NONKEY && !key_frame)
+ ||(discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && last_IP_pts > pts)
+ || discard >= AVDISCARD_ALL){
+ url_fskip(bc, size);
+ return 1;
+ }
+
+ av_get_packet(bc, pkt, size);
+ pkt->stream_index = stream_id;
+ if (key_frame)
+ pkt->flags |= PKT_FLAG_KEY;
+ pkt->pts = pts;
+
+ return 0;
+}
+
+static int nut_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int i, frame_code=0, ret;
+
+ for(;;){
+ int64_t pos= url_ftell(bc);
+ int frame_type= 0;
+ uint64_t tmp= nut->next_startcode;
+ nut->next_startcode=0;
+
+ if (url_feof(bc))
+ return -1;
+
+ if(tmp){
+ pos-=8;
+ }else{
+ frame_code = get_byte(bc);
+ if(frame_code == 'N'){
+ tmp= frame_code;
+ for(i=1; i<8; i++)
+ tmp = (tmp<<8) + get_byte(bc);
+ }
+ }
+ switch(tmp){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case INDEX_STARTCODE:
+ get_packetheader(nut, bc, 0);
+ assert(nut->packet_start[2] == pos);
+ url_fseek(bc, nut->written_packet_size, SEEK_CUR);
+ break;
+ case INFO_STARTCODE:
+ if(decode_info_header(nut)<0)
+ goto resync;
+ break;
+ case KEYFRAME_STARTCODE:
+ frame_type = 2;
+ reset(s, get_v(bc));
+ frame_code = get_byte(bc);
+ case 0:
+ ret= decode_frame(nut, pkt, frame_code, frame_type, pos);
+ if(ret==0)
+ return 0;
+ else if(ret==1) //ok but discard packet
+ break;
+ default:
+resync:
+av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", nut->packet_start[2]+1);
+ tmp= find_any_startcode(bc, nut->packet_start[2]+1);
+ if(tmp==0)
+ return -1;
+av_log(s, AV_LOG_DEBUG, "sync\n");
+ nut->next_startcode= tmp;
+ }
+ }
+}
+
+static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit){
+ NUTContext *nut = s->priv_data;
+ StreamContext *stream;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos, pts;
+ uint64_t code;
+ int frame_code,step, stream_id, i,size, key_frame;
+av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_index, *pos_arg, pos_limit);
+
+ if(*pos_arg < 0)
+ return AV_NOPTS_VALUE;
+
+ pos= *pos_arg;
+ step= FFMIN(16*1024, pos);
+ do{
+ pos-= step;
+ code= find_any_startcode(bc, pos);
+
+ if(code && url_ftell(bc) - 8 <= *pos_arg)
+ break;
+ step= FFMIN(2*step, pos);
+ }while(step);
+
+ if(!code) //nothing found, not even after pos_arg
+ return AV_NOPTS_VALUE;
+
+ url_fseek(bc, -8, SEEK_CUR);
+ for(i=0; i<s->nb_streams; i++)
+ nut->stream[i].last_sync_pos= url_ftell(bc);
+
+ for(;;){
+ int frame_type=0;
+ int64_t pos= url_ftell(bc);
+ uint64_t tmp=0;
+
+ if(pos > pos_limit || url_feof(bc))
+ return AV_NOPTS_VALUE;
+
+ frame_code = get_byte(bc);
+ if(frame_code == 'N'){
+ tmp= frame_code;
+ for(i=1; i<8; i++)
+ tmp = (tmp<<8) + get_byte(bc);
+ }
+//av_log(s, AV_LOG_DEBUG, "before switch %"PRIX64" at=%"PRId64"\n", tmp, pos);
+
+ switch(tmp){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case INDEX_STARTCODE:
+ case INFO_STARTCODE:
+ get_packetheader(nut, bc, 0);
+ assert(nut->packet_start[2]==pos);
+ url_fseek(bc, nut->written_packet_size, SEEK_CUR);
+ break;
+ case KEYFRAME_STARTCODE:
+ frame_type=2;
+ reset(s, get_v(bc));
+ frame_code = get_byte(bc);
+ case 0:
+ size= decode_frame_header(nut, &key_frame, &pts, &stream_id, frame_code, frame_type, pos);
+ if(size < 0)
+ goto resync;
+
+ stream= &nut->stream[stream_id];
+ if(stream_id != stream_index || !key_frame || pos < *pos_arg){
+ url_fseek(bc, size, SEEK_CUR);
+ break;
+ }
+
+ *pos_arg= pos;
+ return pts;
+ default:
+resync:
+av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", nut->packet_start[2]+1);
+ if(!find_any_startcode(bc, nut->packet_start[2]+1))
+ return AV_NOPTS_VALUE;
+
+ url_fseek(bc, -8, SEEK_CUR);
+ }
+ }
+ return AV_NOPTS_VALUE;
+}
+
+static int nut_read_seek(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
+// NUTContext *nut = s->priv_data;
+ int64_t pos;
+
+ if(av_seek_frame_binary(s, stream_index, target_ts, flags) < 0)
+ return -1;
+
+ pos= url_ftell(&s->pb);
+ nut_read_timestamp(s, stream_index, &pos, pos-1);
+
+ return 0;
+}
+
+static int nut_read_close(AVFormatContext *s)
+{
+ NUTContext *nut = s->priv_data;
+
+ av_freep(&nut->stream);
+
+ return 0;
+}
+
+#ifdef CONFIG_NUT_DEMUXER
+AVInputFormat nut_demuxer = {
+ "nut",
+ "nut format",
+ sizeof(NUTContext),
+ nut_probe,
+ nut_read_header,
+ nut_read_packet,
+ nut_read_close,
+ nut_read_seek,
+ nut_read_timestamp,
+ .extensions = "nut",
+};
+#endif
+#ifdef CONFIG_NUT_MUXER
+AVOutputFormat nut_muxer = {
+ "nut",
+ "nut format",
+ "video/x-nut",
+ "nut",
+ sizeof(NUTContext),
+#ifdef CONFIG_LIBVORBIS
+ CODEC_ID_VORBIS,
+#elif defined(CONFIG_MP3LAME)
+ CODEC_ID_MP3,
+#else
+ CODEC_ID_MP2, /* AC3 needs liba52 decoder */
+#endif
+ CODEC_ID_MPEG4,
+ nut_write_header,
+ nut_write_packet,
+ nut_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/nut.h b/contrib/ffmpeg/libavformat/nut.h
new file mode 100644
index 000000000..82bbf6f17
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nut.h
@@ -0,0 +1,97 @@
+/*
+ * "NUT" Container Format (de)muxer
+ * Copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+//#include <limits.h>
+#include "avformat.h"
+#include "crc.h"
+//#include "mpegaudio.h"
+#include "riff.h"
+//#include "adler32.h"
+
+#define MAIN_STARTCODE (0x7A561F5F04ADULL + (((uint64_t)('N'<<8) + 'M')<<48))
+#define STREAM_STARTCODE (0x11405BF2F9DBULL + (((uint64_t)('N'<<8) + 'S')<<48))
+#define SYNCPOINT_STARTCODE (0xE4ADEECA4569ULL + (((uint64_t)('N'<<8) + 'K')<<48))
+#define INDEX_STARTCODE (0xDD672F23E64EULL + (((uint64_t)('N'<<8) + 'X')<<48))
+#define INFO_STARTCODE (0xAB68B596BA78ULL + (((uint64_t)('N'<<8) + 'I')<<48))
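+/* Each of the above startcodes is a 64-bit constant whose two most
+ * significant bytes are 'N' followed by a type letter (M, S, K, X, I); the
+ * low 48 bits are fixed pseudo-random values. */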
+
+#define ID_STRING "nut/multimedia container\0"
+
+#define MAX_DISTANCE (1024*32-1)
+
+typedef enum{
+ FLAG_KEY = 1, ///<if set, frame is keyframe
+ FLAG_EOR = 2, ///<if set, stream has no relevance on presentation. (EOR)
+ FLAG_CODED_PTS = 8, ///<if set, coded_pts is in the frame header
+ FLAG_STREAM_ID = 16, ///<if set, stream_id is coded in the frame header
+ FLAG_SIZE_MSB = 32, ///<if set, data_size_msb is at frame header, otherwise data_size_msb is 0
+ FLAG_CHECKSUM = 64, ///<if set then the frame header contains a checksum
+ FLAG_RESERVED = 128, ///<if set, reserved_count is coded in the frame header
+ FLAG_CODED =4096, ///<if set, coded_flags are stored in the frame header.
+ FLAG_INVALID =8192, ///<if set, frame_code is invalid.
+}flag_t;
+
+typedef struct {
+ uint64_t pos;
+ uint64_t back_ptr;
+// uint64_t global_key_pts;
+ int64_t ts;
+} syncpoint_t;
+
+typedef struct {
+ uint16_t flags;
+ uint8_t stream_id;
+ uint16_t size_mul;
+ uint16_t size_lsb;
+ int16_t pts_delta;
+ uint8_t reserved_count;
+} FrameCode; // maybe s/FrameCode/framecode_t/ or change all to Java style, but don't mix
+
+typedef struct {
+ int last_flags;
+ int skip_until_key_frame;
+ int64_t last_pts;
+ int time_base_id;
+ AVRational time_base;
+ int msb_pts_shift;
+ int max_pts_distance;
+ int decode_delay; //FIXME duplicate of has_b_frames
+} StreamContext;// maybe s/StreamContext/streamcontext_t/
+
+typedef struct {
+ AVFormatContext *avf;
+// int written_packet_size;
+// int64_t packet_start[3]; //0-> startcode less, 1-> short startcode 2-> long startcodes
+ FrameCode frame_code[256];
+    uint64_t next_startcode;     ///< stores the next startcode if it has already been parsed but the stream isn't seekable
+ StreamContext *stream;
+ unsigned int max_distance;
+ unsigned int time_base_count;
+ int64_t last_syncpoint_pos;
+ AVRational *time_base;
+ struct AVTreeNode *syncpoints;
+} NUTContext;
+
+
+//FIXME move to a common spot, like crc.c/h
+static unsigned long av_crc04C11DB7_update(unsigned long checksum, const uint8_t *buf, unsigned int len){
+ return av_crc(av_crc04C11DB7, checksum, buf, len);
+}
diff --git a/contrib/ffmpeg/libavformat/nutdec.c b/contrib/ffmpeg/libavformat/nutdec.c
new file mode 100644
index 000000000..7e0f8cd93
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nutdec.c
@@ -0,0 +1,889 @@
+/*
+ * "NUT" Container Format demuxer
+ * Copyright (c) 2004-2006 Michael Niedermayer
+ * Copyright (c) 2003 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "tree.h"
+#include "nut.h"
+
+#undef NDEBUG
+#include <assert.h>
+
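+/* NUT coded values ("v" coding): big-endian base-128; each byte carries 7
+ * payload bits and the top bit is set on every byte except the last. For
+ * example the bytes 0x81 0x23 decode to (1<<7) + 0x23 = 0xA3. get_s() maps
+ * the unsigned value to a signed one: 0,1,2,3,4,... -> 0,1,-1,2,-2,... */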
+static uint64_t get_v(ByteIOContext *bc){
+ uint64_t val = 0;
+
+ for(;;)
+ {
+ int tmp = get_byte(bc);
+
+ if (tmp&0x80)
+ val= (val<<7) + tmp - 0x80;
+ else{
+ return (val<<7) + tmp;
+ }
+ }
+ return -1;
+}
+
+static int get_str(ByteIOContext *bc, char *string, unsigned int maxlen){
+ unsigned int len= get_v(bc);
+
+ if(len && maxlen)
+ get_buffer(bc, string, FFMIN(len, maxlen));
+ while(len > maxlen){
+ get_byte(bc);
+ len--;
+ }
+
+ if(maxlen)
+ string[FFMIN(len, maxlen-1)]= 0;
+
+ if(maxlen == len)
+ return -1;
+ else
+ return 0;
+}
+
+static int64_t get_s(ByteIOContext *bc){
+ int64_t v = get_v(bc) + 1;
+
+ if (v&1) return -(v>>1);
+ else return (v>>1);
+}
+
+static uint64_t get_fourcc(ByteIOContext *bc){
+ unsigned int len= get_v(bc);
+
+ if (len==2) return get_le16(bc);
+ else if(len==4) return get_le32(bc);
+ else return -1;
+}
+
+#ifdef TRACE
+static inline uint64_t get_v_trace(ByteIOContext *bc, char *file, char *func, int line){
+ uint64_t v= get_v(bc);
+
+ printf("get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+
+static inline int64_t get_s_trace(ByteIOContext *bc, char *file, char *func, int line){
+ int64_t v= get_s(bc);
+
+ printf("get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+
+static inline uint64_t get_vb_trace(ByteIOContext *bc, char *file, char *func, int line){
+ uint64_t v= get_vb(bc);
+
+ printf("get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+#define get_v(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_vb(bc) get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#endif
+
+static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int calculate_checksum)
+{
+ int64_t start, size;
+// start= url_ftell(bc) - 8;
+
+ size= get_v(bc);
+
+ init_checksum(bc, calculate_checksum ? av_crc04C11DB7_update : NULL, 0);
+
+// nut->packet_start[2] = start;
+// nut->written_packet_size= size;
+
+ return size;
+}
+
+static uint64_t find_any_startcode(ByteIOContext *bc, int64_t pos){
+ uint64_t state=0;
+
+ if(pos >= 0)
+        url_fseek(bc, pos, SEEK_SET); //note, this may fail if the stream isn't seekable, but that shouldn't matter, as in this case we simply start where we are currently
+
+ while(!url_feof(bc)){
+ state= (state<<8) | get_byte(bc);
+ if((state>>56) != 'N')
+ continue;
+ switch(state){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case SYNCPOINT_STARTCODE:
+ case INFO_STARTCODE:
+ case INDEX_STARTCODE:
+ return state;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * find the given startcode.
+ * @param code the startcode
+ * @param pos the start position of the search, or -1 if the current position
+ * @returns the position of the startcode or -1 if not found
+ */
+static int64_t find_startcode(ByteIOContext *bc, uint64_t code, int64_t pos){
+ for(;;){
+ uint64_t startcode= find_any_startcode(bc, pos);
+ if(startcode == code)
+ return url_ftell(bc) - 8;
+ else if(startcode == 0)
+ return -1;
+ pos=-1;
+ }
+}
+
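+/* Recover a full pts from its coded low bits: the result is the value that
+ * is congruent to lsb modulo 2^msb_pts_shift and lies within roughly half
+ * the wrap range of the previously decoded pts. */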
+static int64_t lsb2full(StreamContext *stream, int64_t lsb){
+ int64_t mask = (1<<stream->msb_pts_shift)-1;
+ int64_t delta= stream->last_pts - mask/2;
+ return ((lsb - delta)&mask) + delta;
+}
+
+static int nut_probe(AVProbeData *p){
+ int i;
+ uint64_t code= 0;
+
+ for (i = 0; i < p->buf_size; i++) {
+ code = (code << 8) | p->buf[i];
+ if (code == MAIN_STARTCODE)
+ return AVPROBE_SCORE_MAX;
+ }
+ return 0;
+}
+
+#define GET_V(dst, check) \
+ tmp= get_v(bc);\
+ if(!(check)){\
+ av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp);\
+ return -1;\
+ }\
+ dst= tmp;
+
+static int skip_reserved(ByteIOContext *bc, int64_t pos){
+ pos -= url_ftell(bc);
+ if(pos<0){
+ url_fseek(bc, pos, SEEK_CUR);
+ return -1;
+ }else{
+ while(pos--)
+ get_byte(bc);
+ return 0;
+ }
+}
+
+static int decode_main_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ uint64_t tmp, end;
+ unsigned int stream_count;
+ int i, j, tmp_stream, tmp_mul, tmp_pts, tmp_size, count, tmp_res;
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ GET_V(tmp , tmp >=2 && tmp <= 3)
+ GET_V(stream_count , tmp > 0 && tmp <=MAX_STREAMS)
+
+ nut->max_distance = get_v(bc);
+ if(nut->max_distance > 65536){
+ av_log(s, AV_LOG_DEBUG, "max_distance %d\n", nut->max_distance);
+ nut->max_distance= 65536;
+ }
+
+ GET_V(nut->time_base_count, tmp>0 && tmp<INT_MAX / sizeof(AVRational))
+ nut->time_base= av_malloc(nut->time_base_count * sizeof(AVRational));
+
+ for(i=0; i<nut->time_base_count; i++){
+ GET_V(nut->time_base[i].num, tmp>0 && tmp<(1ULL<<31))
+ GET_V(nut->time_base[i].den, tmp>0 && tmp<(1ULL<<31))
+ if(ff_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1){
+ av_log(s, AV_LOG_ERROR, "time base invalid\n");
+ return -1;
+ }
+ }
+ tmp_pts=0;
+ tmp_mul=1;
+ tmp_stream=0;
+ for(i=0; i<256;){
+ int tmp_flags = get_v(bc);
+ int tmp_fields= get_v(bc);
+ if(tmp_fields>0) tmp_pts = get_s(bc);
+ if(tmp_fields>1) tmp_mul = get_v(bc);
+ if(tmp_fields>2) tmp_stream= get_v(bc);
+ if(tmp_fields>3) tmp_size = get_v(bc);
+ else tmp_size = 0;
+ if(tmp_fields>4) tmp_res = get_v(bc);
+ else tmp_res = 0;
+ if(tmp_fields>5) count = get_v(bc);
+ else count = tmp_mul - tmp_size;
+
+ while(tmp_fields-- > 6)
+ get_v(bc);
+
+ if(count == 0 || i+count > 256){
+ av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i);
+ return -1;
+ }
+ if(tmp_stream >= stream_count){
+ av_log(s, AV_LOG_ERROR, "illegal stream number\n");
+ return -1;
+ }
+
+ for(j=0; j<count; j++,i++){
+ if (i == 'N') {
+ nut->frame_code[i].flags= FLAG_INVALID;
+ j--;
+ continue;
+ }
+ nut->frame_code[i].flags = tmp_flags ;
+ nut->frame_code[i].pts_delta = tmp_pts ;
+ nut->frame_code[i].stream_id = tmp_stream;
+ nut->frame_code[i].size_mul = tmp_mul ;
+ nut->frame_code[i].size_lsb = tmp_size+j;
+ nut->frame_code[i].reserved_count = tmp_res ;
+ }
+ }
+ assert(nut->frame_code['N'].flags == FLAG_INVALID);
+
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Main header checksum mismatch\n");
+ return -1;
+ }
+
+ nut->stream = av_mallocz(sizeof(StreamContext)*stream_count);
+ for(i=0; i<stream_count; i++){
+ av_new_stream(s, i);
+ }
+
+ return 0;
+}
+
+static int decode_stream_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ StreamContext *stc;
+ int class, stream_id;
+ uint64_t tmp, end;
+ AVStream *st;
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ GET_V(stream_id, tmp < s->nb_streams && !nut->stream[tmp].time_base.num);
+ stc= &nut->stream[stream_id];
+
+ st = s->streams[stream_id];
+ if (!st)
+ return AVERROR_NOMEM;
+
+ class = get_v(bc);
+ tmp = get_fourcc(bc);
+ st->codec->codec_tag= tmp;
+ switch(class)
+ {
+ case 0:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = codec_get_bmp_id(tmp);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR, "Unknown codec?!\n");
+ break;
+ case 1:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = codec_get_wav_id(tmp);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR, "Unknown codec?!\n");
+ break;
+ case 2:
+// st->codec->codec_type = CODEC_TYPE_TEXT;
+// break;
+ case 3:
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "Unknown stream class (%d)\n", class);
+ return -1;
+ }
+ GET_V(stc->time_base_id , tmp < nut->time_base_count);
+ GET_V(stc->msb_pts_shift , tmp < 16);
+ stc->max_pts_distance= get_v(bc);
+    GET_V(stc->decode_delay , tmp < 1000); //sanity limit, raise this if Moore's law holds
+ st->codec->has_b_frames= stc->decode_delay;
+ get_v(bc); //stream flags
+
+ GET_V(st->codec->extradata_size, tmp < (1<<30));
+ if(st->codec->extradata_size){
+ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(bc, st->codec->extradata, st->codec->extradata_size);
+ }
+
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO){
+ GET_V(st->codec->width , tmp > 0)
+ GET_V(st->codec->height, tmp > 0)
+ st->codec->sample_aspect_ratio.num= get_v(bc);
+ st->codec->sample_aspect_ratio.den= get_v(bc);
+ if((!st->codec->sample_aspect_ratio.num) != (!st->codec->sample_aspect_ratio.den)){
+ av_log(s, AV_LOG_ERROR, "invalid aspect ratio\n");
+ return -1;
+ }
+ get_v(bc); /* csp type */
+ }else if (st->codec->codec_type == CODEC_TYPE_AUDIO){
+ GET_V(st->codec->sample_rate , tmp > 0)
+ tmp= get_v(bc); // samplerate_den
+ if(tmp > st->codec->sample_rate){
+ av_log(s, AV_LOG_ERROR, "bleh, libnut muxed this ;)\n");
+ st->codec->sample_rate= tmp;
+ }
+ GET_V(st->codec->channels, tmp > 0)
+ }
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Stream header %d checksum mismatch\n", stream_id);
+ return -1;
+ }
+ stc->time_base= nut->time_base[stc->time_base_id];
+ av_set_pts_info(s->streams[stream_id], 63, stc->time_base.num, stc->time_base.den);
+ return 0;
+}
+
+static int decode_info_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ uint64_t tmp;
+ unsigned int stream_id_plus1, chapter_start, chapter_len, count;
+ int chapter_id, i;
+ int64_t value, end;
+ char name[256], str_value[1024], type_str[256], *type= type_str;
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ GET_V(stream_id_plus1, tmp <= s->nb_streams)
+ chapter_id = get_s(bc);
+ chapter_start= get_v(bc);
+ chapter_len = get_v(bc);
+ count = get_v(bc);
+ for(i=0; i<count; i++){
+ get_str(bc, name, sizeof(name));
+ value= get_s(bc);
+ if(value == -1){
+ type= "UTF-8";
+ get_str(bc, str_value, sizeof(str_value));
+ }else if(value == -2){
+ get_str(bc, type, sizeof(type));
+ get_str(bc, str_value, sizeof(str_value));
+ }else if(value == -3){
+ type= "s";
+ value= get_s(bc);
+ }else if(value == -4){
+ type= "t";
+ value= get_v(bc);
+ }else if(value < -4){
+ type= "r";
+ get_s(bc);
+ }else{
+ type= "v";
+ }
+
+ if(chapter_id==0 && !strcmp(type, "UTF-8")){
+ if (!strcmp(name, "Author"))
+ pstrcpy(s->author , sizeof(s->author) , str_value);
+ else if(!strcmp(name, "Title"))
+ pstrcpy(s->title , sizeof(s->title) , str_value);
+ else if(!strcmp(name, "Copyright"))
+ pstrcpy(s->copyright, sizeof(s->copyright), str_value);
+ else if(!strcmp(name, "Description"))
+ pstrcpy(s->comment , sizeof(s->comment) , str_value);
+ }
+ }
+
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Info header checksum mismatch\n");
+ return -1;
+ }
+ return 0;
+}
+
+int sp_pos_cmp(syncpoint_t *a, syncpoint_t *b){
+ return (a->pos - b->pos>>32) - (b->pos - a->pos>>32);
+}
+
+int sp_pts_cmp(syncpoint_t *a, syncpoint_t *b){
+ return (a->ts - b->ts>>32) - (b->ts - a->ts>>32);
+}
+
+static void add_sp(NUTContext *nut, int64_t pos, int64_t back_ptr, int64_t ts){
+ syncpoint_t *sp2, *sp= av_mallocz(sizeof(syncpoint_t));
+
+ sp->pos= pos;
+ sp->back_ptr= back_ptr;
+ sp->ts= ts;
+ sp2= av_tree_insert(&nut->syncpoints, sp, sp_pos_cmp);
+ if(sp2 && sp2 != sp)
+ av_free(sp);
+}
+
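+/* Parse a syncpoint: the coded value combines a global timestamp with a time
+ * base index (value % time_base_count), from which every stream's last_pts
+ * is rescaled; back_ptr points back, in units of 16 bytes, to an earlier
+ * syncpoint that is used as the seek target. */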
+static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ int64_t end, tmp;
+ int i;
+ AVRational time_base;
+
+ nut->last_syncpoint_pos= url_ftell(bc)-8;
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ tmp= get_v(bc);
+ *back_ptr= nut->last_syncpoint_pos - 16*get_v(bc);
+ if(*back_ptr < 0)
+ return -1;
+
+ time_base= nut->time_base[tmp % nut->time_base_count];
+ for(i=0; i<s->nb_streams; i++){
+ nut->stream[i].last_pts= av_rescale_rnd(
+ tmp / nut->time_base_count,
+ time_base.num * (int64_t)nut->stream[i].time_base.den,
+ time_base.den * (int64_t)nut->stream[i].time_base.num,
+ AV_ROUND_DOWN);
+ //last_key_frame ?
+ }
+ //FIXME put this in a reset func maybe
+
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n");
+ return -1;
+ }
+
+    *ts= tmp / nut->time_base_count * av_q2d(nut->time_base[tmp % nut->time_base_count])*AV_TIME_BASE;
+ add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts);
+
+ return 0;
+}
+
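+/* Locate and parse the index at the end of the file: an 8-byte length stored
+ * 12 bytes before EOF gives the distance back to the INDEX startcode. The
+ * index holds the syncpoint positions (delta coded, in units of 16 bytes)
+ * and, per stream, run/bitmask coded keyframe flags, which are converted
+ * into av_add_index_entry() calls. */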
+static int find_and_decode_index(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ uint64_t tmp, end;
+ int i, j, syncpoint_count;
+ int64_t filesize= url_fsize(bc);
+ int64_t *syncpoints;
+ int8_t *has_keyframe;
+
+ url_fseek(bc, filesize-12, SEEK_SET);
+ url_fseek(bc, filesize-get_be64(bc), SEEK_SET);
+ if(get_be64(bc) != INDEX_STARTCODE){
+ av_log(s, AV_LOG_ERROR, "no index at the end\n");
+ return -1;
+ }
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ get_v(bc); //max_pts
+ GET_V(syncpoint_count, tmp < INT_MAX/8 && tmp > 0)
+ syncpoints= av_malloc(sizeof(int64_t)*syncpoint_count);
+ has_keyframe= av_malloc(sizeof(int8_t)*(syncpoint_count+1));
+ for(i=0; i<syncpoint_count; i++){
+ GET_V(syncpoints[i], tmp>0)
+ if(i)
+ syncpoints[i] += syncpoints[i-1];
+ }
+
+ for(i=0; i<s->nb_streams; i++){
+ int64_t last_pts= -1;
+ for(j=0; j<syncpoint_count;){
+ uint64_t x= get_v(bc);
+ int type= x&1;
+ int n= j;
+ x>>=1;
+ if(type){
+ int flag= x&1;
+ x>>=1;
+ if(n+x >= syncpoint_count + 1){
+ av_log(s, AV_LOG_ERROR, "index overflow A\n");
+ return -1;
+ }
+ while(x--)
+ has_keyframe[n++]= flag;
+ has_keyframe[n++]= !flag;
+ }else{
+ while(x != 1){
+ if(n>=syncpoint_count + 1){
+ av_log(s, AV_LOG_ERROR, "index overflow B\n");
+ return -1;
+ }
+ has_keyframe[n++]= x&1;
+ x>>=1;
+ }
+ }
+ if(has_keyframe[0]){
+ av_log(s, AV_LOG_ERROR, "keyframe before first syncpoint in index\n");
+ return -1;
+ }
+ assert(n<=syncpoint_count+1);
+ for(; j<n; j++){
+ if(has_keyframe[j]){
+ uint64_t B, A= get_v(bc);
+ if(!A){
+ A= get_v(bc);
+ B= get_v(bc);
+ //eor_pts[j][i] = last_pts + A + B
+ }else
+ B= 0;
+ av_add_index_entry(
+ s->streams[i],
+ 16*syncpoints[j-1],
+ last_pts + A,
+ 0,
+ 0,
+ AVINDEX_KEYFRAME);
+ last_pts += A + B;
+ }
+ }
+ }
+ }
+
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Index checksum mismatch\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int nut_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos;
+ int inited_stream_count;
+
+ nut->avf= s;
+
+ /* main header */
+ pos=0;
+ do{
+ pos= find_startcode(bc, MAIN_STARTCODE, pos)+1;
+ if (pos<0+1){
+ av_log(s, AV_LOG_ERROR, "no main startcode found\n");
+ return -1;
+ }
+ }while(decode_main_header(nut) < 0);
+
+ /* stream headers */
+ pos=0;
+ for(inited_stream_count=0; inited_stream_count < s->nb_streams;){
+ pos= find_startcode(bc, STREAM_STARTCODE, pos)+1;
+ if (pos<0+1){
+ av_log(s, AV_LOG_ERROR, "not all stream headers found\n");
+ return -1;
+ }
+ if(decode_stream_header(nut) >= 0)
+ inited_stream_count++;
+ }
+
+ /* info headers */
+ pos=0;
+ for(;;){
+ uint64_t startcode= find_any_startcode(bc, pos);
+ pos= url_ftell(bc);
+
+ if(startcode==0){
+ av_log(s, AV_LOG_ERROR, "EOF before video frames\n");
+ return -1;
+ }else if(startcode == SYNCPOINT_STARTCODE){
+ nut->next_startcode= startcode;
+ break;
+ }else if(startcode != INFO_STARTCODE){
+ continue;
+ }
+
+ decode_info_header(nut);
+ }
+
+ s->data_offset= pos-8;
+
+ if(!url_is_streamed(bc)){
+ int64_t orig_pos= url_ftell(bc);
+ find_and_decode_index(nut);
+ url_fseek(bc, orig_pos, SEEK_SET);
+ }
+ assert(nut->next_startcode == SYNCPOINT_STARTCODE);
+
+ return 0;
+}
+
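+/* Decode a frame header: the frame_code byte indexes the table built from
+ * the main header, which provides default flags, stream id, pts delta, size
+ * multiplier/lsb and reserved count; individual FLAG_* bits select which of
+ * these are instead coded explicitly after the frame code. */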
+static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, int frame_code){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ StreamContext *stc;
+ int size, flags, size_mul, pts_delta, i, reserved_count;
+ uint64_t tmp;
+
+ if(url_ftell(bc) > nut->last_syncpoint_pos + nut->max_distance){
+        av_log(s, AV_LOG_ERROR, "last frame must have been damaged %"PRId64" > %"PRId64" + %d\n", url_ftell(bc), nut->last_syncpoint_pos, nut->max_distance);
+ return -1;
+ }
+
+ flags = nut->frame_code[frame_code].flags;
+ size_mul = nut->frame_code[frame_code].size_mul;
+ size = nut->frame_code[frame_code].size_lsb;
+ *stream_id = nut->frame_code[frame_code].stream_id;
+ pts_delta = nut->frame_code[frame_code].pts_delta;
+ reserved_count = nut->frame_code[frame_code].reserved_count;
+
+ if(flags & FLAG_INVALID)
+ return -1;
+ if(flags & FLAG_CODED)
+ flags ^= get_v(bc);
+ if(flags & FLAG_STREAM_ID){
+ GET_V(*stream_id, tmp < s->nb_streams)
+ }
+ stc= &nut->stream[*stream_id];
+ if(flags&FLAG_CODED_PTS){
+ int coded_pts= get_v(bc);
+//FIXME check last_pts validity?
+ if(coded_pts < (1<<stc->msb_pts_shift)){
+ *pts=lsb2full(stc, coded_pts);
+ }else
+ *pts=coded_pts - (1<<stc->msb_pts_shift);
+ }else
+ *pts= stc->last_pts + pts_delta;
+ if(flags&FLAG_SIZE_MSB){
+ size += size_mul*get_v(bc);
+ }
+ if(flags&FLAG_RESERVED)
+ reserved_count= get_v(bc);
+ for(i=0; i<reserved_count; i++)
+ get_v(bc);
+ if(flags&FLAG_CHECKSUM){
+ get_be32(bc); //FIXME check this
+ }else if(size > 2*nut->max_distance || FFABS(stc->last_pts - *pts) > stc->max_pts_distance){
+ av_log(s, AV_LOG_ERROR, "frame size > 2max_distance and no checksum\n");
+ return -1;
+ }
+
+ stc->last_pts= *pts;
+ stc->last_flags= flags;
+
+ return size;
+}
+
+static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ int size, stream_id, discard;
+ int64_t pts, last_IP_pts;
+ StreamContext *stc;
+
+ size= decode_frame_header(nut, &pts, &stream_id, frame_code);
+ if(size < 0)
+ return -1;
+
+ stc= &nut->stream[stream_id];
+
+ if (stc->last_flags & FLAG_KEY)
+ stc->skip_until_key_frame=0;
+
+ discard= s->streams[ stream_id ]->discard;
+ last_IP_pts= s->streams[ stream_id ]->last_IP_pts;
+ if( (discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY))
+ ||(discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && last_IP_pts > pts)
+ || discard >= AVDISCARD_ALL
+ || stc->skip_until_key_frame){
+ url_fskip(bc, size);
+ return 1;
+ }
+
+ av_get_packet(bc, pkt, size);
+ pkt->stream_index = stream_id;
+ if (stc->last_flags & FLAG_KEY)
+ pkt->flags |= PKT_FLAG_KEY;
+ pkt->pts = pts;
+
+ return 0;
+}
+
+static int nut_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int i, frame_code=0, ret, skip;
+ int64_t ts, back_ptr;
+
+ for(;;){
+ int64_t pos= url_ftell(bc);
+ uint64_t tmp= nut->next_startcode;
+ nut->next_startcode=0;
+
+ if (url_feof(bc))
+ return -1;
+
+ if(tmp){
+ pos-=8;
+ }else{
+ frame_code = get_byte(bc);
+ if(frame_code == 'N'){
+ tmp= frame_code;
+ for(i=1; i<8; i++)
+ tmp = (tmp<<8) + get_byte(bc);
+ }
+ }
+ switch(tmp){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case INDEX_STARTCODE:
+ skip= get_packetheader(nut, bc, 0);
+ url_fseek(bc, skip, SEEK_CUR);
+ break;
+ case INFO_STARTCODE:
+ if(decode_info_header(nut)<0)
+ goto resync;
+ break;
+ case SYNCPOINT_STARTCODE:
+ if(decode_syncpoint(nut, &ts, &back_ptr)<0)
+ goto resync;
+ frame_code = get_byte(bc);
+ case 0:
+ ret= decode_frame(nut, pkt, frame_code);
+ if(ret==0)
+ return 0;
+ else if(ret==1) //ok but discard packet
+ break;
+ default:
+resync:
+av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos);
+ tmp= find_any_startcode(bc, nut->last_syncpoint_pos+1);
+ if(tmp==0)
+ return -1;
+av_log(s, AV_LOG_DEBUG, "sync\n");
+ nut->next_startcode= tmp;
+ }
+ }
+}
+
+static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit){
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos, pts, back_ptr;
+av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_index, *pos_arg, pos_limit);
+
+ pos= *pos_arg;
+resync:
+ do{
+ pos= find_startcode(bc, SYNCPOINT_STARTCODE, pos)+1;
+ if(pos < 1){
+ assert(nut->next_startcode == 0);
+ av_log(s, AV_LOG_ERROR, "read_timestamp failed\n");
+ return AV_NOPTS_VALUE;
+ }
+ }while(decode_syncpoint(nut, &pts, &back_ptr) < 0);
+ *pos_arg = pos-1;
+ assert(nut->last_syncpoint_pos == *pos_arg);
+
+    av_log(s, AV_LOG_DEBUG, "return %"PRId64" %"PRId64"\n", pts, back_ptr);
+ if (stream_index == -1) return pts;
+ else if(stream_index == -2) return back_ptr;
+
+assert(0);
+}
+
+static int read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags){
+ NUTContext *nut = s->priv_data;
+ AVStream *st= s->streams[stream_index];
+ syncpoint_t dummy={.ts= pts*av_q2d(st->time_base)*AV_TIME_BASE};
+ syncpoint_t nopts_sp= {.ts= AV_NOPTS_VALUE, .back_ptr= AV_NOPTS_VALUE};
+ syncpoint_t *sp, *next_node[2]= {&nopts_sp, &nopts_sp};
+ int64_t pos, pos2, ts;
+ int i;
+
+ if(st->index_entries){
+ int index= av_index_search_timestamp(st, pts, flags);
+ if(index<0)
+ return -1;
+
+ pos2= st->index_entries[index].pos;
+ ts = st->index_entries[index].timestamp;
+ }else{
+ av_tree_find(nut->syncpoints, &dummy, sp_pts_cmp, next_node);
+        av_log(s, AV_LOG_DEBUG, "%"PRId64"-%"PRId64" %"PRId64"-%"PRId64"\n", next_node[0]->pos, next_node[1]->pos,
+                next_node[0]->ts , next_node[1]->ts);
+ pos= av_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos,
+ next_node[0]->ts , next_node[1]->ts, AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);
+
+ if(!(flags & AVSEEK_FLAG_BACKWARD)){
+ dummy.pos= pos+16;
+ next_node[1]= &nopts_sp;
+ av_tree_find(nut->syncpoints, &dummy, sp_pos_cmp, next_node);
+ pos2= av_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos,
+ next_node[0]->back_ptr, next_node[1]->back_ptr, flags, &ts, nut_read_timestamp);
+ if(pos2>=0)
+ pos= pos2;
+            //FIXME direction, but I think it doesn't matter
+ }
+ dummy.pos= pos;
+ sp= av_tree_find(nut->syncpoints, &dummy, sp_pos_cmp, NULL);
+
+ assert(sp);
+ pos2= sp->back_ptr - 15;
+ }
+ av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos2);
+ pos= find_startcode(&s->pb, SYNCPOINT_STARTCODE, pos2);
+ url_fseek(&s->pb, pos, SEEK_SET);
+ av_log(NULL, AV_LOG_DEBUG, "SP: %"PRId64"\n", pos);
+ if(pos2 > pos || pos2 + 15 < pos){
+ av_log(NULL, AV_LOG_ERROR, "no syncpoint at backptr pos\n");
+ }
+ for(i=0; i<s->nb_streams; i++)
+ nut->stream[i].skip_until_key_frame=1;
+
+ return 0;
+}
+
+static int nut_read_close(AVFormatContext *s)
+{
+ NUTContext *nut = s->priv_data;
+
+ av_freep(&nut->time_base);
+ av_freep(&nut->stream);
+
+ return 0;
+}
+
+#ifdef CONFIG_NUT_DEMUXER
+AVInputFormat nut_demuxer = {
+ "nut",
+ "nut format",
+ sizeof(NUTContext),
+ nut_probe,
+ nut_read_header,
+ nut_read_packet,
+ nut_read_close,
+ read_seek,
+ .extensions = "nut",
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/nuv.c b/contrib/ffmpeg/libavformat/nuv.c
new file mode 100644
index 000000000..3b96eb940
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nuv.c
@@ -0,0 +1,241 @@
+/*
+ * NuppelVideo demuxer.
+ * Copyright (c) 2006 Reimar Doeffinger.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+
+typedef struct {
+ int v_id;
+ int a_id;
+} NUVContext;
+
+typedef enum {
+ NUV_VIDEO = 'V',
+ NUV_EXTRADATA = 'D',
+ NUV_AUDIO = 'A',
+ NUV_SEEKP = 'R',
+ NUV_MYTHEXT = 'X'
+} frametype_t;
+
+static int nuv_probe(AVProbeData *p) {
+ if (p->buf_size < 12)
+ return 0;
+ if (!memcmp(p->buf, "NuppelVideo", 12))
+ return AVPROBE_SCORE_MAX;
+ if (!memcmp(p->buf, "MythTVVideo", 12))
+ return AVPROBE_SCORE_MAX;
+ return 0;
+}
+
+//! little macro to sanitize packet size
+#define PKTSIZE(s) (s & 0xffffff)
+
+/**
+ * \brief read until all data needed for decoding has been found
+ * \param vst video stream of which to change parameters
+ * \param ast audio stream of which to change parameters
+ * \param myth set if this is a MythTVVideo format file
+ * \return 1 if all required codec data was found
+ */
+static int get_codec_data(ByteIOContext *pb, AVStream *vst,
+ AVStream *ast, int myth) {
+ frametype_t frametype;
+ if (!vst && !myth)
+ return 1; // no codec data needed
+ while (!url_feof(pb)) {
+ int size, subtype;
+ frametype = get_byte(pb);
+ switch (frametype) {
+ case NUV_EXTRADATA:
+ subtype = get_byte(pb);
+ url_fskip(pb, 6);
+ size = PKTSIZE(get_le32(pb));
+ if (vst && subtype == 'R') {
+ vst->codec->extradata_size = size;
+ vst->codec->extradata = av_malloc(size);
+ get_buffer(pb, vst->codec->extradata, size);
+ size = 0;
+ if (!myth)
+ return 1;
+ }
+ break;
+ case NUV_MYTHEXT:
+ url_fskip(pb, 7);
+ size = PKTSIZE(get_le32(pb));
+ if (size != 128 * 4)
+ break;
+ get_le32(pb); // version
+ if (vst) {
+ vst->codec->codec_tag = get_le32(pb);
+ vst->codec->codec_id =
+ codec_get_id(codec_bmp_tags, vst->codec->codec_tag);
+ } else
+ url_fskip(pb, 4);
+
+ if (ast) {
+ ast->codec->codec_tag = get_le32(pb);
+ ast->codec->sample_rate = get_le32(pb);
+ ast->codec->bits_per_sample = get_le32(pb);
+ ast->codec->channels = get_le32(pb);
+ ast->codec->codec_id =
+ wav_codec_get_id(ast->codec->codec_tag,
+ ast->codec->bits_per_sample);
+ } else
+ url_fskip(pb, 4 * 4);
+
+ size -= 6 * 4;
+ url_fskip(pb, size);
+ return 1;
+ case NUV_SEEKP:
+ size = 11;
+ break;
+ default:
+ url_fskip(pb, 7);
+ size = PKTSIZE(get_le32(pb));
+ break;
+ }
+ url_fskip(pb, size);
+ }
+ return 0;
+}
+
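+/* Parse the NuppelVideo file header: a 12-byte signature ("NuppelVideo" or
+ * "MythTVVideo"), a version string, frame geometry, aspect ratio and fps as
+ * little-endian doubles, and per-type packet counts that decide whether a
+ * video and/or audio stream is created. */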
+static int nuv_header(AVFormatContext *s, AVFormatParameters *ap) {
+ NUVContext *ctx = (NUVContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ char id_string[12], version_string[5];
+ double aspect, fps;
+ int is_mythtv, width, height, v_packs, a_packs;
+ int stream_nr = 0;
+ AVStream *vst = NULL, *ast = NULL;
+ get_buffer(pb, id_string, 12);
+ is_mythtv = !memcmp(id_string, "MythTVVideo", 12);
+ get_buffer(pb, version_string, 5);
+ url_fskip(pb, 3); // padding
+ width = get_le32(pb);
+ height = get_le32(pb);
+ get_le32(pb); // unused, "desiredwidth"
+ get_le32(pb); // unused, "desiredheight"
+ get_byte(pb); // 'P' == progressive, 'I' == interlaced
+ url_fskip(pb, 3); // padding
+ aspect = av_int2dbl(get_le64(pb));
+ fps = av_int2dbl(get_le64(pb));
+
+ // number of packets per stream type, -1 means unknown, e.g. streaming
+ v_packs = get_le32(pb);
+ a_packs = get_le32(pb);
+ get_le32(pb); // text
+
+ get_le32(pb); // keyframe distance (?)
+
+ if (v_packs) {
+ ctx->v_id = stream_nr++;
+ vst = av_new_stream(s, ctx->v_id);
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = CODEC_ID_NUV;
+ vst->codec->codec_tag = MKTAG('R', 'J', 'P', 'G');
+ vst->codec->width = width;
+ vst->codec->height = height;
+ vst->codec->bits_per_sample = 10;
+ vst->codec->sample_aspect_ratio = av_d2q(aspect, 10000);
+ vst->r_frame_rate = av_d2q(1.0 / fps, 10000);
+ av_set_pts_info(vst, 32, 1, 1000);
+ } else
+ ctx->v_id = -1;
+
+ if (a_packs) {
+ ctx->a_id = stream_nr++;
+ ast = av_new_stream(s, ctx->a_id);
+ ast->codec->codec_type = CODEC_TYPE_AUDIO;
+ ast->codec->codec_id = CODEC_ID_PCM_S16LE;
+ ast->codec->channels = 2;
+ ast->codec->sample_rate = 44100;
+ ast->codec->bit_rate = 2 * 2 * 44100 * 8;
+ ast->codec->block_align = 2 * 2;
+ ast->codec->bits_per_sample = 16;
+ av_set_pts_info(ast, 32, 1, 1000);
+ } else
+ ctx->a_id = -1;
+
+ get_codec_data(pb, vst, ast, is_mythtv);
+ return 0;
+}
+
+#define HDRSIZE 12
+
+static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
+ NUVContext *ctx = (NUVContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint8_t hdr[HDRSIZE];
+ frametype_t frametype;
+ int ret, size;
+ while (!url_feof(pb)) {
+ ret = get_buffer(pb, hdr, HDRSIZE);
+ if (ret <= 0)
+ return ret ? ret : -1;
+ frametype = hdr[0];
+ size = PKTSIZE(LE_32(&hdr[8]));
+ switch (frametype) {
+ case NUV_VIDEO:
+ case NUV_EXTRADATA:
+ if (ctx->v_id < 0) {
+ av_log(s, AV_LOG_ERROR, "Video packet in file without video stream!\n");
+ url_fskip(pb, size);
+ break;
+ }
+ ret = av_new_packet(pkt, HDRSIZE + size);
+ if (ret < 0)
+ return ret;
+ pkt->pos = url_ftell(pb);
+ pkt->pts = LE_32(&hdr[4]);
+ pkt->stream_index = ctx->v_id;
+ memcpy(pkt->data, hdr, HDRSIZE);
+ ret = get_buffer(pb, pkt->data + HDRSIZE, size);
+ return ret;
+ case NUV_AUDIO:
+ if (ctx->a_id < 0) {
+ av_log(s, AV_LOG_ERROR, "Audio packet in file without audio stream!\n");
+ url_fskip(pb, size);
+ break;
+ }
+ ret = av_get_packet(pb, pkt, size);
+ pkt->pts = LE_32(&hdr[4]);
+ pkt->stream_index = ctx->a_id;
+ return ret;
+ case NUV_SEEKP:
+ // contains no data, size value is invalid
+ break;
+ default:
+ url_fskip(pb, size);
+ break;
+ }
+ }
+ return AVERROR_IO;
+}
+
+AVInputFormat nuv_demuxer = {
+ "nuv",
+ "NuppelVideo format",
+ sizeof(NUVContext),
+ nuv_probe,
+ nuv_header,
+ nuv_packet,
+ NULL,
+ NULL,
+};
diff --git a/contrib/ffmpeg/libavformat/ogg.c b/contrib/ffmpeg/libavformat/ogg.c
new file mode 100644
index 000000000..369fa4639
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ogg.c
@@ -0,0 +1,283 @@
+/*
+ * Ogg bitstream support
+ * Mark Hills <mark@pogo.org.uk>
+ *
+ * Uses libogg, but requires libvorbisenc to construct correct headers
+ * when the container holds a Vorbis stream -- currently the only supported format
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+
+#include <ogg/ogg.h>
+
+#include "avformat.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define DECODER_BUFFER_SIZE 4096
+
+
+typedef struct OggContext {
+ /* output */
+ ogg_stream_state os ;
+ int header_handled ;
+ ogg_packet op;
+
+ /* input */
+ ogg_sync_state oy ;
+} OggContext ;
+
+
+#ifdef CONFIG_MUXERS
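+/* The muxer expects the three Vorbis headers concatenated in the codec
+ * extradata using Xiph-style lacing: byte 0 is the packet count minus one,
+ * the lengths of the first two headers follow as runs of 0xff plus a final
+ * byte, and the third header takes whatever remains. Each header is fed to
+ * libogg as one of the first packets of the stream. */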
+static int ogg_write_header(AVFormatContext *avfcontext)
+{
+ OggContext *context = avfcontext->priv_data;
+ ogg_packet *op= &context->op;
+ int n;
+
+ ogg_stream_init(&context->os, 31415);
+
+ for(n = 0 ; n < avfcontext->nb_streams ; n++) {
+ AVCodecContext *codec = avfcontext->streams[n]->codec;
+ uint8_t *headers = codec->extradata;
+ int headers_len = codec->extradata_size;
+ uint8_t *header_start[3];
+ int header_len[3];
+ int i, j;
+
+ av_set_pts_info(avfcontext->streams[n], 60, 1, AV_TIME_BASE);
+
+ for(j=1,i=0;i<2;++i, ++j) {
+ header_len[i]=0;
+ while(j<headers_len && headers[j]==0xff) {
+ header_len[i]+=0xff;
+ ++j;
+ }
+ header_len[i]+=headers[j];
+ }
+ header_len[2]=headers_len-header_len[0]-header_len[1]-j;
+ headers+=j;
+ header_start[0] = headers;
+ header_start[1] = header_start[0] + header_len[0];
+ header_start[2] = header_start[1] + header_len[1];
+
+ for(i=0; i < 3; ++i){
+ op->bytes = header_len[i];
+
+ op->packet= header_start[i];
+ op->b_o_s= op->packetno==0;
+
+ ogg_stream_packetin(&context->os, op);
+
+ op->packetno++; //FIXME multiple streams
+ }
+
+ context->header_handled = 0 ;
+ }
+
+ return 0 ;
+}
+
+static int ogg_write_packet(AVFormatContext *avfcontext, AVPacket *pkt)
+{
+ OggContext *context = avfcontext->priv_data ;
+ AVCodecContext *avctx= avfcontext->streams[pkt->stream_index]->codec;
+ ogg_packet *op= &context->op;
+ ogg_page og ;
+ int64_t pts;
+
+ pts= av_rescale(pkt->pts, avctx->sample_rate, AV_TIME_BASE);
+
+// av_log(avfcontext, AV_LOG_DEBUG, "M%d\n", size);
+
+ /* flush header packets so audio starts on a new page */
+
+ if(!context->header_handled) {
+ while(ogg_stream_flush(&context->os, &og)) {
+ put_buffer(&avfcontext->pb, og.header, og.header_len) ;
+ put_buffer(&avfcontext->pb, og.body, og.body_len) ;
+ put_flush_packet(&avfcontext->pb);
+ }
+ context->header_handled = 1 ;
+ }
+
+ op->packet = (uint8_t*) pkt->data;
+ op->bytes = pkt->size;
+ op->b_o_s = op->packetno == 0;
+ op->granulepos= pts;
+
+ /* correct the fields in the packet -- essential for streaming */
+
+ ogg_stream_packetin(&context->os, op);
+
+ while(ogg_stream_pageout(&context->os, &og)) {
+ put_buffer(&avfcontext->pb, og.header, og.header_len);
+ put_buffer(&avfcontext->pb, og.body, og.body_len);
+ put_flush_packet(&avfcontext->pb);
+ }
+ op->packetno++;
+
+ return 0;
+}
+
+
+static int ogg_write_trailer(AVFormatContext *avfcontext) {
+ OggContext *context = avfcontext->priv_data ;
+ ogg_page og ;
+
+ while(ogg_stream_flush(&context->os, &og)) {
+ put_buffer(&avfcontext->pb, og.header, og.header_len) ;
+ put_buffer(&avfcontext->pb, og.body, og.body_len) ;
+ put_flush_packet(&avfcontext->pb);
+ }
+
+ ogg_stream_clear(&context->os) ;
+ return 0 ;
+}
+
+
+AVOutputFormat ogg_muxer = {
+ "ogg",
+ "Ogg Vorbis",
+ "audio/x-vorbis",
+ "ogg",
+ sizeof(OggContext),
+ CODEC_ID_VORBIS,
+ 0,
+ ogg_write_header,
+ ogg_write_packet,
+ ogg_write_trailer,
+} ;
+#endif //CONFIG_MUXERS
+
+#if 0
+static int next_packet(AVFormatContext *avfcontext, ogg_packet *op) {
+ OggContext *context = avfcontext->priv_data ;
+ ogg_page og ;
+ char *buf ;
+
+ while(ogg_stream_packetout(&context->os, op) != 1) {
+
+ /* while no pages are available, read in more data to the sync */
+ while(ogg_sync_pageout(&context->oy, &og) != 1) {
+ buf = ogg_sync_buffer(&context->oy, DECODER_BUFFER_SIZE) ;
+ if(get_buffer(&avfcontext->pb, buf, DECODER_BUFFER_SIZE) <= 0)
+ return 1 ;
+ ogg_sync_wrote(&context->oy, DECODER_BUFFER_SIZE) ;
+ }
+
+ /* got a page. Feed it into the stream and get the packet */
+ if(ogg_stream_pagein(&context->os, &og) != 0)
+ return 1 ;
+ }
+
+ return 0 ;
+}
+
+
+static int ogg_read_header(AVFormatContext *avfcontext, AVFormatParameters *ap)
+{
+ OggContext *context = avfcontext->priv_data;
+ ogg_packet op ;
+ char *buf ;
+ ogg_page og ;
+ AVStream *ast ;
+ AVCodecContext *codec;
+ uint8_t *p;
+ int i;
+
+ ogg_sync_init(&context->oy) ;
+ buf = ogg_sync_buffer(&context->oy, DECODER_BUFFER_SIZE) ;
+
+ if(get_buffer(&avfcontext->pb, buf, DECODER_BUFFER_SIZE) <= 0)
+ return AVERROR_IO ;
+
+ ogg_sync_wrote(&context->oy, DECODER_BUFFER_SIZE) ;
+ ogg_sync_pageout(&context->oy, &og) ;
+ ogg_stream_init(&context->os, ogg_page_serialno(&og)) ;
+ ogg_stream_pagein(&context->os, &og) ;
+
+ /* currently only one vorbis stream supported */
+
+ ast = av_new_stream(avfcontext, 0) ;
+ if(!ast)
+ return AVERROR_NOMEM ;
+ av_set_pts_info(ast, 60, 1, AV_TIME_BASE);
+
+ codec= &ast->codec;
+ codec->codec_type = CODEC_TYPE_AUDIO;
+ codec->codec_id = CODEC_ID_VORBIS;
+ for(i=0; i<3; i++){
+ if(next_packet(avfcontext, &op)){
+ return -1;
+ }
+ if(op.bytes >= (1<<16) || op.bytes < 0)
+ return -1;
+ codec->extradata_size+= 2 + op.bytes;
+ codec->extradata= av_realloc(codec->extradata, codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ memset(codec->extradata + codec->extradata_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+ p= codec->extradata + codec->extradata_size - 2 - op.bytes;
+ *(p++)= op.bytes>>8;
+ *(p++)= op.bytes&0xFF;
+ memcpy(p, op.packet, op.bytes);
+ }
+
+ return 0 ;
+}
+
+
+static int ogg_read_packet(AVFormatContext *avfcontext, AVPacket *pkt) {
+ ogg_packet op ;
+
+ if(next_packet(avfcontext, &op))
+ return AVERROR_IO ;
+ if(av_new_packet(pkt, op.bytes) < 0)
+ return AVERROR_IO ;
+ pkt->stream_index = 0 ;
+ memcpy(pkt->data, op.packet, op.bytes);
+ if(avfcontext->streams[0]->codec.sample_rate && op.granulepos!=-1)
+ pkt->pts= av_rescale(op.granulepos, AV_TIME_BASE, avfcontext->streams[0]->codec.sample_rate);
+// printf("%"PRId64" %d %d\n", pkt->pts, (int)op.granulepos, avfcontext->streams[0]->codec.sample_rate);
+
+ return op.bytes;
+}
+
+
+static int ogg_read_close(AVFormatContext *avfcontext) {
+ OggContext *context = avfcontext->priv_data ;
+
+ ogg_stream_clear(&context->os) ;
+ ogg_sync_clear(&context->oy) ;
+
+ return 0 ;
+}
+
+
+static AVInputFormat ogg_iformat = {
+ "ogg",
+ "Ogg Vorbis",
+ sizeof(OggContext),
+ NULL,
+ ogg_read_header,
+ ogg_read_packet,
+ ogg_read_close,
+ .extensions = "ogg",
+} ;
+#endif
diff --git a/contrib/ffmpeg/libavformat/ogg2.c b/contrib/ffmpeg/libavformat/ogg2.c
new file mode 100644
index 000000000..1e5d38620
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ogg2.c
@@ -0,0 +1,697 @@
+/*
+ * Ogg bitstream support
+ * Luca Barbato <lu_zero@gentoo.org>
+ * Based on tcvp implementation
+ *
+ */
+
+/**
+ Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+
+#include <stdio.h>
+#include "ogg2.h"
+#include "avformat.h"
+
+#define MAX_PAGE_SIZE 65307
+#define DECODER_BUFFER_SIZE MAX_PAGE_SIZE
+
+static ogg_codec_t *ogg_codecs[] = {
+ &vorbis_codec,
+ &theora_codec,
+ &flac_codec,
+ &ogm_video_codec,
+ &ogm_audio_codec,
+ &ogm_old_codec,
+ NULL
+};
+
+#if 0 // CONFIG_MUXERS
+static int
+ogg_write_header (AVFormatContext * avfcontext)
+{
+}
+
+static int
+ogg_write_packet (AVFormatContext * avfcontext, AVPacket * pkt)
+{
+}
+
+
+static int
+ogg_write_trailer (AVFormatContext * avfcontext)
+{
+}
+
+
+AVOutputFormat ogg_muxer = {
+ "ogg",
+ "Ogg Vorbis",
+ "audio/x-vorbis",
+ "ogg",
+ sizeof (OggContext),
+ CODEC_ID_VORBIS,
+ 0,
+ ogg_write_header,
+ ogg_write_packet,
+ ogg_write_trailer,
+};
+#endif //CONFIG_MUXERS
+
+//FIXME We could avoid some structure duplication
+static int
+ogg_save (AVFormatContext * s)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_state_t *ost =
+ av_malloc(sizeof (*ost) + (ogg->nstreams-1) * sizeof (*ogg->streams));
+ int i;
+    ost->pos = url_ftell (&s->pb);
+ ost->curidx = ogg->curidx;
+ ost->next = ogg->state;
+ memcpy(ost->streams, ogg->streams, ogg->nstreams * sizeof(*ogg->streams));
+
+ for (i = 0; i < ogg->nstreams; i++){
+ ogg_stream_t *os = ogg->streams + i;
+ os->buf = av_malloc (os->bufsize);
+ memset (os->buf, 0, os->bufsize);
+ memcpy (os->buf, ost->streams[i].buf, os->bufpos);
+ }
+
+ ogg->state = ost;
+
+ return 0;
+}
+
+static int
+ogg_restore (AVFormatContext * s, int discard)
+{
+ ogg_t *ogg = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ ogg_state_t *ost = ogg->state;
+ int i;
+
+ if (!ost)
+ return 0;
+
+ ogg->state = ost->next;
+
+ if (!discard){
+ for (i = 0; i < ogg->nstreams; i++)
+ av_free (ogg->streams[i].buf);
+
+ url_fseek (bc, ost->pos, SEEK_SET);
+ ogg->curidx = ost->curidx;
+ memcpy (ogg->streams, ost->streams,
+ ogg->nstreams * sizeof (*ogg->streams));
+ }
+
+ av_free (ost);
+
+ return 0;
+}
+
+static int
+ogg_reset (ogg_t * ogg)
+{
+ int i;
+
+ for (i = 0; i < ogg->nstreams; i++){
+ ogg_stream_t *os = ogg->streams + i;
+ os->bufpos = 0;
+ os->pstart = 0;
+ os->psize = 0;
+ os->granule = -1;
+ os->lastgp = -1;
+ os->nsegs = 0;
+ os->segp = 0;
+ }
+
+ ogg->curidx = -1;
+
+ return 0;
+}
+
+static ogg_codec_t *
+ogg_find_codec (uint8_t * buf, int size)
+{
+ int i;
+
+ for (i = 0; ogg_codecs[i]; i++)
+ if (size >= ogg_codecs[i]->magicsize &&
+ !memcmp (buf, ogg_codecs[i]->magic, ogg_codecs[i]->magicsize))
+ return ogg_codecs[i];
+
+ return NULL;
+}
+
+static int
+ogg_find_stream (ogg_t * ogg, int serial)
+{
+ int i;
+
+ for (i = 0; i < ogg->nstreams; i++)
+ if (ogg->streams[i].serial == serial)
+ return i;
+
+ return -1;
+}
+
+static int
+ogg_new_stream (AVFormatContext * s, uint32_t serial)
+{
+
+ ogg_t *ogg = s->priv_data;
+ int idx = ogg->nstreams++;
+ AVStream *st;
+ ogg_stream_t *os;
+
+ ogg->streams = av_realloc (ogg->streams,
+ ogg->nstreams * sizeof (*ogg->streams));
+ memset (ogg->streams + idx, 0, sizeof (*ogg->streams));
+ os = ogg->streams + idx;
+ os->serial = serial;
+ os->bufsize = DECODER_BUFFER_SIZE;
+ os->buf = av_malloc(os->bufsize);
+ os->header = -1;
+
+ st = av_new_stream (s, idx);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 64, 1, 1000000);
+
+ return idx;
+}
+
+static int
+ogg_new_buf(ogg_t *ogg, int idx)
+{
+ ogg_stream_t *os = ogg->streams + idx;
+ uint8_t *nb = av_malloc(os->bufsize);
+ int size = os->bufpos - os->pstart;
+ if(os->buf){
+ memcpy(nb, os->buf + os->pstart, size);
+ av_free(os->buf);
+ }
+ os->buf = nb;
+ os->bufpos = size;
+ os->pstart = 0;
+
+ return 0;
+}
+
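+/* Read one Ogg page: resynchronize on the "OggS" capture pattern, parse the
+ * page header (version, flags, granule position, serial number, sequence
+ * number, CRC, segment table) and append the page body to the owning
+ * stream's reassembly buffer, growing it as needed. */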
+static int
+ogg_read_page (AVFormatContext * s, int *str)
+{
+ ByteIOContext *bc = &s->pb;
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os;
+ int i = 0;
+ int flags, nsegs;
+ uint64_t gp;
+ uint32_t serial;
+ uint32_t seq;
+ uint32_t crc;
+ int size, idx;
+ uint8_t sync[4];
+ int sp = 0;
+
+ if (get_buffer (bc, sync, 4) < 4)
+ return -1;
+
+ do{
+ int c;
+
+ if (sync[sp & 3] == 'O' &&
+ sync[(sp + 1) & 3] == 'g' &&
+ sync[(sp + 2) & 3] == 'g' && sync[(sp + 3) & 3] == 'S')
+ break;
+
+ c = url_fgetc (bc);
+ if (c < 0)
+ return -1;
+ sync[sp++ & 3] = c;
+ }while (i++ < MAX_PAGE_SIZE);
+
+ if (i >= MAX_PAGE_SIZE){
+ av_log (s, AV_LOG_INFO, "ogg, can't find sync word\n");
+ return -1;
+ }
+
+ if (url_fgetc (bc) != 0) /* version */
+ return -1;
+
+ flags = url_fgetc (bc);
+ gp = get_le64 (bc);
+ serial = get_le32 (bc);
+ seq = get_le32 (bc);
+ crc = get_le32 (bc);
+ nsegs = url_fgetc (bc);
+
+ idx = ogg_find_stream (ogg, serial);
+ if (idx < 0){
+ idx = ogg_new_stream (s, serial);
+ if (idx < 0)
+ return -1;
+ }
+
+ os = ogg->streams + idx;
+
+ if(os->psize > 0)
+ ogg_new_buf(ogg, idx);
+
+ if (get_buffer (bc, os->segments, nsegs) < nsegs)
+ return -1;
+
+ os->nsegs = nsegs;
+ os->segp = 0;
+
+ size = 0;
+ for (i = 0; i < nsegs; i++)
+ size += os->segments[i];
+
+ if (flags & OGG_FLAG_CONT){
+ if (!os->psize){
+ while (os->segp < os->nsegs){
+ int seg = os->segments[os->segp++];
+ os->pstart += seg;
+ if (seg < 255)
+ break;
+ }
+ }
+ }else{
+ os->psize = 0;
+ }
+
+ if (os->bufsize - os->bufpos < size){
+ uint8_t *nb = av_malloc (os->bufsize *= 2);
+ memcpy (nb, os->buf, os->bufpos);
+ av_free (os->buf);
+ os->buf = nb;
+ }
+
+ if (get_buffer (bc, os->buf + os->bufpos, size) < size)
+ return -1;
+
+ os->lastgp = os->granule;
+ os->bufpos += size;
+ os->granule = gp;
+ os->flags = flags;
+
+ if (str)
+ *str = idx;
+
+ return 0;
+}
+
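+/* Assemble the next complete packet from the buffered segments: a packet
+ * ends at the first segment shorter than 255 bytes. Header packets are
+ * handed to the codec's header() callback; once the headers are done,
+ * payload packets are returned through *str, *dstart and *dsize. */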
+static int
+ogg_packet (AVFormatContext * s, int *str, int *dstart, int *dsize)
+{
+ ogg_t *ogg = s->priv_data;
+ int idx;
+ ogg_stream_t *os;
+ int complete = 0;
+ int segp = 0, psize = 0;
+
+#if 0
+ av_log (s, AV_LOG_DEBUG, "ogg_packet: curidx=%i\n", ogg->curidx);
+#endif
+
+ do{
+ idx = ogg->curidx;
+
+ while (idx < 0){
+ if (ogg_read_page (s, &idx) < 0)
+ return -1;
+ }
+
+ os = ogg->streams + idx;
+
+#if 0
+ av_log (s, AV_LOG_DEBUG,
+ "ogg_packet: idx=%d pstart=%d psize=%d segp=%d nsegs=%d\n",
+ idx, os->pstart, os->psize, os->segp, os->nsegs);
+#endif
+
+ if (!os->codec){
+ if (os->header < 0){
+ os->codec = ogg_find_codec (os->buf, os->bufpos);
+ if (!os->codec){
+ os->header = 0;
+ return 0;
+ }
+ }else{
+ return 0;
+ }
+ }
+
+ segp = os->segp;
+ psize = os->psize;
+
+ while (os->segp < os->nsegs){
+ int ss = os->segments[os->segp++];
+ os->psize += ss;
+ if (ss < 255){
+ complete = 1;
+ break;
+ }
+ }
+
+ if (!complete && os->segp == os->nsegs){
+ ogg->curidx = -1;
+ }
+ }while (!complete);
+
+#if 0
+ av_log (s, AV_LOG_DEBUG,
+ "ogg_packet: idx %i, frame size %i, start %i\n",
+ idx, os->psize, os->pstart);
+#endif
+
+ ogg->curidx = idx;
+
+ if (os->header < 0){
+ int hdr = os->codec->header (s, idx);
+ if (!hdr){
+ os->header = os->seq;
+ os->segp = segp;
+ os->psize = psize;
+ ogg->headers = 1;
+ }else{
+ os->pstart += os->psize;
+ os->psize = 0;
+ }
+ }
+
+ if (os->header > -1 && os->seq > os->header){
+ if (os->codec && os->codec->packet)
+ os->codec->packet (s, idx);
+ if (str)
+ *str = idx;
+ if (dstart)
+ *dstart = os->pstart;
+ if (dsize)
+ *dsize = os->psize;
+ os->pstart += os->psize;
+ os->psize = 0;
+ }
+
+ os->seq++;
+ if (os->segp == os->nsegs)
+ ogg->curidx = -1;
+
+ return 0;
+}
+
+static int
+ogg_get_headers (AVFormatContext * s)
+{
+ ogg_t *ogg = s->priv_data;
+
+ do{
+ if (ogg_packet (s, NULL, NULL, NULL) < 0)
+ return -1;
+ }while (!ogg->headers);
+
+#if 0
+ av_log (s, AV_LOG_DEBUG, "found headers\n");
+#endif
+
+ return 0;
+}
+
+static uint64_t
+ogg_gptopts (AVFormatContext * s, int i, uint64_t gp)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + i;
+ uint64_t pts = AV_NOPTS_VALUE;
+
+ if(os->codec->gptopts){
+ pts = os->codec->gptopts(s, i, gp);
+ } else {
+ pts = gp;
+ }
+
+ return pts;
+}
+
+
+static int
+ogg_get_length (AVFormatContext * s)
+{
+ ogg_t *ogg = s->priv_data;
+ int idx = -1, i;
+ offset_t size, end;
+
+ if(s->pb.is_streamed)
+ return 0;
+
+// already set
+ if (s->duration != AV_NOPTS_VALUE)
+ return 0;
+
+ size = url_fsize(&s->pb);
+ if(size < 0)
+ return 0;
+ end = size > MAX_PAGE_SIZE? size - MAX_PAGE_SIZE: size;
+
+ ogg_save (s);
+ url_fseek (&s->pb, end, SEEK_SET);
+
+ while (!ogg_read_page (s, &i)){
+ if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
+ idx = i;
+ }
+
+ if (idx != -1){
+ s->streams[idx]->duration =
+ ogg_gptopts (s, idx, ogg->streams[idx].granule);
+ }
+
+ ogg->size = size;
+ ogg_restore (s, 0);
+ ogg_save (s);
+ while (!ogg_read_page (s, &i)) {
+ if (i == idx && ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
+ break;
+ }
+ if (i == idx) {
+ s->streams[idx]->start_time = ogg_gptopts (s, idx, ogg->streams[idx].granule);
+ s->streams[idx]->duration -= s->streams[idx]->start_time;
+ }
+ ogg_restore (s, 0);
+
+ return 0;
+}
+
+
+static int
+ogg_read_header (AVFormatContext * s, AVFormatParameters * ap)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg->curidx = -1;
+ //linear headers seek from start
+ if (ogg_get_headers (s) < 0){
+ return -1;
+ }
+
+ //linear granulepos seek from end
+ ogg_get_length (s);
+
+ //fill the extradata in the per codec callbacks
+ return 0;
+}
+
+
+static int
+ogg_read_packet (AVFormatContext * s, AVPacket * pkt)
+{
+ ogg_t *ogg;
+ ogg_stream_t *os;
+ int idx = -1;
+ int pstart, psize;
+
+ //Get an ogg packet
+ do{
+ if (ogg_packet (s, &idx, &pstart, &psize) < 0)
+ return AVERROR_IO;
+ }while (idx < 0 || !s->streams[idx]);
+
+ ogg = s->priv_data;
+ os = ogg->streams + idx;
+
+ //Alloc a pkt
+ if (av_new_packet (pkt, psize) < 0)
+ return AVERROR_IO;
+ pkt->stream_index = idx;
+ memcpy (pkt->data, os->buf + pstart, psize);
+ if (os->lastgp != -1LL){
+ pkt->pts = ogg_gptopts (s, idx, os->lastgp);
+ os->lastgp = -1;
+ }
+
+ return psize;
+}
+
+
+static int
+ogg_read_close (AVFormatContext * s)
+{
+ ogg_t *ogg = s->priv_data;
+ int i;
+
+ for (i = 0; i < ogg->nstreams; i++){
+ av_free (ogg->streams[i].buf);
+ av_free (ogg->streams[i].private);
+ }
+ av_free (ogg->streams);
+ return 0;
+}
+
+
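+/* Seek by interpolated bisection on granule positions: guess a byte offset
+ * from the target timestamp, read pages there until one for this stream
+ * carries a granule position, and narrow the [min,max] byte range until the
+ * resulting pts is within one time base unit of the target. */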
+static int
+ogg_read_seek (AVFormatContext * s, int stream_index, int64_t target_ts,
+ int flags)
+{
+ AVStream *st = s->streams[stream_index];
+ ogg_t *ogg = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ uint64_t min = 0, max = ogg->size;
+ uint64_t tmin = st->start_time, tmax = st->start_time + st->duration;
+ int64_t pts = AV_NOPTS_VALUE;
+
+ ogg_save (s);
+
+ if ((uint64_t)target_ts < tmin || target_ts < 0)
+ target_ts = tmin;
+ while (min <= max && tmin < tmax){
+ uint64_t p = min + (max - min) * (target_ts - tmin) / (tmax - tmin);
+ int i = -1;
+
+ url_fseek (bc, p, SEEK_SET);
+
+ while (!ogg_read_page (s, &i)){
+ if (i == stream_index && ogg->streams[i].granule != 0 &&
+ ogg->streams[i].granule != -1)
+ break;
+ }
+
+ if (i == -1)
+ break;
+
+ pts = ogg_gptopts (s, i, ogg->streams[i].granule);
+ p = url_ftell (bc);
+
+ if (FFABS (pts - target_ts) * st->time_base.num < st->time_base.den)
+ break;
+
+ if (pts > target_ts){
+ if (max == p && tmax == pts) {
+ // probably our tmin is wrong, causing us to always end up too late in the file
+ tmin = (target_ts + tmin + 1) / 2;
+ if (tmin == target_ts) {
+ url_fseek(bc, min, SEEK_SET);
+ break;
+ }
+ }
+ max = p;
+ tmax = pts;
+ }else{
+ if (min == p && tmin == pts) {
+ // probably our tmax is wrong, causing us to always end up too early in the file
+ tmax = (target_ts + tmax) / 2;
+ if (tmax == target_ts) {
+ url_fseek(bc, max, SEEK_SET);
+ break;
+ }
+ }
+ min = p;
+ tmin = pts;
+ }
+ }
+
+ if (FFABS (pts - target_ts) * st->time_base.num < st->time_base.den){
+ ogg_restore (s, 1);
+ ogg_reset (ogg);
+ }else{
+ ogg_restore (s, 0);
+ pts = AV_NOPTS_VALUE;
+ }
+
+ av_update_cur_dts(s, st, pts);
+ return 0;
+
+#if 0
+ //later...
+ int64_t pos;
+ if (av_seek_frame_binary (s, stream_index, target_ts, flags) < 0)
+ return -1;
+ pos = url_ftell (&s->pb);
+ ogg_read_timestamp (s, stream_index, &pos, pos - 1);
+#endif
+
+}
+
+#if 0
+static int64_t
+ogg_read_timestamp (AVFormatContext * s, int stream_index, int64_t * pos_arg,
+ int64_t pos_limit)
+{
+ ogg_t *ogg = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos, pts;
+
+ if (*pos_arg < 0)
+ return AV_NOPTS_VALUE;
+
+ pos = *pos_arg;
+}
+#endif
+
+static int ogg_probe(AVProbeData *p)
+{
+ if (p->buf_size < 6)
+ return 0;
+ if (p->buf[0] == 'O' && p->buf[1] == 'g' &&
+ p->buf[2] == 'g' && p->buf[3] == 'S' &&
+ p->buf[4] == 0x0 && p->buf[5] <= 0x7 )
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+AVInputFormat ogg_demuxer = {
+ "ogg",
+ "Ogg",
+ sizeof (ogg_t),
+ ogg_probe,
+ ogg_read_header,
+ ogg_read_packet,
+ ogg_read_close,
+ ogg_read_seek,
+// ogg_read_timestamp,
+ .extensions = "ogg",
+};
diff --git a/contrib/ffmpeg/libavformat/ogg2.h b/contrib/ffmpeg/libavformat/ogg2.h
new file mode 100644
index 000000000..dd6f24aab
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ogg2.h
@@ -0,0 +1,85 @@
+/**
+ Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+#ifndef OGG_H
+#define OGG_H
+
+#include "avformat.h"
+
+typedef struct ogg_codec {
+ int8_t *magic;
+ uint8_t magicsize;
+ int8_t *name;
+ int (*header)(AVFormatContext *, int);
+ int (*packet)(AVFormatContext *, int);
+ uint64_t (*gptopts)(AVFormatContext *, int, uint64_t);
+} ogg_codec_t;
+
+typedef struct ogg_stream {
+ uint8_t *buf;
+ unsigned int bufsize;
+ unsigned int bufpos;
+ unsigned int pstart;
+ unsigned int psize;
+ uint32_t serial;
+ uint32_t seq;
+ uint64_t granule, lastgp;
+ int flags;
+ ogg_codec_t *codec;
+ int header;
+ int nsegs, segp;
+ uint8_t segments[255];
+ void *private;
+} ogg_stream_t;
+
+typedef struct ogg_state {
+ uint64_t pos;
+ int curidx;
+ struct ogg_state *next;
+ ogg_stream_t streams[1];
+} ogg_state_t;
+
+typedef struct ogg {
+ ogg_stream_t *streams;
+ int nstreams;
+ int headers;
+ int curidx;
+ uint64_t size;
+ ogg_state_t *state;
+} ogg_t;
+
+#define OGG_FLAG_CONT 1
+#define OGG_FLAG_BOS 2
+#define OGG_FLAG_EOS 4
+
+extern ogg_codec_t vorbis_codec;
+extern ogg_codec_t theora_codec;
+extern ogg_codec_t flac_codec;
+extern ogg_codec_t ogm_video_codec;
+extern ogg_codec_t ogm_audio_codec;
+extern ogg_codec_t ogm_old_codec;
+
+extern int vorbis_comment(AVFormatContext *ms, uint8_t *buf, int size);
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/oggparseflac.c b/contrib/ffmpeg/libavformat/oggparseflac.c
new file mode 100644
index 000000000..8960088d8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/oggparseflac.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2005 Matthieu CASTET
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include "avformat.h"
+#include "bitstream.h"
+#include "ogg2.h"
+
+#define FLAC_STREAMINFO_SIZE 0x22
+
+static int
+flac_header (AVFormatContext * s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ GetBitContext gb;
+ int mdt;
+
+ if (os->buf[os->pstart] == 0xff)
+ return 0;
+
+ init_get_bits(&gb, os->buf + os->pstart, os->psize*8);
+ get_bits(&gb, 1); /* metadata_last */
+ mdt = get_bits(&gb, 7);
+
+ if (mdt == 0x7f) {
+ skip_bits(&gb, 4*8); /* "FLAC" */
+ if(get_bits(&gb, 8) != 1) /* unsupported major version */
+ return -1;
+ skip_bits(&gb, 8 + 16); /* minor version + header count */
+ skip_bits(&gb, 4*8); /* "fLaC" */
+
+ /* METADATA_BLOCK_HEADER */
+ if (get_bits(&gb, 32) != FLAC_STREAMINFO_SIZE)
+ return -1;
+
+ skip_bits(&gb, 16*2+24*2);
+
+ st->codec->sample_rate = get_bits_long(&gb, 20);
+ st->codec->channels = get_bits(&gb, 3) + 1;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_FLAC;
+
+ st->codec->extradata =
+ av_malloc(FLAC_STREAMINFO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
+ memcpy (st->codec->extradata, os->buf + os->pstart + 5 + 4 + 4 + 4,
+ FLAC_STREAMINFO_SIZE);
+ st->codec->extradata_size = FLAC_STREAMINFO_SIZE;
+
+ st->time_base.num = 1;
+ st->time_base.den = st->codec->sample_rate;
+ } else if (mdt == 4) {
+ vorbis_comment (s, os->buf + os->pstart + 4, os->psize - 4);
+ }
+
+ return 1;
+}
+
+ogg_codec_t flac_codec = {
+ .magic = "\177FLAC",
+ .magicsize = 5,
+ .header = flac_header
+};
diff --git a/contrib/ffmpeg/libavformat/oggparseogm.c b/contrib/ffmpeg/libavformat/oggparseogm.c
new file mode 100644
index 000000000..8788e5d41
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/oggparseogm.c
@@ -0,0 +1,166 @@
+/**
+ Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+#include <stdlib.h>
+#include "avformat.h"
+#include "bitstream.h"
+#include "bswap.h"
+#include "ogg2.h"
+#include "riff.h"
+
+static int
+ogm_header(AVFormatContext *s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ uint8_t *p = os->buf + os->pstart;
+ uint64_t time_unit;
+ uint64_t spu;
+ uint32_t default_len;
+
+ if(!(*p & 1))
+ return 0;
+ if(*p != 1)
+ return 1;
+
+ p++;
+
+ if(*p == 'v'){
+ int tag;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ p += 8;
+ tag = le2me_32(unaligned32(p));
+ st->codec->codec_id = codec_get_bmp_id(tag);
+ st->codec->codec_tag = tag;
+ } else {
+ int cid;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ p += 8;
+ p[4] = 0;
+ cid = strtol(p, NULL, 16);
+ st->codec->codec_id = codec_get_wav_id(cid);
+ }
+
+ p += 4;
+ p += 4; /* useless size field */
+
+ time_unit = le2me_64(unaligned64(p));
+ p += 8;
+ spu = le2me_64(unaligned64(p));
+ p += 8;
+ default_len = le2me_32(unaligned32(p));
+ p += 4;
+
+ p += 8; /* buffersize + bits_per_sample */
+
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO){
+ st->codec->width = le2me_32(unaligned32(p));
+ p += 4;
+ st->codec->height = le2me_32(unaligned32(p));
+ st->codec->time_base.den = spu * 10000000;
+ st->codec->time_base.num = time_unit;
+ st->time_base = st->codec->time_base;
+ } else {
+ st->codec->channels = le2me_16(unaligned16(p));
+ p += 2;
+ p += 2; /* block_align */
+ st->codec->bit_rate = le2me_32(unaligned32(p)) * 8;
+ st->codec->sample_rate = spu * 10000000 / time_unit;
+ st->time_base.num = 1;
+ st->time_base.den = st->codec->sample_rate;
+ }
+
+ return 1;
+}
+
+static int
+ogm_dshow_header(AVFormatContext *s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ uint8_t *p = os->buf + os->pstart;
+ uint32_t t;
+
+ if(!(*p & 1))
+ return 0;
+ if(*p != 1)
+ return 1;
+
+ t = le2me_32(unaligned32(p + 96));
+
+ if(t == 0x05589f80){
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = codec_get_bmp_id(le2me_32(unaligned32(p + 68)));
+ st->codec->time_base.den = 10000000;
+ st->codec->time_base.num = le2me_64(unaligned64(p + 164));
+ st->codec->width = le2me_32(unaligned32(p + 176));
+ st->codec->height = le2me_32(unaligned32(p + 180));
+ } else if(t == 0x05589f81){
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = codec_get_wav_id(le2me_16(unaligned16(p+124)));
+ st->codec->channels = le2me_16(unaligned16(p + 126));
+ st->codec->sample_rate = le2me_32(unaligned32(p + 128));
+ st->codec->bit_rate = le2me_32(unaligned32(p + 132)) * 8;
+ }
+
+ return 1;
+}
+
+static int
+ogm_packet(AVFormatContext *s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ uint8_t *p = os->buf + os->pstart;
+ int lb;
+
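+    /* bits 6-7 and bit 1 of the flag byte give the number of extra length
+       bytes in the packet header; skip them along with the flag byte so
+       only the payload is left */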
+ lb = ((*p & 2) << 1) | ((*p >> 6) & 3);
+ os->pstart += lb + 1;
+ os->psize -= lb + 1;
+
+ return 0;
+}
+
+ogg_codec_t ogm_video_codec = {
+ .magic = "\001video",
+ .magicsize = 6,
+ .header = ogm_header,
+ .packet = ogm_packet
+};
+
+ogg_codec_t ogm_audio_codec = {
+ .magic = "\001audio",
+ .magicsize = 6,
+ .header = ogm_header,
+ .packet = ogm_packet
+};
+
+ogg_codec_t ogm_old_codec = {
+ .magic = "\001Direct Show Samples embedded in Ogg",
+ .magicsize = 35,
+ .header = ogm_dshow_header,
+ .packet = ogm_packet
+};
diff --git a/contrib/ffmpeg/libavformat/oggparsetheora.c b/contrib/ffmpeg/libavformat/oggparsetheora.c
new file mode 100644
index 000000000..9052bbbea
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/oggparsetheora.c
@@ -0,0 +1,129 @@
+/**
+ Copyright (C) 2005 Matthieu CASTET, Alex Beregszaszi
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+#include <stdlib.h>
+#include "avformat.h"
+#include "bitstream.h"
+#include "bswap.h"
+#include "ogg2.h"
+
+typedef struct theora_params {
+ int gpshift;
+ int gpmask;
+} theora_params_t;
+
+static int
+theora_header (AVFormatContext * s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ theora_params_t *thp = os->private;
+ int cds = st->codec->extradata_size + os->psize + 2;
+ uint8_t *cdp;
+
+ if(!(os->buf[os->pstart] & 0x80))
+ return 0;
+
+ if(!thp){
+ thp = av_mallocz(sizeof(*thp));
+ os->private = thp;
+ }
+
+ if (os->buf[os->pstart] == 0x80) {
+ GetBitContext gb;
+ int version;
+
+ init_get_bits(&gb, os->buf + os->pstart, os->psize*8);
+
+ skip_bits(&gb, 7*8); /* 0x80"theora" */
+
+ version = get_bits(&gb, 8) << 16;
+ version |= get_bits(&gb, 8) << 8;
+ version |= get_bits(&gb, 8);
+
+ if (version < 0x030100)
+ {
+ av_log(s, AV_LOG_ERROR,
+ "Too old or unsupported Theora (%x)\n", version);
+ return -1;
+ }
+
+ st->codec->width = get_bits(&gb, 16) << 4;
+ st->codec->height = get_bits(&gb, 16) << 4;
+
+ if (version >= 0x030400)
+ skip_bits(&gb, 164);
+ else if (version >= 0x030200)
+ skip_bits(&gb, 64);
+ st->codec->time_base.den = get_bits(&gb, 32);
+ st->codec->time_base.num = get_bits(&gb, 32);
+ st->time_base = st->codec->time_base;
+
+ st->codec->sample_aspect_ratio.num = get_bits(&gb, 24);
+ st->codec->sample_aspect_ratio.den = get_bits(&gb, 24);
+
+ if (version >= 0x030200)
+ skip_bits(&gb, 38);
+ if (version >= 0x304000)
+ skip_bits(&gb, 2);
+
+ thp->gpshift = get_bits(&gb, 5);
+ thp->gpmask = (1 << thp->gpshift) - 1;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_THEORA;
+
+ } else if (os->buf[os->pstart] == 0x83) {
+ vorbis_comment (s, os->buf + os->pstart + 7, os->psize - 8);
+ }
+
+ st->codec->extradata = av_realloc (st->codec->extradata, cds);
+ cdp = st->codec->extradata + st->codec->extradata_size;
+ *cdp++ = os->psize >> 8;
+ *cdp++ = os->psize & 0xff;
+ memcpy (cdp, os->buf + os->pstart, os->psize);
+ st->codec->extradata_size = cds;
+
+ return 1;
+}
+
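+/* A Theora granule position keeps the last keyframe number in its upper
+   bits and the number of frames since that keyframe in the low gpshift
+   bits; their sum is the absolute frame index used as the timestamp. */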
+static uint64_t
+theora_gptopts(AVFormatContext *ctx, int idx, uint64_t gp)
+{
+ ogg_t *ogg = ctx->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ theora_params_t *thp = os->private;
+ uint64_t iframe = gp >> thp->gpshift;
+ uint64_t pframe = gp & thp->gpmask;
+
+ return iframe + pframe;
+}
+
+ogg_codec_t theora_codec = {
+ .magic = "\200theora",
+ .magicsize = 7,
+ .header = theora_header,
+ .gptopts = theora_gptopts
+};
diff --git a/contrib/ffmpeg/libavformat/oggparsevorbis.c b/contrib/ffmpeg/libavformat/oggparsevorbis.c
new file mode 100644
index 000000000..5de221cb4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/oggparsevorbis.c
@@ -0,0 +1,205 @@
+/**
+ Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+#include <stdlib.h>
+#include "avformat.h"
+#include "bitstream.h"
+#include "bswap.h"
+#include "ogg2.h"
+
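+/* A Vorbis comment header is a length-prefixed vendor string, a 32-bit
+   comment count and then length-prefixed "TAG=value" entries, all
+   little-endian; vorbis_comment() below maps a few well-known tags onto
+   the AVFormatContext metadata fields. */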
+extern int
+vorbis_comment (AVFormatContext * as, uint8_t *buf, int size)
+{
+ char *p = buf;
+ int s, n, j;
+
+ if (size < 4)
+ return -1;
+
+ s = le2me_32 (unaligned32 (p));
+ p += 4;
+ size -= 4;
+
+ if (size < s + 4)
+ return -1;
+
+ p += s;
+ size -= s;
+
+ n = le2me_32 (unaligned32 (p));
+ p += 4;
+ size -= 4;
+
+ while (size >= 4){
+ char *t, *v;
+ int tl, vl;
+
+ s = le2me_32 (unaligned32 (p));
+ p += 4;
+ size -= 4;
+
+ if (size < s)
+ break;
+
+ t = p;
+ p += s;
+ size -= s;
+ n--;
+
+ v = memchr (t, '=', s);
+ if (!v)
+ continue;
+
+ tl = v - t;
+ vl = s - tl - 1;
+ v++;
+
+ if (tl && vl){
+ char tt[tl + 1];
+ char ct[vl + 1];
+
+ for (j = 0; j < tl; j++)
+ tt[j] = toupper (t[j]);
+ tt[tl] = 0;
+
+ memcpy (ct, v, vl);
+ ct[vl] = 0;
+
+            // tag names taken from the Vorbis I specification
+ if (!strcmp (tt, "AUTHOR"))
+ strncpy (as->author, ct, FFMIN(sizeof (as->author), vl));
+ else if (!strcmp (tt, "TITLE"))
+ strncpy (as->title, ct, FFMIN(sizeof (as->title), vl));
+ else if (!strcmp (tt, "COPYRIGHT"))
+ strncpy (as->copyright, ct, FFMIN(sizeof (as->copyright), vl));
+ else if (!strcmp (tt, "DESCRIPTION"))
+ strncpy (as->comment, ct, FFMIN(sizeof (as->comment), vl));
+ else if (!strcmp (tt, "GENRE"))
+ strncpy (as->genre, ct, FFMIN(sizeof (as->genre), vl));
+ else if (!strcmp (tt, "TRACKNUMBER"))
+ as->track = atoi (ct);
+            // other comment tags are not mapped yet
+ }
+ }
+
+ if (size > 0)
+ av_log (as, AV_LOG_INFO, "%i bytes of comment header remain\n", size);
+ if (n > 0)
+ av_log (as, AV_LOG_INFO,
+ "truncated comment header, %i comments not found\n", n);
+
+ return 0;
+}
+
+
+/** Parse the vorbis header
+ * Vorbis Identification header from Vorbis_I_spec.html#vorbis-spec-codec
+ * [vorbis_version] = read 32 bits as unsigned integer | Not used
+ * [audio_channels] = read 8 bit integer as unsigned | Used
+ * [audio_sample_rate] = read 32 bits as unsigned integer | Used
+ * [bitrate_maximum] = read 32 bits as signed integer | Not used yet
+ * [bitrate_nominal] = read 32 bits as signed integer | Not used yet
+ * [bitrate_minimum] = read 32 bits as signed integer | Used as bitrate
+ * [blocksize_0] = read 4 bits as unsigned integer | Not Used
+ * [blocksize_1] = read 4 bits as unsigned integer | Not Used
+ * [framing_flag] = read one bit | Not Used
+ * */
+
+typedef struct {
+ unsigned int len[3];
+ unsigned char *packet[3];
+} oggvorbis_private_t;
+
+
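+/* fixup_vorbis_headers() packs the three Vorbis header packets into a
+   single extradata blob: a count byte (2), the Xiph-laced sizes of the
+   first two packets, then the three packets back to back. */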
+static unsigned int
+fixup_vorbis_headers(AVFormatContext * as, oggvorbis_private_t *priv,
+ void **buf)
+{
+ int i,offset, len;
+ unsigned char *ptr;
+
+ len = priv->len[0] + priv->len[1] + priv->len[2];
+ ptr = *buf = av_mallocz(len + len/255 + 64);
+
+ ptr[0] = 2;
+ offset = 1;
+ offset += av_xiphlacing(&ptr[offset], priv->len[0]);
+ offset += av_xiphlacing(&ptr[offset], priv->len[1]);
+ for(i = 0; i < 3; i++) {
+ memcpy(&ptr[offset], priv->packet[i], priv->len[i]);
+ offset += priv->len[i];
+ }
+ *buf = av_realloc(*buf, offset);
+ return offset;
+}
+
+
+static int
+vorbis_header (AVFormatContext * s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ oggvorbis_private_t *priv;
+
+ if (os->seq > 2)
+ return 0;
+
+ if(os->seq == 0) {
+ os->private = av_mallocz(sizeof(oggvorbis_private_t));
+ if(!os->private)
+ return 0;
+ }
+
+ priv = os->private;
+ priv->len[os->seq] = os->psize;
+ priv->packet[os->seq] = av_mallocz(os->psize);
+ memcpy(priv->packet[os->seq], os->buf + os->pstart, os->psize);
+ if (os->buf[os->pstart] == 1) {
+ uint8_t *p = os->buf + os->pstart + 11; //skip up to the audio channels
+ st->codec->channels = *p++;
+ st->codec->sample_rate = le2me_32 (unaligned32 (p));
+        p += 8; // skip maximum and nominal bitrate
+ st->codec->bit_rate = le2me_32 (unaligned32 (p)); //Minimum bitrate
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_VORBIS;
+
+ st->time_base.num = 1;
+ st->time_base.den = st->codec->sample_rate;
+ } else if (os->buf[os->pstart] == 3) {
+ vorbis_comment (s, os->buf + os->pstart + 7, os->psize - 8);
+ } else {
+ st->codec->extradata_size =
+ fixup_vorbis_headers(s, priv, &st->codec->extradata);
+ }
+
+ return os->seq < 3;
+}
+
+ogg_codec_t vorbis_codec = {
+ .magic = "\001vorbis",
+ .magicsize = 7,
+ .header = vorbis_header
+};
diff --git a/contrib/ffmpeg/libavformat/os_support.c b/contrib/ffmpeg/libavformat/os_support.c
new file mode 100644
index 000000000..a66c867f0
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/os_support.c
@@ -0,0 +1,96 @@
+/*
+ * Various utilities for ffmpeg system
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * copyright (c) 2002 Francois Revol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "config.h"
+#include "avformat.h"
+#if defined(CONFIG_WINCE)
+/* Skip includes on WinCE. */
+#elif defined(__MINGW32__)
+#include <sys/types.h>
+#include <sys/timeb.h>
+#elif defined(CONFIG_OS2)
+#include <string.h>
+#include <sys/time.h>
+#else
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/time.h>
+#endif
+#include <time.h>
+
+/**
+ * Gets the current time in microseconds.
+ */
+int64_t av_gettime(void)
+{
+#if defined(CONFIG_WINCE)
+ return timeGetTime() * int64_t_C(1000);
+#elif defined(__MINGW32__)
+ struct timeb tb;
+ _ftime(&tb);
+ return ((int64_t)tb.time * int64_t_C(1000) + (int64_t)tb.millitm) * int64_t_C(1000);
+#else
+ struct timeval tv;
+ gettimeofday(&tv,NULL);
+ return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+#endif
+}
+
+#if !defined(CONFIG_WINCE) && !defined(HAVE_LOCALTIME_R)
+struct tm *localtime_r(const time_t *t, struct tm *tp)
+{
+ struct tm *l;
+
+ l = localtime(t);
+ if (!l)
+ return 0;
+ *tp = *l;
+ return tp;
+}
+#endif /* !defined(CONFIG_WINCE) && !defined(HAVE_LOCALTIME_R) */
+
+#if !defined(HAVE_INET_ATON) && defined(CONFIG_NETWORK)
+#include <stdlib.h>
+#include <strings.h>
+#include "barpainet.h"
+
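+/* Minimal inet_aton() replacement for systems that lack it: parses plain
+   dotted-quad decimal notation only and performs no validation. */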
+int inet_aton (const char * str, struct in_addr * add)
+{
+ const char * pch = str;
+ unsigned int add1 = 0, add2 = 0, add3 = 0, add4 = 0;
+
+ add1 = atoi(pch);
+ pch = strpbrk(pch,".");
+ if (pch == 0 || ++pch == 0) goto done;
+ add2 = atoi(pch);
+ pch = strpbrk(pch,".");
+ if (pch == 0 || ++pch == 0) goto done;
+ add3 = atoi(pch);
+ pch = strpbrk(pch,".");
+ if (pch == 0 || ++pch == 0) goto done;
+ add4 = atoi(pch);
+
+done:
+ add->s_addr=(add4<<24)+(add3<<16)+(add2<<8)+add1;
+
+ return 1;
+}
+#endif /* !defined(HAVE_INET_ATON) && defined(CONFIG_NETWORK) */
diff --git a/contrib/ffmpeg/libavformat/os_support.h b/contrib/ffmpeg/libavformat/os_support.h
new file mode 100644
index 000000000..e76a9aaaf
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/os_support.h
@@ -0,0 +1,53 @@
+/*
+ * various utilities for ffmpeg system
+ * copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _OS_SUPPORT_H
+#define _OS_SUPPORT_H
+
+/**
+ * @file os_support.h
+ * miscellaneous OS support macros and functions.
+ *
+ * - usleep() (Win32, BeOS, OS/2)
+ * - floatf() (OS/2)
+ * - strcasecmp() (OS/2)
+ */
+
+#ifdef __MINGW32__
+__declspec(dllimport) void __stdcall Sleep(unsigned long dwMilliseconds);
+// # include <windows.h>
+# define usleep(t) Sleep((t) / 1000)
+#endif
+
+#ifdef __BEOS__
+# ifndef usleep
+# include <OS.h>
+# define usleep(t) snooze((bigtime_t)(t))
+# endif
+#endif
+
+#if defined(CONFIG_OS2)
+#include <stdlib.h>
+static inline int usleep(unsigned int t) { return _sleep2(t / 1000); }
+static inline int strcasecmp(const char* s1, const char* s2) { return stricmp(s1,s2); }
+#endif
+
+#endif /* _OS_SUPPORT_H */
diff --git a/contrib/ffmpeg/libavformat/png.c b/contrib/ffmpeg/libavformat/png.c
new file mode 100644
index 000000000..d62bf540a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/png.c
@@ -0,0 +1,889 @@
+/*
+ * PNG image format
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+/* TODO:
+ * - add 2, 4 and 16 bit depth support
+ * - use filters when generating a png (better compression)
+ */
+
+#ifdef CONFIG_ZLIB
+#include <zlib.h>
+
+//#define DEBUG
+
+#define PNG_COLOR_MASK_PALETTE 1
+#define PNG_COLOR_MASK_COLOR 2
+#define PNG_COLOR_MASK_ALPHA 4
+
+#define PNG_COLOR_TYPE_GRAY 0
+#define PNG_COLOR_TYPE_PALETTE (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE)
+#define PNG_COLOR_TYPE_RGB (PNG_COLOR_MASK_COLOR)
+#define PNG_COLOR_TYPE_RGB_ALPHA (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_ALPHA)
+#define PNG_COLOR_TYPE_GRAY_ALPHA (PNG_COLOR_MASK_ALPHA)
+
+#define PNG_FILTER_VALUE_NONE 0
+#define PNG_FILTER_VALUE_SUB 1
+#define PNG_FILTER_VALUE_UP 2
+#define PNG_FILTER_VALUE_AVG 3
+#define PNG_FILTER_VALUE_PAETH 4
+
+#define PNG_IHDR 0x0001
+#define PNG_IDAT 0x0002
+#define PNG_ALLIMAGE 0x0004
+#define PNG_PLTE 0x0008
+
+#define NB_PASSES 7
+
+#define IOBUF_SIZE 4096
+
+typedef struct PNGDecodeState {
+ int state;
+ int width, height;
+ int bit_depth;
+ int color_type;
+ int compression_type;
+ int interlace_type;
+ int filter_type;
+ int channels;
+ int bits_per_pixel;
+ int bpp;
+
+ uint8_t *image_buf;
+ int image_linesize;
+ uint32_t palette[256];
+ uint8_t *crow_buf;
+ uint8_t *last_row;
+ uint8_t *tmp_row;
+ int pass;
+    int crow_size; /* compressed row size (includes the filter type byte) */
+    int row_size; /* decompressed row size */
+    int pass_row_size; /* decompressed row size of the current pass */
+ int y;
+ z_stream zstream;
+} PNGDecodeState;
+
+static const uint8_t pngsig[8] = {137, 80, 78, 71, 13, 10, 26, 10};
+
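+/* Tables describing the seven passes of Adam7 interlacing: each mask is an
+   8-bit pattern over an 8x8 tile selecting which rows (ymask) or which
+   pixels within a row (mask) belong to a given pass. */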
+/* Mask to determine which y pixels are valid in a pass */
+static const uint8_t png_pass_ymask[NB_PASSES] = {
+ 0x80, 0x80, 0x08, 0x88, 0x22, 0xaa, 0x55,
+};
+
+/* Mask to determine which y pixels can be written in a pass */
+static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
+ 0xff, 0xff, 0x0f, 0xcc, 0x33, 0xff, 0x55,
+};
+
+/* minimum x value */
+static const uint8_t png_pass_xmin[NB_PASSES] = {
+ 0, 4, 0, 2, 0, 1, 0
+};
+
+/* x shift to get row width */
+static const uint8_t png_pass_xshift[NB_PASSES] = {
+ 3, 3, 2, 2, 1, 1, 0
+};
+
+/* Mask to determine which pixels are valid in a pass */
+static const uint8_t png_pass_mask[NB_PASSES] = {
+ 0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff
+};
+
+/* Mask to determine which pixels to overwrite while displaying */
+static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
+ 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
+};
+
+static int png_probe(AVProbeData *pd)
+{
+ if (pd->buf_size >= 8 &&
+ memcmp(pd->buf, pngsig, 8) == 0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static void *png_zalloc(void *opaque, unsigned int items, unsigned int size)
+{
+ return av_malloc(items * size);
+}
+
+static void png_zfree(void *opaque, void *ptr)
+{
+ av_free(ptr);
+}
+
+static int png_get_nb_channels(int color_type)
+{
+ int channels;
+ channels = 1;
+ if ((color_type & (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE)) ==
+ PNG_COLOR_MASK_COLOR)
+ channels = 3;
+ if (color_type & PNG_COLOR_MASK_ALPHA)
+ channels++;
+ return channels;
+}
+
+/* compute the row size of an interleaved pass */
+static int png_pass_row_size(int pass, int bits_per_pixel, int width)
+{
+ int shift, xmin, pass_width;
+
+ xmin = png_pass_xmin[pass];
+ if (width <= xmin)
+ return 0;
+ shift = png_pass_xshift[pass];
+ pass_width = (width - xmin + (1 << shift) - 1) >> shift;
+ return (pass_width * bits_per_pixel + 7) >> 3;
+}
+
+/* NOTE: we try to construct a good-looking image at each pass. width
+   is the original image width. We also do the pixel format conversion at
+   this stage */
+static void png_put_interlaced_row(uint8_t *dst, int width,
+ int bits_per_pixel, int pass,
+ int color_type, const uint8_t *src)
+{
+ int x, mask, dsp_mask, j, src_x, b, bpp;
+ uint8_t *d;
+ const uint8_t *s;
+
+ mask = png_pass_mask[pass];
+ dsp_mask = png_pass_dsp_mask[pass];
+ switch(bits_per_pixel) {
+ case 1:
+        /* we must initialize the line to zero before writing to it */
+ if (pass == 0)
+ memset(dst, 0, (width + 7) >> 3);
+ src_x = 0;
+ for(x = 0; x < width; x++) {
+ j = (x & 7);
+ if ((dsp_mask << j) & 0x80) {
+ b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
+ dst[x >> 3] |= b << (7 - j);
+ }
+ if ((mask << j) & 0x80)
+ src_x++;
+ }
+ break;
+ default:
+ bpp = bits_per_pixel >> 3;
+ d = dst;
+ s = src;
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((dsp_mask << j) & 0x80) {
+ *(uint32_t *)d = (s[3] << 24) | (s[0] << 16) | (s[1] << 8) | s[2];
+ }
+ d += bpp;
+ if ((mask << j) & 0x80)
+ s += bpp;
+ }
+ } else {
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((dsp_mask << j) & 0x80) {
+ memcpy(d, s, bpp);
+ }
+ d += bpp;
+ if ((mask << j) & 0x80)
+ s += bpp;
+ }
+ }
+ break;
+ }
+}
+
+static void png_get_interlaced_row(uint8_t *dst, int row_size,
+ int bits_per_pixel, int pass,
+ const uint8_t *src, int width)
+{
+ int x, mask, dst_x, j, b, bpp;
+ uint8_t *d;
+ const uint8_t *s;
+
+ mask = png_pass_mask[pass];
+ switch(bits_per_pixel) {
+ case 1:
+ memset(dst, 0, row_size);
+ dst_x = 0;
+ for(x = 0; x < width; x++) {
+ j = (x & 7);
+ if ((mask << j) & 0x80) {
+ b = (src[x >> 3] >> (7 - j)) & 1;
+ dst[dst_x >> 3] |= b << (7 - (dst_x & 7));
+ dst_x++;
+ }
+ }
+ break;
+ default:
+ bpp = bits_per_pixel >> 3;
+ d = dst;
+ s = src;
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((mask << j) & 0x80) {
+ memcpy(d, s, bpp);
+ d += bpp;
+ }
+ s += bpp;
+ }
+ break;
+ }
+}
+
+/* XXX: optimize */
+/* NOTE: 'dst' can be equal to 'last' */
+static void png_filter_row(uint8_t *dst, int filter_type,
+ uint8_t *src, uint8_t *last, int size, int bpp)
+{
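+/* The first packet of an Ogg FLAC stream is 0x7f "FLAC", a major/minor
+   version, a 16-bit header count, the native "fLaC" marker and a
+   STREAMINFO metadata block; packets starting with 0xff are audio frames,
+   not headers. */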
+ int i, p;
+
+ switch(filter_type) {
+ case PNG_FILTER_VALUE_NONE:
+ memcpy(dst, src, size);
+ break;
+ case PNG_FILTER_VALUE_SUB:
+ for(i = 0; i < bpp; i++) {
+ dst[i] = src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ p = dst[i - bpp];
+ dst[i] = p + src[i];
+ }
+ break;
+ case PNG_FILTER_VALUE_UP:
+ for(i = 0; i < size; i++) {
+ p = last[i];
+ dst[i] = p + src[i];
+ }
+ break;
+ case PNG_FILTER_VALUE_AVG:
+ for(i = 0; i < bpp; i++) {
+ p = (last[i] >> 1);
+ dst[i] = p + src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ p = ((dst[i - bpp] + last[i]) >> 1);
+ dst[i] = p + src[i];
+ }
+ break;
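+    /* Paeth predictor: for each byte pick whichever of left (a), up (b) or
+       up-left (c) is closest to a + b - c; pa, pb and pc below are the
+       distances of a, b and c from that estimate */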
+ case PNG_FILTER_VALUE_PAETH:
+ for(i = 0; i < bpp; i++) {
+ p = last[i];
+ dst[i] = p + src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ int a, b, c, pa, pb, pc;
+
+ a = dst[i - bpp];
+ b = last[i];
+ c = last[i - bpp];
+
+ p = b - c;
+ pc = a - c;
+
+ pa = abs(p);
+ pb = abs(pc);
+ pc = abs(p + pc);
+
+ if (pa <= pb && pa <= pc)
+ p = a;
+ else if (pb <= pc)
+ p = b;
+ else
+ p = c;
+ dst[i] = p + src[i];
+ }
+ break;
+ }
+}
+
+static void convert_from_rgba32(uint8_t *dst, const uint8_t *src, int width)
+{
+ uint8_t *d;
+ int j;
+ unsigned int v;
+
+ d = dst;
+ for(j = 0; j < width; j++) {
+ v = ((const uint32_t *)src)[j];
+ d[0] = v >> 16;
+ d[1] = v >> 8;
+ d[2] = v;
+ d[3] = v >> 24;
+ d += 4;
+ }
+}
+
+static void convert_to_rgba32(uint8_t *dst, const uint8_t *src, int width)
+{
+ int j;
+ unsigned int r, g, b, a;
+
+ for(j = 0;j < width; j++) {
+ r = src[0];
+ g = src[1];
+ b = src[2];
+ a = src[3];
+ *(uint32_t *)dst = (a << 24) | (r << 16) | (g << 8) | b;
+ dst += 4;
+ src += 4;
+ }
+}
+
+/* process exactly one decompressed row */
+static void png_handle_row(PNGDecodeState *s)
+{
+ uint8_t *ptr, *last_row;
+ int got_line;
+
+ if (!s->interlace_type) {
+ ptr = s->image_buf + s->image_linesize * s->y;
+ /* need to swap bytes correctly for RGB_ALPHA */
+ if (s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
+ s->last_row, s->row_size, s->bpp);
+ memcpy(s->last_row, s->tmp_row, s->row_size);
+ convert_to_rgba32(ptr, s->tmp_row, s->width);
+ } else {
+ /* in normal case, we avoid one copy */
+ if (s->y == 0)
+ last_row = s->last_row;
+ else
+ last_row = ptr - s->image_linesize;
+
+ png_filter_row(ptr, s->crow_buf[0], s->crow_buf + 1,
+ last_row, s->row_size, s->bpp);
+ }
+ s->y++;
+ if (s->y == s->height) {
+ s->state |= PNG_ALLIMAGE;
+ }
+ } else {
+ got_line = 0;
+ for(;;) {
+ ptr = s->image_buf + s->image_linesize * s->y;
+ if ((png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
+                /* if we already read one row, it is time to stop and
+                   wait for the next one */
+ if (got_line)
+ break;
+ png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
+ s->last_row, s->pass_row_size, s->bpp);
+ memcpy(s->last_row, s->tmp_row, s->pass_row_size);
+ got_line = 1;
+ }
+ if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
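+    /* time_unit is the duration of one unit in 100 ns ticks and spu the
+       samples per unit, so the stream rate works out to
+       spu * 10000000 / time_unit */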
+ /* NOTE: rgba32 is handled directly in png_put_interlaced_row */
+ png_put_interlaced_row(ptr, s->width, s->bits_per_pixel, s->pass,
+ s->color_type, s->last_row);
+ }
+ s->y++;
+ if (s->y == s->height) {
+ for(;;) {
+ if (s->pass == NB_PASSES - 1) {
+ s->state |= PNG_ALLIMAGE;
+ goto the_end;
+ } else {
+ s->pass++;
+ s->y = 0;
+ s->pass_row_size = png_pass_row_size(s->pass,
+ s->bits_per_pixel,
+ s->width);
+ s->crow_size = s->pass_row_size + 1;
+ if (s->pass_row_size != 0)
+ break;
+ /* skip pass if empty row */
+ }
+ }
+ }
+ }
+ the_end: ;
+ }
+}
+
+static int png_decode_idat(PNGDecodeState *s, ByteIOContext *f, int length)
+{
+ uint8_t buf[IOBUF_SIZE];
+ int buf_size;
+ int ret;
+ while (length > 0) {
+ /* read the buffer */
+ buf_size = IOBUF_SIZE;
+ if (buf_size > length)
+ buf_size = length;
+ ret = get_buffer(f, buf, buf_size);
+ if (ret != buf_size)
+ return -1;
+ s->zstream.avail_in = buf_size;
+ s->zstream.next_in = buf;
+ /* decode one line if possible */
+ while (s->zstream.avail_in > 0) {
+ ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
+ if (ret != Z_OK && ret != Z_STREAM_END) {
+ return -1;
+ }
+ if (s->zstream.avail_out == 0) {
+ if (!(s->state & PNG_ALLIMAGE)) {
+ png_handle_row(s);
+ }
+ s->zstream.avail_out = s->crow_size;
+ s->zstream.next_out = s->crow_buf;
+ }
+ }
+ length -= buf_size;
+ }
+ return 0;
+}
+
+static int png_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ AVImageInfo info1, *info = &info1;
+ PNGDecodeState s1, *s = &s1;
+ uint32_t tag, length;
+ int ret, crc;
+ uint8_t buf[8];
+
+ /* check signature */
+ ret = get_buffer(f, buf, 8);
+ if (ret != 8)
+ return -1;
+ if (memcmp(buf, pngsig, 8) != 0)
+ return -1;
+ memset(s, 0, sizeof(PNGDecodeState));
+ /* init the zlib */
+ s->zstream.zalloc = png_zalloc;
+ s->zstream.zfree = png_zfree;
+ s->zstream.opaque = NULL;
+ ret = inflateInit(&s->zstream);
+ if (ret != Z_OK)
+ return -1;
+ for(;;) {
+ if (url_feof(f))
+ goto fail;
+ length = get_be32(f);
+ if (length > 0x7fffffff)
+ goto fail;
+ tag = get_le32(f);
+#ifdef DEBUG
+ printf("png: tag=%c%c%c%c length=%u\n",
+ (tag & 0xff),
+ ((tag >> 8) & 0xff),
+ ((tag >> 16) & 0xff),
+ ((tag >> 24) & 0xff), length);
+#endif
+ switch(tag) {
+ case MKTAG('I', 'H', 'D', 'R'):
+ if (length != 13)
+ goto fail;
+ s->width = get_be32(f);
+ s->height = get_be32(f);
+ s->bit_depth = get_byte(f);
+ s->color_type = get_byte(f);
+ s->compression_type = get_byte(f);
+ s->filter_type = get_byte(f);
+ s->interlace_type = get_byte(f);
+ crc = get_be32(f);
+ s->state |= PNG_IHDR;
+#ifdef DEBUG
+ printf("width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
+ s->width, s->height, s->bit_depth, s->color_type,
+ s->compression_type, s->filter_type, s->interlace_type);
+#endif
+ break;
+ case MKTAG('I', 'D', 'A', 'T'):
+ if (!(s->state & PNG_IHDR))
+ goto fail;
+ if (!(s->state & PNG_IDAT)) {
+ /* init image info */
+ info->width = s->width;
+ info->height = s->height;
+ info->interleaved = (s->interlace_type != 0);
+
+ s->channels = png_get_nb_channels(s->color_type);
+ s->bits_per_pixel = s->bit_depth * s->channels;
+ s->bpp = (s->bits_per_pixel + 7) >> 3;
+ s->row_size = (info->width * s->bits_per_pixel + 7) >> 3;
+
+ if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_RGB) {
+ info->pix_fmt = PIX_FMT_RGB24;
+ } else if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ info->pix_fmt = PIX_FMT_RGBA32;
+ } else if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_GRAY) {
+ info->pix_fmt = PIX_FMT_GRAY8;
+ } else if (s->bit_depth == 1 &&
+ s->color_type == PNG_COLOR_TYPE_GRAY) {
+ info->pix_fmt = PIX_FMT_MONOBLACK;
+ } else if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
+ info->pix_fmt = PIX_FMT_PAL8;
+ } else {
+ goto fail;
+ }
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ goto the_end;
+
+ /* compute the compressed row size */
+ if (!s->interlace_type) {
+ s->crow_size = s->row_size + 1;
+ } else {
+ s->pass = 0;
+ s->pass_row_size = png_pass_row_size(s->pass,
+ s->bits_per_pixel,
+ s->width);
+ s->crow_size = s->pass_row_size + 1;
+ }
+#ifdef DEBUG
+ printf("row_size=%d crow_size =%d\n",
+ s->row_size, s->crow_size);
+#endif
+ s->image_buf = info->pict.data[0];
+ s->image_linesize = info->pict.linesize[0];
+ /* copy the palette if needed */
+ if (s->color_type == PNG_COLOR_TYPE_PALETTE)
+ memcpy(info->pict.data[1], s->palette, 256 * sizeof(uint32_t));
+                    /* an all-zero row serves as the reference when
+                       filtering the first row */
+ s->last_row = av_mallocz(s->row_size);
+ if (!s->last_row)
+ goto fail;
+ if (s->interlace_type ||
+ s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ s->tmp_row = av_malloc(s->row_size);
+ if (!s->tmp_row)
+ goto fail;
+ }
+ /* compressed row */
+ s->crow_buf = av_malloc(s->row_size + 1);
+ if (!s->crow_buf)
+ goto fail;
+ s->zstream.avail_out = s->crow_size;
+ s->zstream.next_out = s->crow_buf;
+ }
+ s->state |= PNG_IDAT;
+ if (png_decode_idat(s, f, length) < 0)
+ goto fail;
+ /* skip crc */
+ crc = get_be32(f);
+ break;
+ case MKTAG('P', 'L', 'T', 'E'):
+ {
+ int n, i, r, g, b;
+
+ if ((length % 3) != 0 || length > 256 * 3)
+ goto skip_tag;
+ /* read the palette */
+ n = length / 3;
+ for(i=0;i<n;i++) {
+ r = get_byte(f);
+ g = get_byte(f);
+ b = get_byte(f);
+ s->palette[i] = (0xff << 24) | (r << 16) | (g << 8) | b;
+ }
+ for(;i<256;i++) {
+ s->palette[i] = (0xff << 24);
+ }
+ s->state |= PNG_PLTE;
+ crc = get_be32(f);
+ }
+ break;
+ case MKTAG('t', 'R', 'N', 'S'):
+ {
+ int v, i;
+
+ /* read the transparency. XXX: Only palette mode supported */
+ if (s->color_type != PNG_COLOR_TYPE_PALETTE ||
+ length > 256 ||
+ !(s->state & PNG_PLTE))
+ goto skip_tag;
+ for(i=0;i<length;i++) {
+ v = get_byte(f);
+ s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
+ }
+ crc = get_be32(f);
+ }
+ break;
+ case MKTAG('I', 'E', 'N', 'D'):
+ if (!(s->state & PNG_ALLIMAGE))
+ goto fail;
+ crc = get_be32(f);
+ goto exit_loop;
+ default:
+ /* skip tag */
+ skip_tag:
+ url_fskip(f, length + 4);
+ break;
+ }
+ }
+ exit_loop:
+ ret = 0;
+ the_end:
+ inflateEnd(&s->zstream);
+ av_free(s->crow_buf);
+ av_free(s->last_row);
+ av_free(s->tmp_row);
+ return ret;
+ fail:
+ ret = -1;
+ goto the_end;
+}
+
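+/* Write one PNG chunk: 32-bit length, 4-byte type, data, then a CRC32 that
+   covers the type and data but not the length field. */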
+static void png_write_chunk(ByteIOContext *f, uint32_t tag,
+ const uint8_t *buf, int length)
+{
+ uint32_t crc;
+ uint8_t tagbuf[4];
+
+ put_be32(f, length);
+ crc = crc32(0, Z_NULL, 0);
+ tagbuf[0] = tag;
+ tagbuf[1] = tag >> 8;
+ tagbuf[2] = tag >> 16;
+ tagbuf[3] = tag >> 24;
+ crc = crc32(crc, tagbuf, 4);
+ put_le32(f, tag);
+ if (length > 0) {
+ crc = crc32(crc, buf, length);
+ put_buffer(f, buf, length);
+ }
+ put_be32(f, crc);
+}
+
+/* XXX: use avcodec generic function ? */
+static void to_be32(uint8_t *p, uint32_t v)
+{
+ p[0] = v >> 24;
+ p[1] = v >> 16;
+ p[2] = v >> 8;
+ p[3] = v;
+}
+
+typedef struct PNGEncodeState {
+ ByteIOContext *f;
+ z_stream zstream;
+ uint8_t buf[IOBUF_SIZE];
+} PNGEncodeState;
+
+
+/* XXX: do filtering */
+static int png_write_row(PNGEncodeState *s, const uint8_t *data, int size)
+{
+ int ret;
+
+ s->zstream.avail_in = size;
+ s->zstream.next_in = (uint8_t *)data;
+ while (s->zstream.avail_in > 0) {
+ ret = deflate(&s->zstream, Z_NO_FLUSH);
+ if (ret != Z_OK)
+ return -1;
+ if (s->zstream.avail_out == 0) {
+ png_write_chunk(s->f, MKTAG('I', 'D', 'A', 'T'), s->buf, IOBUF_SIZE);
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ }
+ }
+ return 0;
+}
+
+static int png_write(ByteIOContext *f, AVImageInfo *info)
+{
+ PNGEncodeState s1, *s = &s1;
+ int bit_depth, color_type, y, len, row_size, ret, is_progressive;
+ int bits_per_pixel, pass_row_size;
+ uint8_t *ptr;
+ uint8_t *crow_buf = NULL;
+ uint8_t *tmp_buf = NULL;
+
+ s->f = f;
+ is_progressive = info->interleaved;
+ switch(info->pix_fmt) {
+ case PIX_FMT_RGBA32:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_RGB_ALPHA;
+ break;
+ case PIX_FMT_RGB24:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_RGB;
+ break;
+ case PIX_FMT_GRAY8:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case PIX_FMT_MONOBLACK:
+ bit_depth = 1;
+ color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case PIX_FMT_PAL8:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_PALETTE;
+ break;
+ default:
+ return -1;
+ }
+ bits_per_pixel = png_get_nb_channels(color_type) * bit_depth;
+ row_size = (info->width * bits_per_pixel + 7) >> 3;
+
+ s->zstream.zalloc = png_zalloc;
+ s->zstream.zfree = png_zfree;
+ s->zstream.opaque = NULL;
+ ret = deflateInit2(&s->zstream, Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, 15, 8, Z_DEFAULT_STRATEGY);
+ if (ret != Z_OK)
+ return -1;
+ crow_buf = av_malloc(row_size + 1);
+ if (!crow_buf)
+ goto fail;
+ if (is_progressive) {
+ tmp_buf = av_malloc(row_size + 1);
+ if (!tmp_buf)
+ goto fail;
+ }
+
+ /* write png header */
+ put_buffer(f, pngsig, 8);
+
+ to_be32(s->buf, info->width);
+ to_be32(s->buf + 4, info->height);
+ s->buf[8] = bit_depth;
+ s->buf[9] = color_type;
+ s->buf[10] = 0; /* compression type */
+ s->buf[11] = 0; /* filter type */
+ s->buf[12] = is_progressive; /* interlace type */
+
+ png_write_chunk(f, MKTAG('I', 'H', 'D', 'R'), s->buf, 13);
+
+ /* put the palette if needed */
+ if (color_type == PNG_COLOR_TYPE_PALETTE) {
+ int has_alpha, alpha, i;
+ unsigned int v;
+ uint32_t *palette;
+ uint8_t *alpha_ptr;
+
+ palette = (uint32_t *)info->pict.data[1];
+ ptr = s->buf;
+ alpha_ptr = s->buf + 256 * 3;
+ has_alpha = 0;
+ for(i = 0; i < 256; i++) {
+ v = palette[i];
+ alpha = v >> 24;
+ if (alpha != 0xff)
+ has_alpha = 1;
+ *alpha_ptr++ = alpha;
+ ptr[0] = v >> 16;
+ ptr[1] = v >> 8;
+ ptr[2] = v;
+ ptr += 3;
+ }
+ png_write_chunk(f, MKTAG('P', 'L', 'T', 'E'), s->buf, 256 * 3);
+ if (has_alpha) {
+ png_write_chunk(f, MKTAG('t', 'R', 'N', 'S'), s->buf + 256 * 3, 256);
+ }
+ }
+
+ /* now put each row */
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ if (is_progressive) {
+ uint8_t *ptr1;
+ int pass;
+
+ for(pass = 0; pass < NB_PASSES; pass++) {
+            /* NOTE: a pass is completely omitted if no pixels would be
+               output */
+ pass_row_size = png_pass_row_size(pass, bits_per_pixel, info->width);
+ if (pass_row_size > 0) {
+ for(y = 0; y < info->height; y++) {
+ if ((png_pass_ymask[pass] << (y & 7)) & 0x80) {
+ ptr = info->pict.data[0] + y * info->pict.linesize[0];
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ convert_from_rgba32(tmp_buf, ptr, info->width);
+ ptr1 = tmp_buf;
+ } else {
+ ptr1 = ptr;
+ }
+ png_get_interlaced_row(crow_buf + 1, pass_row_size,
+ bits_per_pixel, pass,
+ ptr1, info->width);
+ crow_buf[0] = PNG_FILTER_VALUE_NONE;
+ png_write_row(s, crow_buf, pass_row_size + 1);
+ }
+ }
+ }
+ }
+ } else {
+ for(y = 0; y < info->height; y++) {
+ ptr = info->pict.data[0] + y * info->pict.linesize[0];
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
+ convert_from_rgba32(crow_buf + 1, ptr, info->width);
+ else
+ memcpy(crow_buf + 1, ptr, row_size);
+ crow_buf[0] = PNG_FILTER_VALUE_NONE;
+ png_write_row(s, crow_buf, row_size + 1);
+ }
+ }
+ /* compress last bytes */
+ for(;;) {
+ ret = deflate(&s->zstream, Z_FINISH);
+ if (ret == Z_OK || ret == Z_STREAM_END) {
+ len = IOBUF_SIZE - s->zstream.avail_out;
+ if (len > 0) {
+ png_write_chunk(f, MKTAG('I', 'D', 'A', 'T'), s->buf, len);
+ }
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ if (ret == Z_STREAM_END)
+ break;
+ } else {
+ goto fail;
+ }
+ }
+ png_write_chunk(f, MKTAG('I', 'E', 'N', 'D'), NULL, 0);
+
+ put_flush_packet(f);
+ ret = 0;
+ the_end:
+ av_free(crow_buf);
+ av_free(tmp_buf);
+ deflateEnd(&s->zstream);
+ return ret;
+ fail:
+ ret = -1;
+ goto the_end;
+}
+
+AVImageFormat png_image_format = {
+ "png",
+ "png",
+ png_probe,
+ png_read,
+ (1 << PIX_FMT_RGBA32) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_GRAY8) |
+ (1 << PIX_FMT_MONOBLACK) | (1 << PIX_FMT_PAL8),
+ png_write,
+ AVIMAGE_INTERLEAVED,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/pnm.c b/contrib/ffmpeg/libavformat/pnm.c
new file mode 100644
index 000000000..ade5d7c5d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/pnm.c
@@ -0,0 +1,478 @@
+/*
+ * PNM image format
+ * Copyright (c) 2002, 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+static inline int pnm_space(int c)
+{
+ return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
+}
+
+static void pnm_get(ByteIOContext *f, char *str, int buf_size)
+{
+ char *s;
+ int c;
+
+ /* skip spaces and comments */
+ for(;;) {
+ c = url_fgetc(f);
+ if (c == '#') {
+ do {
+ c = url_fgetc(f);
+ } while (c != '\n' && c != URL_EOF);
+ } else if (!pnm_space(c)) {
+ break;
+ }
+ }
+
+ s = str;
+ while (c != URL_EOF && !pnm_space(c)) {
+ if ((s - str) < buf_size - 1)
+ *s++ = c;
+ c = url_fgetc(f);
+ }
+ *s = '\0';
+}
+
+static int pnm_read1(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque,
+ int allow_yuv)
+{
+ int i, n, linesize, h;
+ char buf1[32];
+ unsigned char *ptr;
+ AVImageInfo info1, *info = &info1;
+ int ret;
+
+ pnm_get(f, buf1, sizeof(buf1));
+ if (!strcmp(buf1, "P4")) {
+ info->pix_fmt = PIX_FMT_MONOWHITE;
+ } else if (!strcmp(buf1, "P5")) {
+ if (allow_yuv)
+ info->pix_fmt = PIX_FMT_YUV420P;
+ else
+ info->pix_fmt = PIX_FMT_GRAY8;
+ } else if (!strcmp(buf1, "P6")) {
+ info->pix_fmt = PIX_FMT_RGB24;
+ } else {
+ return AVERROR_INVALIDDATA;
+ }
+ pnm_get(f, buf1, sizeof(buf1));
+ info->width = atoi(buf1);
+ if (info->width <= 0)
+ return AVERROR_INVALIDDATA;
+ pnm_get(f, buf1, sizeof(buf1));
+ info->height = atoi(buf1);
+ if (info->height <= 0)
+ return AVERROR_INVALIDDATA;
+ if (info->pix_fmt != PIX_FMT_MONOWHITE) {
+ pnm_get(f, buf1, sizeof(buf1));
+ }
+
+    /* extra checks for YUV420 (pgmyuv): the stored height is 3/2 of the
+       picture height because the half-height chroma planes follow the luma */
+ if (info->pix_fmt == PIX_FMT_YUV420P) {
+ if ((info->width & 1) != 0)
+ return AVERROR_INVALIDDATA;
+ h = (info->height * 2);
+ if ((h % 3) != 0)
+ return AVERROR_INVALIDDATA;
+ h /= 3;
+ info->height = h;
+ }
+
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ return ret;
+
+ switch(info->pix_fmt) {
+ default:
+ return AVERROR_INVALIDDATA;
+ case PIX_FMT_RGB24:
+ n = info->width * 3;
+ goto do_read;
+ case PIX_FMT_GRAY8:
+ n = info->width;
+ goto do_read;
+ case PIX_FMT_MONOWHITE:
+ n = (info->width + 7) >> 3;
+ do_read:
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i = 0; i < info->height; i++) {
+ get_buffer(f, ptr, n);
+ ptr += linesize;
+ }
+ break;
+ case PIX_FMT_YUV420P:
+ {
+ unsigned char *ptr1, *ptr2;
+
+ n = info->width;
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i = 0; i < info->height; i++) {
+ get_buffer(f, ptr, n);
+ ptr += linesize;
+ }
+ ptr1 = info->pict.data[1];
+ ptr2 = info->pict.data[2];
+ n >>= 1;
+ h = info->height >> 1;
+ for(i = 0; i < h; i++) {
+ get_buffer(f, ptr1, n);
+ get_buffer(f, ptr2, n);
+ ptr1 += info->pict.linesize[1];
+ ptr2 += info->pict.linesize[2];
+ }
+ }
+ break;
+ }
+ return 0;
+}
+
+static int pnm_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ return pnm_read1(f, alloc_cb, opaque, 0);
+}
+
+static int pgmyuv_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ return pnm_read1(f, alloc_cb, opaque, 1);
+}
+
+static int pnm_write(ByteIOContext *pb, AVImageInfo *info)
+{
+ int i, h, h1, c, n, linesize;
+ char buf[100];
+ uint8_t *ptr, *ptr1, *ptr2;
+
+ h = info->height;
+ h1 = h;
+ switch(info->pix_fmt) {
+ case PIX_FMT_MONOWHITE:
+ c = '4';
+ n = (info->width + 7) >> 3;
+ break;
+ case PIX_FMT_GRAY8:
+ c = '5';
+ n = info->width;
+ break;
+ case PIX_FMT_RGB24:
+ c = '6';
+ n = info->width * 3;
+ break;
+ case PIX_FMT_YUV420P:
+ c = '5';
+ n = info->width;
+ h1 = (h * 3) / 2;
+ break;
+ default:
+ return AVERROR_INVALIDDATA;
+ }
+ snprintf(buf, sizeof(buf),
+ "P%c\n%d %d\n",
+ c, info->width, h1);
+ put_buffer(pb, buf, strlen(buf));
+ if (info->pix_fmt != PIX_FMT_MONOWHITE) {
+ snprintf(buf, sizeof(buf),
+ "%d\n", 255);
+ put_buffer(pb, buf, strlen(buf));
+ }
+
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i=0;i<h;i++) {
+ put_buffer(pb, ptr, n);
+ ptr += linesize;
+ }
+
+ if (info->pix_fmt == PIX_FMT_YUV420P) {
+ h >>= 1;
+ n >>= 1;
+ ptr1 = info->pict.data[1];
+ ptr2 = info->pict.data[2];
+ for(i=0;i<h;i++) {
+ put_buffer(pb, ptr1, n);
+ put_buffer(pb, ptr2, n);
+ ptr1 += info->pict.linesize[1];
+ ptr2 += info->pict.linesize[2];
+ }
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int pam_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ int i, n, linesize, h, w, depth, maxval;
+ char buf1[32], tuple_type[32];
+ unsigned char *ptr;
+ AVImageInfo info1, *info = &info1;
+ int ret;
+
+ pnm_get(f, buf1, sizeof(buf1));
+ if (strcmp(buf1, "P7") != 0)
+ return AVERROR_INVALIDDATA;
+ w = -1;
+ h = -1;
+ maxval = -1;
+ depth = -1;
+ tuple_type[0] = '\0';
+ for(;;) {
+ pnm_get(f, buf1, sizeof(buf1));
+ if (!strcmp(buf1, "WIDTH")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ w = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "HEIGHT")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ h = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "DEPTH")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ depth = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "MAXVAL")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ maxval = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "TUPLETYPE")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ pstrcpy(tuple_type, sizeof(tuple_type), buf1);
+ } else if (!strcmp(buf1, "ENDHDR")) {
+ break;
+ } else {
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ /* check that all tags are present */
+ if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0')
+ return AVERROR_INVALIDDATA;
+ info->width = w;
+ info->height = h;
+ if (depth == 1) {
+ if (maxval == 1)
+ info->pix_fmt = PIX_FMT_MONOWHITE;
+ else
+ info->pix_fmt = PIX_FMT_GRAY8;
+ } else if (depth == 3) {
+ info->pix_fmt = PIX_FMT_RGB24;
+ } else if (depth == 4) {
+ info->pix_fmt = PIX_FMT_RGBA32;
+ } else {
+ return AVERROR_INVALIDDATA;
+ }
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ return ret;
+
+ switch(info->pix_fmt) {
+ default:
+ return AVERROR_INVALIDDATA;
+ case PIX_FMT_RGB24:
+ n = info->width * 3;
+ goto do_read;
+ case PIX_FMT_GRAY8:
+ n = info->width;
+ goto do_read;
+ case PIX_FMT_MONOWHITE:
+ n = (info->width + 7) >> 3;
+ do_read:
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i = 0; i < info->height; i++) {
+ get_buffer(f, ptr, n);
+ ptr += linesize;
+ }
+ break;
+ case PIX_FMT_RGBA32:
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i = 0; i < info->height; i++) {
+ int j, r, g, b, a;
+
+ for(j = 0;j < w; j++) {
+ r = get_byte(f);
+ g = get_byte(f);
+ b = get_byte(f);
+ a = get_byte(f);
+ ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
+ }
+ ptr += linesize;
+ }
+ break;
+ }
+ return 0;
+}
+
+static int pam_write(ByteIOContext *pb, AVImageInfo *info)
+{
+ int i, h, w, n, linesize, depth, maxval;
+ const char *tuple_type;
+ char buf[100];
+ uint8_t *ptr;
+
+ h = info->height;
+ w = info->width;
+ switch(info->pix_fmt) {
+ case PIX_FMT_MONOWHITE:
+ n = (info->width + 7) >> 3;
+ depth = 1;
+ maxval = 1;
+ tuple_type = "BLACKANDWHITE";
+ break;
+ case PIX_FMT_GRAY8:
+ n = info->width;
+ depth = 1;
+ maxval = 255;
+ tuple_type = "GRAYSCALE";
+ break;
+ case PIX_FMT_RGB24:
+ n = info->width * 3;
+ depth = 3;
+ maxval = 255;
+ tuple_type = "RGB";
+ break;
+ case PIX_FMT_RGBA32:
+ n = info->width * 4;
+ depth = 4;
+ maxval = 255;
+ tuple_type = "RGB_ALPHA";
+ break;
+ default:
+ return AVERROR_INVALIDDATA;
+ }
+ snprintf(buf, sizeof(buf),
+ "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
+ w, h, depth, maxval, tuple_type);
+ put_buffer(pb, buf, strlen(buf));
+
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+
+ if (info->pix_fmt == PIX_FMT_RGBA32) {
+ int j;
+ unsigned int v;
+
+ for(i=0;i<h;i++) {
+ for(j=0;j<w;j++) {
+ v = ((uint32_t *)ptr)[j];
+ put_byte(pb, (v >> 16) & 0xff);
+ put_byte(pb, (v >> 8) & 0xff);
+ put_byte(pb, (v) & 0xff);
+ put_byte(pb, (v >> 24) & 0xff);
+ }
+ ptr += linesize;
+ }
+ } else {
+ for(i=0;i<h;i++) {
+ put_buffer(pb, ptr, n);
+ ptr += linesize;
+ }
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int pnm_probe(AVProbeData *pd)
+{
+ const char *p = pd->buf;
+ if (pd->buf_size >= 8 &&
+ p[0] == 'P' &&
+ p[1] >= '4' && p[1] <= '6' &&
+ pnm_space(p[2]) )
+ return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
+ else
+ return 0;
+}
+
+static int pgmyuv_probe(AVProbeData *pd)
+{
+ if (match_ext(pd->filename, "pgmyuv"))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int pam_probe(AVProbeData *pd)
+{
+ const char *p = pd->buf;
+ if (pd->buf_size >= 8 &&
+ p[0] == 'P' &&
+ p[1] == '7' &&
+ p[2] == '\n')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+AVImageFormat pnm_image_format = {
+ "pnm",
+ NULL,
+ pnm_probe,
+ pnm_read,
+ 0,
+ NULL,
+};
+
+AVImageFormat pbm_image_format = {
+ "pbm",
+ "pbm",
+ NULL,
+ NULL,
+ (1 << PIX_FMT_MONOWHITE),
+ pnm_write,
+};
+
+AVImageFormat pgm_image_format = {
+ "pgm",
+ "pgm",
+ NULL,
+ NULL,
+ (1 << PIX_FMT_GRAY8),
+ pnm_write,
+};
+
+AVImageFormat ppm_image_format = {
+ "ppm",
+ "ppm",
+ NULL,
+ NULL,
+ (1 << PIX_FMT_RGB24),
+ pnm_write,
+};
+
+AVImageFormat pam_image_format = {
+ "pam",
+ "pam",
+ pam_probe,
+ pam_read,
+ (1 << PIX_FMT_MONOWHITE) | (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) |
+ (1 << PIX_FMT_RGBA32),
+ pam_write,
+};
+
+AVImageFormat pgmyuv_image_format = {
+ "pgmyuv",
+ "pgmyuv",
+ pgmyuv_probe,
+ pgmyuv_read,
+ (1 << PIX_FMT_YUV420P),
+ pnm_write,
+};
diff --git a/contrib/ffmpeg/libavformat/psxstr.c b/contrib/ffmpeg/libavformat/psxstr.c
new file mode 100644
index 000000000..b03f65750
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/psxstr.c
@@ -0,0 +1,364 @@
+/*
+ * Sony Playstation (PSX) STR File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file psxstr.c
+ * PSX STR file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * This module handles streams that have been ripped from Sony Playstation
+ * CD games. This demuxer can handle either raw STR files (which are just
+ * concatenations of raw compact disc sectors) or STR files with 0x2C-byte
+ * RIFF headers, followed by CD sectors.
+ */
+
+#include "avformat.h"
+
+//#define PRINTSTUFF
+
+#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
+#define CDXA_TAG MKTAG('C', 'D', 'X', 'A')
+
+#define RAW_CD_SECTOR_SIZE 2352
+#define RAW_CD_SECTOR_DATA_SIZE 2304
+#define VIDEO_DATA_CHUNK_SIZE 0x7E0
+#define VIDEO_DATA_HEADER_SIZE 0x38
+#define RIFF_HEADER_SIZE 0x2C
+
+#define CDXA_TYPE_MASK 0x0E
+#define CDXA_TYPE_DATA 0x08
+#define CDXA_TYPE_AUDIO 0x04
+#define CDXA_TYPE_VIDEO 0x02
+
+#define STR_MAGIC (0x80010160)
+
+typedef struct StrChannel {
+
+ int type;
+#define STR_AUDIO 0
+#define STR_VIDEO 1
+
+ /* video parameters */
+ int width;
+ int height;
+ int video_stream_index;
+
+ /* audio parameters */
+ int sample_rate;
+ int channels;
+ int bits;
+ int audio_stream_index;
+} StrChannel;
+
+typedef struct StrDemuxContext {
+
+ /* a STR file can contain up to 32 channels of data */
+ StrChannel channels[32];
+
+ /* only decode the first audio and video channels encountered */
+ int video_channel;
+ int audio_channel;
+
+ int64_t pts;
+
+ unsigned char *video_chunk;
+ AVPacket tmp_pkt;
+} StrDemuxContext;
+
+static const char sync_header[12] = {0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00};
+
+static int str_probe(AVProbeData *p)
+{
+ int start;
+
+ /* need at least 0x38 bytes to validate */
+ if (p->buf_size < 0x38)
+ return 0;
+
+ if ((LE_32(&p->buf[0]) == RIFF_TAG) &&
+ (LE_32(&p->buf[8]) == CDXA_TAG)) {
+
+ /* RIFF header seen; skip 0x2C bytes */
+ start = RIFF_HEADER_SIZE;
+ } else
+ start = 0;
+
+ /* look for CD sync header (00, 0xFF x 10, 00) */
+ if (memcmp(p->buf+start,sync_header,sizeof(sync_header)))
+ return 0;
+
+ /* MPEG files (like those ripped from VCDs) can also look like this;
+ * only return half certainty */
+ return 50;
+}
+
+#if 0
+static void dump(unsigned char *buf,size_t len)
+{
+ int i;
+ for(i=0;i<len;i++) {
+ if ((i&15)==0) av_log(NULL, AV_LOG_DEBUG, "%04x ",i);
+ av_log(NULL, AV_LOG_DEBUG, "%02x ",buf[i]);
+ if ((i&15)==15) av_log(NULL, AV_LOG_DEBUG, "\n");
+ }
+ av_log(NULL, AV_LOG_DEBUG, "\n");
+}
+#endif
+
+static int str_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
+ AVStream *st;
+ unsigned char sector[RAW_CD_SECTOR_SIZE];
+ int start;
+ int i;
+ int channel;
+
+ /* initialize context members */
+ str->pts = 0;
+    str->audio_channel = -1;   /* assume no audio or video */
+ str->video_channel = -1;
+ str->video_chunk = NULL;
+
+
+ /* skip over any RIFF header */
+ if (get_buffer(pb, sector, RIFF_HEADER_SIZE) != RIFF_HEADER_SIZE)
+ return AVERROR_IO;
+ if (LE_32(&sector[0]) == RIFF_TAG)
+ start = RIFF_HEADER_SIZE;
+ else
+ start = 0;
+
+ url_fseek(pb, start, SEEK_SET);
+
+ /* check through the first 32 sectors for individual channels */
+ for (i = 0; i < 32; i++) {
+ if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
+ return AVERROR_IO;
+
+//printf("%02x %02x %02x %02x\n",sector[0x10],sector[0x11],sector[0x12],sector[0x13]);
+
+ channel = sector[0x11];
+ if (channel >= 32)
+ return AVERROR_INVALIDDATA;
+
+ switch (sector[0x12] & CDXA_TYPE_MASK) {
+
+ case CDXA_TYPE_DATA:
+ case CDXA_TYPE_VIDEO:
+ /* check if this channel gets to be the dominant video channel */
+ if (str->video_channel == -1) {
+ /* qualify the magic number */
+ if (LE_32(&sector[0x18]) != STR_MAGIC)
+ break;
+ str->video_channel = channel;
+ str->channels[channel].type = STR_VIDEO;
+ str->channels[channel].width = LE_16(&sector[0x28]);
+ str->channels[channel].height = LE_16(&sector[0x2A]);
+
+ /* allocate a new AVStream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 64, 1, 15);
+
+ str->channels[channel].video_stream_index = st->index;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MDEC;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = str->channels[channel].width;
+ st->codec->height = str->channels[channel].height;
+ }
+ break;
+
+ case CDXA_TYPE_AUDIO:
+ /* check if this channel gets to be the dominant audio channel */
+ if (str->audio_channel == -1) {
+ int fmt;
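+                /* audio coding info byte (offset 0x13): bit 0 selects stereo,
+                 * bit 2 selects 18.9 kHz (instead of 37.8 kHz), bit 4 selects
+                 * 8-bit (instead of 4-bit) XA-ADPCM samples */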
+ str->audio_channel = channel;
+ str->channels[channel].type = STR_AUDIO;
+ str->channels[channel].channels =
+ (sector[0x13] & 0x01) ? 2 : 1;
+ str->channels[channel].sample_rate =
+ (sector[0x13] & 0x04) ? 18900 : 37800;
+ str->channels[channel].bits =
+ (sector[0x13] & 0x10) ? 8 : 4;
+
+ /* allocate a new AVStream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 64, 128, str->channels[channel].sample_rate);
+
+ str->channels[channel].audio_stream_index = st->index;
+
+ fmt = sector[0x13];
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_ADPCM_XA;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->channels = (fmt&1)?2:1;
+ st->codec->sample_rate = (fmt&4)?18900:37800;
+ // st->codec->bit_rate = 0; //FIXME;
+ st->codec->block_align = 128;
+ }
+ break;
+
+ default:
+ /* ignore */
+ break;
+ }
+ }
+
+    if (str->video_channel != -1)
+        av_log(s, AV_LOG_DEBUG, " video channel = %d, %d x %d %d\n",
+               str->video_channel,
+               str->channels[str->video_channel].width,
+               str->channels[str->video_channel].height,
+               str->channels[str->video_channel].video_stream_index);
+    if (str->audio_channel != -1)
+        av_log(s, AV_LOG_DEBUG, " audio channel = %d, %d Hz, %d channels, %d bits/sample %d\n",
+               str->audio_channel,
+               str->channels[str->audio_channel].sample_rate,
+               str->channels[str->audio_channel].channels,
+               str->channels[str->audio_channel].bits,
+               str->channels[str->audio_channel].audio_stream_index);
+
+ /* back to the start */
+ url_fseek(pb, start, SEEK_SET);
+
+ return 0;
+}
+
+static int str_read_packet(AVFormatContext *s,
+ AVPacket *ret_pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
+ unsigned char sector[RAW_CD_SECTOR_SIZE];
+ int channel;
+ int packet_read = 0;
+ int ret = 0;
+ AVPacket *pkt;
+
+ while (!packet_read) {
+
+ if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
+ return AVERROR_IO;
+
+ channel = sector[0x11];
+ if (channel >= 32)
+ return AVERROR_INVALIDDATA;
+
+ switch (sector[0x12] & CDXA_TYPE_MASK) {
+
+ case CDXA_TYPE_DATA:
+ case CDXA_TYPE_VIDEO:
+            /* check if this is the video channel we care about */
+ if (channel == str->video_channel) {
+
+ int current_sector = LE_16(&sector[0x1C]);
+ int sector_count = LE_16(&sector[0x1E]);
+ int frame_size = LE_32(&sector[0x24]);
+ int bytes_to_copy;
+// printf("%d %d %d\n",current_sector,sector_count,frame_size);
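+                /* a frame is spread over sector_count sectors, each carrying
+                 * up to VIDEO_DATA_CHUNK_SIZE payload bytes; the packet is
+                 * returned once the last sector has been copied in */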
+ /* if this is the first sector of the frame, allocate a pkt */
+ pkt = &str->tmp_pkt;
+ if (current_sector == 0) {
+ if (av_new_packet(pkt, frame_size))
+ return AVERROR_IO;
+
+ pkt->pos= url_ftell(pb) - RAW_CD_SECTOR_SIZE;
+ pkt->stream_index =
+ str->channels[channel].video_stream_index;
+ // pkt->pts = str->pts;
+
+ /* if there is no audio, adjust the pts after every video
+ * frame; assume 15 fps */
+ if (str->audio_channel != -1)
+ str->pts += (90000 / 15);
+ }
+
+ /* load all the constituent chunks in the video packet */
+ bytes_to_copy = frame_size - current_sector*VIDEO_DATA_CHUNK_SIZE;
+ if (bytes_to_copy>0) {
+ if (bytes_to_copy>VIDEO_DATA_CHUNK_SIZE) bytes_to_copy=VIDEO_DATA_CHUNK_SIZE;
+ memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
+ sector + VIDEO_DATA_HEADER_SIZE, bytes_to_copy);
+ }
+ if (current_sector == sector_count-1) {
+ *ret_pkt = *pkt;
+ return 0;
+ }
+
+ }
+ break;
+
+ case CDXA_TYPE_AUDIO:
+#ifdef PRINTSTUFF
+            printf (" dropping audio sector\n");
+#endif
+#if 1
+            /* check if this is the audio channel we care about */
+ if (channel == str->audio_channel) {
+ pkt = ret_pkt;
+ if (av_new_packet(pkt, 2304))
+ return AVERROR_IO;
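+                /* copy the 2304-byte XA audio data area, skipping the 24-byte
+                 * sector prefix (12-byte sync, 4-byte header and 8-byte
+                 * subheader of a mode 2 sector) */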
+ memcpy(pkt->data,sector+24,2304);
+
+ pkt->stream_index =
+ str->channels[channel].audio_stream_index;
+ //pkt->pts = str->pts;
+ return 0;
+ }
+#endif
+ break;
+ default:
+ /* drop the sector and move on */
+#ifdef PRINTSTUFF
+            printf (" dropping other sector\n");
+#endif
+ break;
+ }
+
+ if (url_feof(pb))
+ return AVERROR_IO;
+ }
+
+ return ret;
+}
+
+static int str_read_close(AVFormatContext *s)
+{
+ StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
+
+ av_free(str->video_chunk);
+
+ return 0;
+}
+
+AVInputFormat str_demuxer = {
+ "psxstr",
+ "Sony Playstation STR format",
+ sizeof(StrDemuxContext),
+ str_probe,
+ str_read_header,
+ str_read_packet,
+ str_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/qtpalette.h b/contrib/ffmpeg/libavformat/qtpalette.h
new file mode 100644
index 000000000..ef4ccfa91
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/qtpalette.h
@@ -0,0 +1,295 @@
+/*
+ * Default Palettes for Quicktime Files
+ * Automatically generated from a utility derived from XAnim:
+ * http://xanim.va.pubnix.com/home.html
+ */
+
+#ifndef QTPALETTE_H
+#define QTPALETTE_H
+
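+/* each palette entry is 4 bytes: red, green, blue and one unused byte */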
+unsigned char ff_qt_default_palette_4[4 * 4] = {
+ 0x93, 0x65, 0x5E, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00,
+ 0xDF, 0xD0, 0xAB, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+unsigned char ff_qt_default_palette_16[16 * 4] = {
+ 0xFF, 0xFB, 0xFF, 0x00,
+ 0xEF, 0xD9, 0xBB, 0x00,
+ 0xE8, 0xC9, 0xB1, 0x00,
+ 0x93, 0x65, 0x5E, 0x00,
+ 0xFC, 0xDE, 0xE8, 0x00,
+ 0x9D, 0x88, 0x91, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00,
+ 0x47, 0x48, 0x37, 0x00,
+ 0x7A, 0x5E, 0x55, 0x00,
+ 0xDF, 0xD0, 0xAB, 0x00,
+ 0xFF, 0xFB, 0xF9, 0x00,
+ 0xE8, 0xCA, 0xC5, 0x00,
+ 0x8A, 0x7C, 0x77, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+unsigned char ff_qt_default_palette_256[256 * 4] = {
+ /* 0, 0x00 */ 0xFF, 0xFF, 0xFF, 0x00,
+ /* 1, 0x01 */ 0xFF, 0xFF, 0xCC, 0x00,
+ /* 2, 0x02 */ 0xFF, 0xFF, 0x99, 0x00,
+ /* 3, 0x03 */ 0xFF, 0xFF, 0x66, 0x00,
+ /* 4, 0x04 */ 0xFF, 0xFF, 0x33, 0x00,
+ /* 5, 0x05 */ 0xFF, 0xFF, 0x00, 0x00,
+ /* 6, 0x06 */ 0xFF, 0xCC, 0xFF, 0x00,
+ /* 7, 0x07 */ 0xFF, 0xCC, 0xCC, 0x00,
+ /* 8, 0x08 */ 0xFF, 0xCC, 0x99, 0x00,
+ /* 9, 0x09 */ 0xFF, 0xCC, 0x66, 0x00,
+ /* 10, 0x0A */ 0xFF, 0xCC, 0x33, 0x00,
+ /* 11, 0x0B */ 0xFF, 0xCC, 0x00, 0x00,
+ /* 12, 0x0C */ 0xFF, 0x99, 0xFF, 0x00,
+ /* 13, 0x0D */ 0xFF, 0x99, 0xCC, 0x00,
+ /* 14, 0x0E */ 0xFF, 0x99, 0x99, 0x00,
+ /* 15, 0x0F */ 0xFF, 0x99, 0x66, 0x00,
+ /* 16, 0x10 */ 0xFF, 0x99, 0x33, 0x00,
+ /* 17, 0x11 */ 0xFF, 0x99, 0x00, 0x00,
+ /* 18, 0x12 */ 0xFF, 0x66, 0xFF, 0x00,
+ /* 19, 0x13 */ 0xFF, 0x66, 0xCC, 0x00,
+ /* 20, 0x14 */ 0xFF, 0x66, 0x99, 0x00,
+ /* 21, 0x15 */ 0xFF, 0x66, 0x66, 0x00,
+ /* 22, 0x16 */ 0xFF, 0x66, 0x33, 0x00,
+ /* 23, 0x17 */ 0xFF, 0x66, 0x00, 0x00,
+ /* 24, 0x18 */ 0xFF, 0x33, 0xFF, 0x00,
+ /* 25, 0x19 */ 0xFF, 0x33, 0xCC, 0x00,
+ /* 26, 0x1A */ 0xFF, 0x33, 0x99, 0x00,
+ /* 27, 0x1B */ 0xFF, 0x33, 0x66, 0x00,
+ /* 28, 0x1C */ 0xFF, 0x33, 0x33, 0x00,
+ /* 29, 0x1D */ 0xFF, 0x33, 0x00, 0x00,
+ /* 30, 0x1E */ 0xFF, 0x00, 0xFF, 0x00,
+ /* 31, 0x1F */ 0xFF, 0x00, 0xCC, 0x00,
+ /* 32, 0x20 */ 0xFF, 0x00, 0x99, 0x00,
+ /* 33, 0x21 */ 0xFF, 0x00, 0x66, 0x00,
+ /* 34, 0x22 */ 0xFF, 0x00, 0x33, 0x00,
+ /* 35, 0x23 */ 0xFF, 0x00, 0x00, 0x00,
+ /* 36, 0x24 */ 0xCC, 0xFF, 0xFF, 0x00,
+ /* 37, 0x25 */ 0xCC, 0xFF, 0xCC, 0x00,
+ /* 38, 0x26 */ 0xCC, 0xFF, 0x99, 0x00,
+ /* 39, 0x27 */ 0xCC, 0xFF, 0x66, 0x00,
+ /* 40, 0x28 */ 0xCC, 0xFF, 0x33, 0x00,
+ /* 41, 0x29 */ 0xCC, 0xFF, 0x00, 0x00,
+ /* 42, 0x2A */ 0xCC, 0xCC, 0xFF, 0x00,
+ /* 43, 0x2B */ 0xCC, 0xCC, 0xCC, 0x00,
+ /* 44, 0x2C */ 0xCC, 0xCC, 0x99, 0x00,
+ /* 45, 0x2D */ 0xCC, 0xCC, 0x66, 0x00,
+ /* 46, 0x2E */ 0xCC, 0xCC, 0x33, 0x00,
+ /* 47, 0x2F */ 0xCC, 0xCC, 0x00, 0x00,
+ /* 48, 0x30 */ 0xCC, 0x99, 0xFF, 0x00,
+ /* 49, 0x31 */ 0xCC, 0x99, 0xCC, 0x00,
+ /* 50, 0x32 */ 0xCC, 0x99, 0x99, 0x00,
+ /* 51, 0x33 */ 0xCC, 0x99, 0x66, 0x00,
+ /* 52, 0x34 */ 0xCC, 0x99, 0x33, 0x00,
+ /* 53, 0x35 */ 0xCC, 0x99, 0x00, 0x00,
+ /* 54, 0x36 */ 0xCC, 0x66, 0xFF, 0x00,
+ /* 55, 0x37 */ 0xCC, 0x66, 0xCC, 0x00,
+ /* 56, 0x38 */ 0xCC, 0x66, 0x99, 0x00,
+ /* 57, 0x39 */ 0xCC, 0x66, 0x66, 0x00,
+ /* 58, 0x3A */ 0xCC, 0x66, 0x33, 0x00,
+ /* 59, 0x3B */ 0xCC, 0x66, 0x00, 0x00,
+ /* 60, 0x3C */ 0xCC, 0x33, 0xFF, 0x00,
+ /* 61, 0x3D */ 0xCC, 0x33, 0xCC, 0x00,
+ /* 62, 0x3E */ 0xCC, 0x33, 0x99, 0x00,
+ /* 63, 0x3F */ 0xCC, 0x33, 0x66, 0x00,
+ /* 64, 0x40 */ 0xCC, 0x33, 0x33, 0x00,
+ /* 65, 0x41 */ 0xCC, 0x33, 0x00, 0x00,
+ /* 66, 0x42 */ 0xCC, 0x00, 0xFF, 0x00,
+ /* 67, 0x43 */ 0xCC, 0x00, 0xCC, 0x00,
+ /* 68, 0x44 */ 0xCC, 0x00, 0x99, 0x00,
+ /* 69, 0x45 */ 0xCC, 0x00, 0x66, 0x00,
+ /* 70, 0x46 */ 0xCC, 0x00, 0x33, 0x00,
+ /* 71, 0x47 */ 0xCC, 0x00, 0x00, 0x00,
+ /* 72, 0x48 */ 0x99, 0xFF, 0xFF, 0x00,
+ /* 73, 0x49 */ 0x99, 0xFF, 0xCC, 0x00,
+ /* 74, 0x4A */ 0x99, 0xFF, 0x99, 0x00,
+ /* 75, 0x4B */ 0x99, 0xFF, 0x66, 0x00,
+ /* 76, 0x4C */ 0x99, 0xFF, 0x33, 0x00,
+ /* 77, 0x4D */ 0x99, 0xFF, 0x00, 0x00,
+ /* 78, 0x4E */ 0x99, 0xCC, 0xFF, 0x00,
+ /* 79, 0x4F */ 0x99, 0xCC, 0xCC, 0x00,
+ /* 80, 0x50 */ 0x99, 0xCC, 0x99, 0x00,
+ /* 81, 0x51 */ 0x99, 0xCC, 0x66, 0x00,
+ /* 82, 0x52 */ 0x99, 0xCC, 0x33, 0x00,
+ /* 83, 0x53 */ 0x99, 0xCC, 0x00, 0x00,
+ /* 84, 0x54 */ 0x99, 0x99, 0xFF, 0x00,
+ /* 85, 0x55 */ 0x99, 0x99, 0xCC, 0x00,
+ /* 86, 0x56 */ 0x99, 0x99, 0x99, 0x00,
+ /* 87, 0x57 */ 0x99, 0x99, 0x66, 0x00,
+ /* 88, 0x58 */ 0x99, 0x99, 0x33, 0x00,
+ /* 89, 0x59 */ 0x99, 0x99, 0x00, 0x00,
+ /* 90, 0x5A */ 0x99, 0x66, 0xFF, 0x00,
+ /* 91, 0x5B */ 0x99, 0x66, 0xCC, 0x00,
+ /* 92, 0x5C */ 0x99, 0x66, 0x99, 0x00,
+ /* 93, 0x5D */ 0x99, 0x66, 0x66, 0x00,
+ /* 94, 0x5E */ 0x99, 0x66, 0x33, 0x00,
+ /* 95, 0x5F */ 0x99, 0x66, 0x00, 0x00,
+ /* 96, 0x60 */ 0x99, 0x33, 0xFF, 0x00,
+ /* 97, 0x61 */ 0x99, 0x33, 0xCC, 0x00,
+ /* 98, 0x62 */ 0x99, 0x33, 0x99, 0x00,
+ /* 99, 0x63 */ 0x99, 0x33, 0x66, 0x00,
+ /* 100, 0x64 */ 0x99, 0x33, 0x33, 0x00,
+ /* 101, 0x65 */ 0x99, 0x33, 0x00, 0x00,
+ /* 102, 0x66 */ 0x99, 0x00, 0xFF, 0x00,
+ /* 103, 0x67 */ 0x99, 0x00, 0xCC, 0x00,
+ /* 104, 0x68 */ 0x99, 0x00, 0x99, 0x00,
+ /* 105, 0x69 */ 0x99, 0x00, 0x66, 0x00,
+ /* 106, 0x6A */ 0x99, 0x00, 0x33, 0x00,
+ /* 107, 0x6B */ 0x99, 0x00, 0x00, 0x00,
+ /* 108, 0x6C */ 0x66, 0xFF, 0xFF, 0x00,
+ /* 109, 0x6D */ 0x66, 0xFF, 0xCC, 0x00,
+ /* 110, 0x6E */ 0x66, 0xFF, 0x99, 0x00,
+ /* 111, 0x6F */ 0x66, 0xFF, 0x66, 0x00,
+ /* 112, 0x70 */ 0x66, 0xFF, 0x33, 0x00,
+ /* 113, 0x71 */ 0x66, 0xFF, 0x00, 0x00,
+ /* 114, 0x72 */ 0x66, 0xCC, 0xFF, 0x00,
+ /* 115, 0x73 */ 0x66, 0xCC, 0xCC, 0x00,
+ /* 116, 0x74 */ 0x66, 0xCC, 0x99, 0x00,
+ /* 117, 0x75 */ 0x66, 0xCC, 0x66, 0x00,
+ /* 118, 0x76 */ 0x66, 0xCC, 0x33, 0x00,
+ /* 119, 0x77 */ 0x66, 0xCC, 0x00, 0x00,
+ /* 120, 0x78 */ 0x66, 0x99, 0xFF, 0x00,
+ /* 121, 0x79 */ 0x66, 0x99, 0xCC, 0x00,
+ /* 122, 0x7A */ 0x66, 0x99, 0x99, 0x00,
+ /* 123, 0x7B */ 0x66, 0x99, 0x66, 0x00,
+ /* 124, 0x7C */ 0x66, 0x99, 0x33, 0x00,
+ /* 125, 0x7D */ 0x66, 0x99, 0x00, 0x00,
+ /* 126, 0x7E */ 0x66, 0x66, 0xFF, 0x00,
+ /* 127, 0x7F */ 0x66, 0x66, 0xCC, 0x00,
+ /* 128, 0x80 */ 0x66, 0x66, 0x99, 0x00,
+ /* 129, 0x81 */ 0x66, 0x66, 0x66, 0x00,
+ /* 130, 0x82 */ 0x66, 0x66, 0x33, 0x00,
+ /* 131, 0x83 */ 0x66, 0x66, 0x00, 0x00,
+ /* 132, 0x84 */ 0x66, 0x33, 0xFF, 0x00,
+ /* 133, 0x85 */ 0x66, 0x33, 0xCC, 0x00,
+ /* 134, 0x86 */ 0x66, 0x33, 0x99, 0x00,
+ /* 135, 0x87 */ 0x66, 0x33, 0x66, 0x00,
+ /* 136, 0x88 */ 0x66, 0x33, 0x33, 0x00,
+ /* 137, 0x89 */ 0x66, 0x33, 0x00, 0x00,
+ /* 138, 0x8A */ 0x66, 0x00, 0xFF, 0x00,
+ /* 139, 0x8B */ 0x66, 0x00, 0xCC, 0x00,
+ /* 140, 0x8C */ 0x66, 0x00, 0x99, 0x00,
+ /* 141, 0x8D */ 0x66, 0x00, 0x66, 0x00,
+ /* 142, 0x8E */ 0x66, 0x00, 0x33, 0x00,
+ /* 143, 0x8F */ 0x66, 0x00, 0x00, 0x00,
+ /* 144, 0x90 */ 0x33, 0xFF, 0xFF, 0x00,
+ /* 145, 0x91 */ 0x33, 0xFF, 0xCC, 0x00,
+ /* 146, 0x92 */ 0x33, 0xFF, 0x99, 0x00,
+ /* 147, 0x93 */ 0x33, 0xFF, 0x66, 0x00,
+ /* 148, 0x94 */ 0x33, 0xFF, 0x33, 0x00,
+ /* 149, 0x95 */ 0x33, 0xFF, 0x00, 0x00,
+ /* 150, 0x96 */ 0x33, 0xCC, 0xFF, 0x00,
+ /* 151, 0x97 */ 0x33, 0xCC, 0xCC, 0x00,
+ /* 152, 0x98 */ 0x33, 0xCC, 0x99, 0x00,
+ /* 153, 0x99 */ 0x33, 0xCC, 0x66, 0x00,
+ /* 154, 0x9A */ 0x33, 0xCC, 0x33, 0x00,
+ /* 155, 0x9B */ 0x33, 0xCC, 0x00, 0x00,
+ /* 156, 0x9C */ 0x33, 0x99, 0xFF, 0x00,
+ /* 157, 0x9D */ 0x33, 0x99, 0xCC, 0x00,
+ /* 158, 0x9E */ 0x33, 0x99, 0x99, 0x00,
+ /* 159, 0x9F */ 0x33, 0x99, 0x66, 0x00,
+ /* 160, 0xA0 */ 0x33, 0x99, 0x33, 0x00,
+ /* 161, 0xA1 */ 0x33, 0x99, 0x00, 0x00,
+ /* 162, 0xA2 */ 0x33, 0x66, 0xFF, 0x00,
+ /* 163, 0xA3 */ 0x33, 0x66, 0xCC, 0x00,
+ /* 164, 0xA4 */ 0x33, 0x66, 0x99, 0x00,
+ /* 165, 0xA5 */ 0x33, 0x66, 0x66, 0x00,
+ /* 166, 0xA6 */ 0x33, 0x66, 0x33, 0x00,
+ /* 167, 0xA7 */ 0x33, 0x66, 0x00, 0x00,
+ /* 168, 0xA8 */ 0x33, 0x33, 0xFF, 0x00,
+ /* 169, 0xA9 */ 0x33, 0x33, 0xCC, 0x00,
+ /* 170, 0xAA */ 0x33, 0x33, 0x99, 0x00,
+ /* 171, 0xAB */ 0x33, 0x33, 0x66, 0x00,
+ /* 172, 0xAC */ 0x33, 0x33, 0x33, 0x00,
+ /* 173, 0xAD */ 0x33, 0x33, 0x00, 0x00,
+ /* 174, 0xAE */ 0x33, 0x00, 0xFF, 0x00,
+ /* 175, 0xAF */ 0x33, 0x00, 0xCC, 0x00,
+ /* 176, 0xB0 */ 0x33, 0x00, 0x99, 0x00,
+ /* 177, 0xB1 */ 0x33, 0x00, 0x66, 0x00,
+ /* 178, 0xB2 */ 0x33, 0x00, 0x33, 0x00,
+ /* 179, 0xB3 */ 0x33, 0x00, 0x00, 0x00,
+ /* 180, 0xB4 */ 0x00, 0xFF, 0xFF, 0x00,
+ /* 181, 0xB5 */ 0x00, 0xFF, 0xCC, 0x00,
+ /* 182, 0xB6 */ 0x00, 0xFF, 0x99, 0x00,
+ /* 183, 0xB7 */ 0x00, 0xFF, 0x66, 0x00,
+ /* 184, 0xB8 */ 0x00, 0xFF, 0x33, 0x00,
+ /* 185, 0xB9 */ 0x00, 0xFF, 0x00, 0x00,
+ /* 186, 0xBA */ 0x00, 0xCC, 0xFF, 0x00,
+ /* 187, 0xBB */ 0x00, 0xCC, 0xCC, 0x00,
+ /* 188, 0xBC */ 0x00, 0xCC, 0x99, 0x00,
+ /* 189, 0xBD */ 0x00, 0xCC, 0x66, 0x00,
+ /* 190, 0xBE */ 0x00, 0xCC, 0x33, 0x00,
+ /* 191, 0xBF */ 0x00, 0xCC, 0x00, 0x00,
+ /* 192, 0xC0 */ 0x00, 0x99, 0xFF, 0x00,
+ /* 193, 0xC1 */ 0x00, 0x99, 0xCC, 0x00,
+ /* 194, 0xC2 */ 0x00, 0x99, 0x99, 0x00,
+ /* 195, 0xC3 */ 0x00, 0x99, 0x66, 0x00,
+ /* 196, 0xC4 */ 0x00, 0x99, 0x33, 0x00,
+ /* 197, 0xC5 */ 0x00, 0x99, 0x00, 0x00,
+ /* 198, 0xC6 */ 0x00, 0x66, 0xFF, 0x00,
+ /* 199, 0xC7 */ 0x00, 0x66, 0xCC, 0x00,
+ /* 200, 0xC8 */ 0x00, 0x66, 0x99, 0x00,
+ /* 201, 0xC9 */ 0x00, 0x66, 0x66, 0x00,
+ /* 202, 0xCA */ 0x00, 0x66, 0x33, 0x00,
+ /* 203, 0xCB */ 0x00, 0x66, 0x00, 0x00,
+ /* 204, 0xCC */ 0x00, 0x33, 0xFF, 0x00,
+ /* 205, 0xCD */ 0x00, 0x33, 0xCC, 0x00,
+ /* 206, 0xCE */ 0x00, 0x33, 0x99, 0x00,
+ /* 207, 0xCF */ 0x00, 0x33, 0x66, 0x00,
+ /* 208, 0xD0 */ 0x00, 0x33, 0x33, 0x00,
+ /* 209, 0xD1 */ 0x00, 0x33, 0x00, 0x00,
+ /* 210, 0xD2 */ 0x00, 0x00, 0xFF, 0x00,
+ /* 211, 0xD3 */ 0x00, 0x00, 0xCC, 0x00,
+ /* 212, 0xD4 */ 0x00, 0x00, 0x99, 0x00,
+ /* 213, 0xD5 */ 0x00, 0x00, 0x66, 0x00,
+ /* 214, 0xD6 */ 0x00, 0x00, 0x33, 0x00,
+ /* 215, 0xD7 */ 0xEE, 0x00, 0x00, 0x00,
+ /* 216, 0xD8 */ 0xDD, 0x00, 0x00, 0x00,
+ /* 217, 0xD9 */ 0xBB, 0x00, 0x00, 0x00,
+ /* 218, 0xDA */ 0xAA, 0x00, 0x00, 0x00,
+ /* 219, 0xDB */ 0x88, 0x00, 0x00, 0x00,
+ /* 220, 0xDC */ 0x77, 0x00, 0x00, 0x00,
+ /* 221, 0xDD */ 0x55, 0x00, 0x00, 0x00,
+ /* 222, 0xDE */ 0x44, 0x00, 0x00, 0x00,
+ /* 223, 0xDF */ 0x22, 0x00, 0x00, 0x00,
+ /* 224, 0xE0 */ 0x11, 0x00, 0x00, 0x00,
+ /* 225, 0xE1 */ 0x00, 0xEE, 0x00, 0x00,
+ /* 226, 0xE2 */ 0x00, 0xDD, 0x00, 0x00,
+ /* 227, 0xE3 */ 0x00, 0xBB, 0x00, 0x00,
+ /* 228, 0xE4 */ 0x00, 0xAA, 0x00, 0x00,
+ /* 229, 0xE5 */ 0x00, 0x88, 0x00, 0x00,
+ /* 230, 0xE6 */ 0x00, 0x77, 0x00, 0x00,
+ /* 231, 0xE7 */ 0x00, 0x55, 0x00, 0x00,
+ /* 232, 0xE8 */ 0x00, 0x44, 0x00, 0x00,
+ /* 233, 0xE9 */ 0x00, 0x22, 0x00, 0x00,
+ /* 234, 0xEA */ 0x00, 0x11, 0x00, 0x00,
+ /* 235, 0xEB */ 0x00, 0x00, 0xEE, 0x00,
+ /* 236, 0xEC */ 0x00, 0x00, 0xDD, 0x00,
+ /* 237, 0xED */ 0x00, 0x00, 0xBB, 0x00,
+ /* 238, 0xEE */ 0x00, 0x00, 0xAA, 0x00,
+ /* 239, 0xEF */ 0x00, 0x00, 0x88, 0x00,
+ /* 240, 0xF0 */ 0x00, 0x00, 0x77, 0x00,
+ /* 241, 0xF1 */ 0x00, 0x00, 0x55, 0x00,
+ /* 242, 0xF2 */ 0x00, 0x00, 0x44, 0x00,
+ /* 243, 0xF3 */ 0x00, 0x00, 0x22, 0x00,
+ /* 244, 0xF4 */ 0x00, 0x00, 0x11, 0x00,
+ /* 245, 0xF5 */ 0xEE, 0xEE, 0xEE, 0x00,
+ /* 246, 0xF6 */ 0xDD, 0xDD, 0xDD, 0x00,
+ /* 247, 0xF7 */ 0xBB, 0xBB, 0xBB, 0x00,
+ /* 248, 0xF8 */ 0xAA, 0xAA, 0xAA, 0x00,
+ /* 249, 0xF9 */ 0x88, 0x88, 0x88, 0x00,
+ /* 250, 0xFA */ 0x77, 0x77, 0x77, 0x00,
+ /* 251, 0xFB */ 0x55, 0x55, 0x55, 0x00,
+ /* 252, 0xFC */ 0x44, 0x44, 0x44, 0x00,
+ /* 253, 0xFD */ 0x22, 0x22, 0x22, 0x00,
+ /* 254, 0xFE */ 0x11, 0x11, 0x11, 0x00,
+ /* 255, 0xFF */ 0x00, 0x00, 0x00, 0x00
+};
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/raw.c b/contrib/ffmpeg/libavformat/raw.c
new file mode 100644
index 000000000..e1ccbcd6d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/raw.c
@@ -0,0 +1,843 @@
+/*
+ * RAW muxer and demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ * Copyright (c) 2005 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#ifdef CONFIG_MUXERS
+/* simple formats */
+static int raw_write_header(struct AVFormatContext *s)
+{
+ return 0;
+}
+
+static int flac_write_header(struct AVFormatContext *s)
+{
+ static const uint8_t header[8] = {
+ 0x66, 0x4C, 0x61, 0x43, 0x80, 0x00, 0x00, 0x22
+ };
+ uint8_t *streaminfo = s->streams[0]->codec->extradata;
+ int len = s->streams[0]->codec->extradata_size;
+ if(streaminfo != NULL && len > 0) {
+ put_buffer(&s->pb, header, 8);
+ put_buffer(&s->pb, streaminfo, len);
+ }
+ return 0;
+}
+
+static int raw_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ put_buffer(&s->pb, pkt->data, pkt->size);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int raw_write_trailer(struct AVFormatContext *s)
+{
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/* raw input */
+static int raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ AVStream *st;
+ int id;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ id = s->iformat->value;
+ if (id == CODEC_ID_RAWVIDEO) {
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ } else {
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ }
+ st->codec->codec_id = id;
+
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ st->codec->sample_rate = ap->sample_rate;
+ st->codec->channels = ap->channels;
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+ break;
+ case CODEC_TYPE_VIDEO:
+ av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
+ st->codec->width = ap->width;
+ st->codec->height = ap->height;
+ st->codec->pix_fmt = ap->pix_fmt;
+ if(st->codec->pix_fmt == PIX_FMT_NONE)
+ st->codec->pix_fmt= PIX_FMT_YUV420P;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+#define RAW_PACKET_SIZE 1024
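+/* raw streams carry no container framing: packets are fixed-size slices of
+ * the input; for the compressed formats below, read_header sets need_parsing
+ * so that the codec parser recovers the real frame boundaries */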
+
+static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, size;
+ // AVStream *st = s->streams[0];
+
+ size= RAW_PACKET_SIZE;
+
+ ret= av_get_packet(&s->pb, pkt, size);
+
+ pkt->stream_index = 0;
+ if (ret <= 0) {
+ return AVERROR_IO;
+ }
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return ret;
+}
+
+static int raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, size;
+
+ size = RAW_PACKET_SIZE;
+
+ if (av_new_packet(pkt, size) < 0)
+ return AVERROR_IO;
+
+ pkt->pos= url_ftell(&s->pb);
+ pkt->stream_index = 0;
+ ret = get_partial_buffer(&s->pb, pkt->data, size);
+ if (ret <= 0) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->size = ret;
+ return ret;
+}
+
+// http://www.artificis.hu/files/texts/ingenient.txt
+static int ingenient_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, size, w, h, unk1, unk2;
+
+ if (get_le32(&s->pb) != MKTAG('M', 'J', 'P', 'G'))
+ return AVERROR_IO; // FIXME
+
+ size = get_le32(&s->pb);
+
+ w = get_le16(&s->pb);
+ h = get_le16(&s->pb);
+
+ url_fskip(&s->pb, 8); // zero + size (padded?)
+ url_fskip(&s->pb, 2);
+ unk1 = get_le16(&s->pb);
+ unk2 = get_le16(&s->pb);
+ url_fskip(&s->pb, 22); // ascii timestamp
+
+ av_log(NULL, AV_LOG_DEBUG, "Ingenient packet: size=%d, width=%d, height=%d, unk1=%d unk2=%d\n",
+ size, w, h, unk1, unk2);
+
+ if (av_new_packet(pkt, size) < 0)
+ return AVERROR_IO;
+
+ pkt->pos = url_ftell(&s->pb);
+ pkt->stream_index = 0;
+ ret = get_buffer(&s->pb, pkt->data, size);
+ if (ret <= 0) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->size = ret;
+ return ret;
+}
+
+static int raw_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+int pcm_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ AVStream *st;
+ int block_align, byte_rate;
+ int64_t pos;
+
+ st = s->streams[0];
+
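+    /* derive block_align and byte_rate from the codec parameters when the
+     * container did not supply them; both are needed below to convert the
+     * timestamp into a byte position in the raw stream */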
+ block_align = st->codec->block_align ? st->codec->block_align :
+ (av_get_bits_per_sample(st->codec->codec_id) * st->codec->channels) >> 3;
+ byte_rate = st->codec->bit_rate ? st->codec->bit_rate >> 3 :
+ block_align * st->codec->sample_rate;
+
+ if (block_align <= 0 || byte_rate <= 0)
+ return -1;
+
+ /* compute the position by aligning it to block_align */
+ pos = av_rescale_rnd(timestamp * byte_rate,
+ st->time_base.num,
+ st->time_base.den * (int64_t)block_align,
+ (flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP);
+ pos *= block_align;
+
+ /* recompute exact position */
+ st->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
+ url_fseek(&s->pb, pos + s->data_offset, SEEK_SET);
+ return 0;
+}
+
+/* ac3 read */
+static int ac3_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_AC3;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+static int shorten_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_SHORTEN;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+/* flac read */
+static int flac_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_FLAC;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+/* dts read */
+static int dts_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_DTS;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+/* aac read */
+static int aac_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_AAC;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+/* mpeg1/h263 input */
+static int video_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = s->iformat->value;
+ st->need_parsing = 1;
+
+    /* for MJPEG, specify the frame rate */
+    /* for MPEG-4, specify it too (most MPEG-4 streams don't have fixed_vop_rate set ...) */
+ if (ap->time_base.num) {
+ av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
+ } else if ( st->codec->codec_id == CODEC_ID_MJPEG ||
+ st->codec->codec_id == CODEC_ID_MPEG4 ||
+ st->codec->codec_id == CODEC_ID_H264) {
+ av_set_pts_info(st, 64, 1, 25);
+ }
+
+ return 0;
+}
+
+#define SEQ_START_CODE 0x000001b3
+#define GOP_START_CODE 0x000001b8
+#define PICTURE_START_CODE 0x00000100
+#define SLICE_START_CODE 0x00000101
+#define PACK_START_CODE 0x000001ba
+#define VIDEO_ID 0x000001e0
+#define AUDIO_ID 0x000001c0
+
+static int mpegvideo_probe(AVProbeData *p)
+{
+ uint32_t code= -1;
+ int pic=0, seq=0, slice=0, pspack=0, pes=0;
+ int i;
+
+ for(i=0; i<p->buf_size; i++){
+ code = (code<<8) + p->buf[i];
+ if ((code & 0xffffff00) == 0x100) {
+ switch(code){
+ case SEQ_START_CODE: seq++; break;
+ case PICTURE_START_CODE: pic++; break;
+ case SLICE_START_CODE: slice++; break;
+ case PACK_START_CODE: pspack++; break;
+ case VIDEO_ID:
+ case AUDIO_ID: pes++; break;
+ }
+ }
+ }
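+    /* heuristic: require at least one sequence header, roughly one picture
+     * per sequence header and one slice per picture, and no pack or PES
+     * start codes (those would indicate a program stream, not an elementary
+     * stream) */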
+ if(seq && seq*9<=pic*10 && pic*9<=slice*10 && !pspack && !pes)
+ return AVPROBE_SCORE_MAX/2+1; // +1 for .mpg
+ return 0;
+}
+
+#define VIDEO_OBJECT_START_CODE 0x00000100
+#define VIDEO_OBJECT_LAYER_START_CODE 0x00000120
+#define VISUAL_OBJECT_START_CODE 0x000001b5
+#define VOP_START_CODE 0x000001b6
+
+static int mpeg4video_probe(AVProbeData *probe_packet)
+{
+ uint32_t temp_buffer= -1;
+ int VO=0, VOL=0, VOP = 0, VISO = 0;
+ int i;
+
+ for(i=0; i<probe_packet->buf_size; i++){
+ temp_buffer = (temp_buffer<<8) + probe_packet->buf[i];
+ if ((temp_buffer & 0xffffff00) == 0x100) {
+ switch(temp_buffer){
+ case VOP_START_CODE: VOP++; break;
+ case VISUAL_OBJECT_START_CODE: VISO++; break;
+ }
+ switch(temp_buffer & 0xfffffff0){
+ case VIDEO_OBJECT_START_CODE: VO++; break;
+ case VIDEO_OBJECT_LAYER_START_CODE: VOL++; break;
+ }
+ }
+ }
+
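+    /* accept only when VOP start codes dominate: at least one VOL header,
+     * and no more visual-object or VOL start codes than VOPs */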
+ if ( VOP >= VISO && VOP >= VOL && VO >= VOL && VOL > 0)
+ return AVPROBE_SCORE_MAX/2;
+ return 0;
+}
+
+static int h263_probe(AVProbeData *p)
+{
+ int code;
+ const uint8_t *d;
+
+ if (p->buf_size < 6)
+ return 0;
+ d = p->buf;
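+    /* 22-bit H.263 picture start code: 0000 0000 0000 0000 1 00000 */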
+ code = (d[0] << 14) | (d[1] << 6) | (d[2] >> 2);
+ if (code == 0x20) {
+ return 50;
+ }
+ return 0;
+}
+
+static int h261_probe(AVProbeData *p)
+{
+ int code;
+ const uint8_t *d;
+
+ if (p->buf_size < 6)
+ return 0;
+ d = p->buf;
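+    /* 20-bit H.261 picture start code: 0000 0000 0000 0001 0000 */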
+ code = (d[0] << 12) | (d[1] << 4) | (d[2] >> 4);
+ if (code == 0x10) {
+ return 50;
+ }
+ return 0;
+}
+
+AVInputFormat shorten_demuxer = {
+ "shn",
+ "raw shorten",
+ 0,
+ NULL,
+ shorten_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "shn",
+};
+
+AVInputFormat flac_demuxer = {
+ "flac",
+ "raw flac",
+ 0,
+ NULL,
+ flac_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "flac",
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat flac_muxer = {
+ "flac",
+ "raw flac",
+ "audio/x-flac",
+ "flac",
+ 0,
+ CODEC_ID_FLAC,
+ 0,
+ flac_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat ac3_demuxer = {
+ "ac3",
+ "raw ac3",
+ 0,
+ NULL,
+ ac3_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "ac3",
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat ac3_muxer = {
+ "ac3",
+ "raw ac3",
+ "audio/x-ac3",
+ "ac3",
+ 0,
+ CODEC_ID_AC3,
+ 0,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat dts_demuxer = {
+ "dts",
+ "raw dts",
+ 0,
+ NULL,
+ dts_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "dts",
+};
+
+AVInputFormat aac_demuxer = {
+ "aac",
+ "ADTS AAC",
+ 0,
+ NULL,
+ aac_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "aac",
+};
+
+AVInputFormat h261_demuxer = {
+ "h261",
+ "raw h261",
+ 0,
+ h261_probe,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "h261",
+ .value = CODEC_ID_H261,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat h261_muxer = {
+ "h261",
+ "raw h261",
+ "video/x-h261",
+ "h261",
+ 0,
+ 0,
+ CODEC_ID_H261,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat h263_demuxer = {
+ "h263",
+ "raw h263",
+ 0,
+ h263_probe,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+// .extensions = "h263", //FIXME remove after writing mpeg4_probe
+ .value = CODEC_ID_H263,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat h263_muxer = {
+ "h263",
+ "raw h263",
+ "video/x-h263",
+ "h263",
+ 0,
+ 0,
+ CODEC_ID_H263,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat m4v_demuxer = {
+ "m4v",
+ "raw MPEG4 video format",
+ 0,
+    mpeg4video_probe, /* probing for MPEG-4 data */
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "m4v", //FIXME remove after writing mpeg4_probe
+ .value = CODEC_ID_MPEG4,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat m4v_muxer = {
+ "m4v",
+ "raw MPEG4 video format",
+ NULL,
+ "m4v",
+ 0,
+ CODEC_ID_NONE,
+ CODEC_ID_MPEG4,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat h264_demuxer = {
+ "h264",
+ "raw H264 video format",
+ 0,
+ NULL /*mpegvideo_probe*/,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "h26l,h264,264", //FIXME remove after writing mpeg4_probe
+ .value = CODEC_ID_H264,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat h264_muxer = {
+ "h264",
+ "raw H264 video format",
+ NULL,
+ "h264",
+ 0,
+ CODEC_ID_NONE,
+ CODEC_ID_H264,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat mpegvideo_demuxer = {
+ "mpegvideo",
+ "MPEG video",
+ 0,
+ mpegvideo_probe,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .value = CODEC_ID_MPEG1VIDEO,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat mpeg1video_muxer = {
+ "mpeg1video",
+ "MPEG video",
+ "video/x-mpeg",
+ "mpg,mpeg,m1v",
+ 0,
+ 0,
+ CODEC_ID_MPEG1VIDEO,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat mpeg2video_muxer = {
+ "mpeg2video",
+ "MPEG2 video",
+ NULL,
+ "m2v",
+ 0,
+ 0,
+ CODEC_ID_MPEG2VIDEO,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat mjpeg_demuxer = {
+ "mjpeg",
+ "MJPEG video",
+ 0,
+ NULL,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "mjpg,mjpeg",
+ .value = CODEC_ID_MJPEG,
+};
+
+AVInputFormat ingenient_demuxer = {
+ "ingenient",
+ "Ingenient MJPEG",
+ 0,
+ NULL,
+ video_read_header,
+ ingenient_read_packet,
+ raw_read_close,
+ .extensions = "cgi", // FIXME
+ .value = CODEC_ID_MJPEG,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat mjpeg_muxer = {
+ "mjpeg",
+ "MJPEG video",
+ "video/x-mjpeg",
+ "mjpg,mjpeg",
+ 0,
+ 0,
+ CODEC_ID_MJPEG,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+/* pcm formats */
+
+#define PCMINPUTDEF(name, long_name, ext, codec) \
+AVInputFormat pcm_ ## name ## _demuxer = {\
+ #name,\
+ long_name,\
+ 0,\
+ NULL,\
+ raw_read_header,\
+ raw_read_packet,\
+ raw_read_close,\
+ pcm_read_seek,\
+ .extensions = ext,\
+ .value = codec,\
+};
+
+#define PCMOUTPUTDEF(name, long_name, ext, codec) \
+AVOutputFormat pcm_ ## name ## _muxer = {\
+ #name,\
+ long_name,\
+ NULL,\
+ ext,\
+ 0,\
+ codec,\
+ 0,\
+ raw_write_header,\
+ raw_write_packet,\
+ raw_write_trailer,\
+ .flags= AVFMT_NOTIMESTAMPS,\
+};
+
+
+#if !defined(CONFIG_MUXERS) && defined(CONFIG_DEMUXERS)
+#define PCMDEF(name, long_name, ext, codec) \
+ PCMINPUTDEF(name, long_name, ext, codec)
+#elif defined(CONFIG_MUXERS) && !defined(CONFIG_DEMUXERS)
+#define PCMDEF(name, long_name, ext, codec) \
+ PCMOUTPUTDEF(name, long_name, ext, codec)
+#elif defined(CONFIG_MUXERS) && defined(CONFIG_DEMUXERS)
+#define PCMDEF(name, long_name, ext, codec) \
+ PCMINPUTDEF(name, long_name, ext, codec)\
+ PCMOUTPUTDEF(name, long_name, ext, codec)
+#else
+#define PCMDEF(name, long_name, ext, codec)
+#endif
+
+#ifdef WORDS_BIGENDIAN
+#define BE_DEF(s) s
+#define LE_DEF(s) NULL
+#else
+#define BE_DEF(s) NULL
+#define LE_DEF(s) s
+#endif
+
+
+PCMDEF(s16le, "pcm signed 16 bit little endian format",
+ LE_DEF("sw"), CODEC_ID_PCM_S16LE)
+
+PCMDEF(s16be, "pcm signed 16 bit big endian format",
+ BE_DEF("sw"), CODEC_ID_PCM_S16BE)
+
+PCMDEF(u16le, "pcm unsigned 16 bit little endian format",
+ LE_DEF("uw"), CODEC_ID_PCM_U16LE)
+
+PCMDEF(u16be, "pcm unsigned 16 bit big endian format",
+ BE_DEF("uw"), CODEC_ID_PCM_U16BE)
+
+PCMDEF(s8, "pcm signed 8 bit format",
+ "sb", CODEC_ID_PCM_S8)
+
+PCMDEF(u8, "pcm unsigned 8 bit format",
+ "ub", CODEC_ID_PCM_U8)
+
+PCMDEF(mulaw, "pcm mu law format",
+ "ul", CODEC_ID_PCM_MULAW)
+
+PCMDEF(alaw, "pcm A law format",
+ "al", CODEC_ID_PCM_ALAW)
+
+static int rawvideo_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int packet_size, ret, width, height;
+ AVStream *st = s->streams[0];
+
+ width = st->codec->width;
+ height = st->codec->height;
+
+ packet_size = avpicture_get_size(st->codec->pix_fmt, width, height);
+ if (packet_size < 0)
+ return -1;
+
+ ret= av_get_packet(&s->pb, pkt, packet_size);
+
+ pkt->stream_index = 0;
+ if (ret != packet_size) {
+ return AVERROR_IO;
+ } else {
+ return 0;
+ }
+}
+
+AVInputFormat rawvideo_demuxer = {
+ "rawvideo",
+ "raw video format",
+ 0,
+ NULL,
+ raw_read_header,
+ rawvideo_read_packet,
+ raw_read_close,
+ .extensions = "yuv,cif,qcif",
+ .value = CODEC_ID_RAWVIDEO,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat rawvideo_muxer = {
+ "rawvideo",
+ "raw video format",
+ NULL,
+ "yuv",
+ 0,
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+#ifdef CONFIG_MUXERS
+static int null_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ return 0;
+}
+
+AVOutputFormat null_muxer = {
+ "null",
+ "null video format",
+ NULL,
+ NULL,
+ 0,
+#ifdef WORDS_BIGENDIAN
+ CODEC_ID_PCM_S16BE,
+#else
+ CODEC_ID_PCM_S16LE,
+#endif
+ CODEC_ID_RAWVIDEO,
+ raw_write_header,
+ null_write_packet,
+ raw_write_trailer,
+ .flags = AVFMT_NOFILE | AVFMT_RAWPICTURE | AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
diff --git a/contrib/ffmpeg/libavformat/riff.c b/contrib/ffmpeg/libavformat/riff.c
new file mode 100644
index 000000000..d315c66af
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/riff.c
@@ -0,0 +1,468 @@
+/*
+ * RIFF codec tags
+ * Copyright (c) 2000 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "avcodec.h"
+#include "riff.h"
+
+/* Note: when encoding, the first matching tag is used, so order is
+   important if multiple tags are possible for a given codec. */
+const CodecTag codec_bmp_tags[] = {
+ { CODEC_ID_H264, MKTAG('H', '2', '6', '4') },
+ { CODEC_ID_H264, MKTAG('h', '2', '6', '4') },
+ { CODEC_ID_H264, MKTAG('X', '2', '6', '4') },
+ { CODEC_ID_H264, MKTAG('x', '2', '6', '4') },
+ { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
+ { CODEC_ID_H264, MKTAG('V', 'S', 'S', 'H') },
+
+ { CODEC_ID_H263, MKTAG('H', '2', '6', '3') },
+ { CODEC_ID_H263P, MKTAG('H', '2', '6', '3') },
+ { CODEC_ID_H263I, MKTAG('I', '2', '6', '3') }, /* intel h263 */
+ { CODEC_ID_H261, MKTAG('H', '2', '6', '1') },
+
+ /* added based on MPlayer */
+ { CODEC_ID_H263P, MKTAG('U', '2', '6', '3') },
+ { CODEC_ID_H263P, MKTAG('v', 'i', 'v', '1') },
+
+ { CODEC_ID_MPEG4, MKTAG('F', 'M', 'P', '4')},
+ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X'), .invalid_asf = 1 },
+ { CODEC_ID_MPEG4, MKTAG('D', 'X', '5', '0'), .invalid_asf = 1 },
+ { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D'), .invalid_asf = 1 },
+ { CODEC_ID_MPEG4, MKTAG('M', 'P', '4', 'S') },
+ { CODEC_ID_MPEG4, MKTAG('M', '4', 'S', '2') },
+ { CODEC_ID_MPEG4, MKTAG(0x04, 0, 0, 0) }, /* some broken avi use this */
+
+ /* added based on MPlayer */
+ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', '1') },
+ { CODEC_ID_MPEG4, MKTAG('B', 'L', 'Z', '0') },
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_MPEG4, MKTAG('U', 'M', 'P', '4') },
+ { CODEC_ID_MPEG4, MKTAG('W', 'V', '1', 'F') },
+ { CODEC_ID_MPEG4, MKTAG('S', 'E', 'D', 'G') },
+
+ { CODEC_ID_MPEG4, MKTAG('R', 'M', 'P', '4') },
+
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3'), .invalid_asf = 1 }, /* default signature when using MSMPEG4 */
+ { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
+
+ /* added based on MPlayer */
+ { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '5') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '6') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '4') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('A', 'P', '4', '1') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '1') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '0') },
+
+ { CODEC_ID_MSMPEG4V2, MKTAG('M', 'P', '4', '2') },
+
+ /* added based on MPlayer */
+ { CODEC_ID_MSMPEG4V2, MKTAG('D', 'I', 'V', '2') },
+
+ { CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', 'G', '4') },
+
+ { CODEC_ID_WMV1, MKTAG('W', 'M', 'V', '1') },
+
+ /* added based on MPlayer */
+ { CODEC_ID_WMV2, MKTAG('W', 'M', 'V', '2') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'd') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'd') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'l') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '2', '5') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '1') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '2') },
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', '2') },
+ { CODEC_ID_MPEG2VIDEO, MKTAG('M', 'P', 'E', 'G') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('P', 'I', 'M', '1') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('V', 'C', 'R', '2') },
+ { CODEC_ID_MPEG1VIDEO, 0x10000001 },
+ { CODEC_ID_MPEG2VIDEO, 0x10000002 },
+ { CODEC_ID_MPEG2VIDEO, MKTAG('D', 'V', 'R', ' ') },
+ { CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') },
+ { CODEC_ID_MJPEG, MKTAG('L', 'J', 'P', 'G') },
+ { CODEC_ID_LJPEG, MKTAG('L', 'J', 'P', 'G') },
+ { CODEC_ID_MJPEG, MKTAG('J', 'P', 'G', 'L') }, /* Pegasus lossless JPEG */
+ { CODEC_ID_MJPEG, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - decoder */
+ { CODEC_ID_JPEGLS, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - encoder */
+ { CODEC_ID_HUFFYUV, MKTAG('H', 'F', 'Y', 'U') },
+ { CODEC_ID_FFVHUFF, MKTAG('F', 'F', 'V', 'H') },
+ { CODEC_ID_CYUV, MKTAG('C', 'Y', 'U', 'V') },
+ { CODEC_ID_RAWVIDEO, 0 },
+ { CODEC_ID_RAWVIDEO, MKTAG('I', '4', '2', '0') },
+ { CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'Y', '2') },
+ { CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '2', '2') },
+ { CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '2') },
+ { CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'V', 'Y') },
+ { CODEC_ID_RAWVIDEO, MKTAG('I', 'Y', 'U', 'V') },
+ { CODEC_ID_RAWVIDEO, MKTAG('Y', '8', '0', '0') },
+ { CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '1') },
+ { CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '2') },
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
+ { CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
+ { CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') },
+ { CODEC_ID_ASV2, MKTAG('A', 'S', 'V', '2') },
+ { CODEC_ID_VCR1, MKTAG('V', 'C', 'R', '1') },
+ { CODEC_ID_FFV1, MKTAG('F', 'F', 'V', '1') },
+ { CODEC_ID_XAN_WC4, MKTAG('X', 'x', 'a', 'n') },
+ { CODEC_ID_MSRLE, MKTAG('m', 'r', 'l', 'e') },
+ { CODEC_ID_MSRLE, MKTAG(0x1, 0x0, 0x0, 0x0) },
+ { CODEC_ID_MSVIDEO1, MKTAG('M', 'S', 'V', 'C') },
+ { CODEC_ID_MSVIDEO1, MKTAG('m', 's', 'v', 'c') },
+ { CODEC_ID_MSVIDEO1, MKTAG('C', 'R', 'A', 'M') },
+ { CODEC_ID_MSVIDEO1, MKTAG('c', 'r', 'a', 'm') },
+ { CODEC_ID_MSVIDEO1, MKTAG('W', 'H', 'A', 'M') },
+ { CODEC_ID_MSVIDEO1, MKTAG('w', 'h', 'a', 'm') },
+ { CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') },
+ { CODEC_ID_TRUEMOTION1, MKTAG('D', 'U', 'C', 'K') },
+ { CODEC_ID_MSZH, MKTAG('M', 'S', 'Z', 'H') },
+ { CODEC_ID_ZLIB, MKTAG('Z', 'L', 'I', 'B') },
+ { CODEC_ID_SNOW, MKTAG('S', 'N', 'O', 'W') },
+ { CODEC_ID_4XM, MKTAG('4', 'X', 'M', 'V') },
+ { CODEC_ID_FLV1, MKTAG('F', 'L', 'V', '1') },
+ { CODEC_ID_FLASHSV, MKTAG('F', 'S', 'V', '1') },
+ { CODEC_ID_VP6F, MKTAG('V', 'P', '6', 'F') },
+ { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') },
+ { CODEC_ID_TSCC, MKTAG('t', 's', 'c', 'c') },
+ { CODEC_ID_ULTI, MKTAG('U', 'L', 'T', 'I') },
+ { CODEC_ID_VIXL, MKTAG('V', 'I', 'X', 'L') },
+ { CODEC_ID_QPEG, MKTAG('Q', 'P', 'E', 'G') },
+ { CODEC_ID_QPEG, MKTAG('Q', '1', '.', '0') },
+ { CODEC_ID_QPEG, MKTAG('Q', '1', '.', '1') },
+ { CODEC_ID_WMV3, MKTAG('W', 'M', 'V', '3') },
+ { CODEC_ID_VC1, MKTAG('W', 'V', 'C', '1') },
+ { CODEC_ID_LOCO, MKTAG('L', 'O', 'C', 'O') },
+ { CODEC_ID_WNV1, MKTAG('W', 'N', 'V', '1') },
+ { CODEC_ID_AASC, MKTAG('A', 'A', 'S', 'C') },
+ { CODEC_ID_INDEO2, MKTAG('R', 'T', '2', '1') },
+ { CODEC_ID_FRAPS, MKTAG('F', 'P', 'S', '1') },
+ { CODEC_ID_THEORA, MKTAG('t', 'h', 'e', 'o') },
+ { CODEC_ID_TRUEMOTION2, MKTAG('T', 'M', '2', '0') },
+ { CODEC_ID_CSCD, MKTAG('C', 'S', 'C', 'D') },
+ { CODEC_ID_ZMBV, MKTAG('Z', 'M', 'B', 'V') },
+ { CODEC_ID_KMVC, MKTAG('K', 'M', 'V', 'C') },
+ { CODEC_ID_CAVS, MKTAG('C', 'A', 'V', 'S') },
+ { CODEC_ID_JPEG2000, MKTAG('M', 'J', '2', 'C') },
+ { CODEC_ID_VMNC, MKTAG('V', 'M', 'n', 'c') },
+ { CODEC_ID_NONE, 0 },
+};
+
+const CodecTag codec_wav_tags[] = {
+ { CODEC_ID_MP2, 0x50 },
+ { CODEC_ID_MP3, 0x55 },
+ { CODEC_ID_AC3, 0x2000 },
+ { CODEC_ID_DTS, 0x2001 },
+ { CODEC_ID_PCM_S16LE, 0x01 },
+ { CODEC_ID_PCM_U8, 0x01 }, /* must come after s16le in this list */
+ { CODEC_ID_PCM_S24LE, 0x01 },
+ { CODEC_ID_PCM_S32LE, 0x01 },
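+    /* tag 0x01 is shared by the integer PCM variants above; wav_codec_get_id()
+       tells them apart by the bits-per-sample value */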
+ { CODEC_ID_PCM_ALAW, 0x06 },
+ { CODEC_ID_PCM_MULAW, 0x07 },
+ { CODEC_ID_ADPCM_MS, 0x02 },
+ { CODEC_ID_ADPCM_IMA_WAV, 0x11 },
+ { CODEC_ID_ADPCM_YAMAHA, 0x20 },
+ { CODEC_ID_ADPCM_G726, 0x45 },
+ { CODEC_ID_ADPCM_IMA_DK4, 0x61 }, /* rogue format number */
+ { CODEC_ID_ADPCM_IMA_DK3, 0x62 }, /* rogue format number */
+ { CODEC_ID_WMAV1, 0x160 },
+ { CODEC_ID_WMAV2, 0x161 },
+ { CODEC_ID_AAC, 0x706d },
+ { CODEC_ID_AAC, 0xff },
+ { CODEC_ID_VORBIS, ('V'<<8)+'o' }, //HACK/FIXME, does vorbis in WAV/AVI have an (in)official id?
+ { CODEC_ID_SONIC, 0x2048 },
+ { CODEC_ID_SONIC_LS, 0x2048 },
+ { CODEC_ID_ADPCM_CT, 0x200 },
+ { CODEC_ID_ADPCM_SWF, ('S'<<8)+'F' },
+ { CODEC_ID_TRUESPEECH, 0x22 },
+ { CODEC_ID_FLAC, 0xF1AC },
+ { CODEC_ID_IMC, 0x401 },
+
+ /* FIXME: All of the IDs below are not 16 bit and thus illegal. */
+ // for NuppelVideo (nuv.c)
+ { CODEC_ID_PCM_S16LE, MKTAG('R', 'A', 'W', 'A') },
+ { CODEC_ID_MP3, MKTAG('L', 'A', 'M', 'E') },
+ { CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') },
+ { 0, 0 },
+};
+
+unsigned int codec_get_tag(const CodecTag *tags, int id)
+{
+ while (tags->id != CODEC_ID_NONE) {
+ if (tags->id == id)
+ return tags->tag;
+ tags++;
+ }
+ return 0;
+}
+
+unsigned int codec_get_asf_tag(const CodecTag *tags, unsigned int id)
+{
+ while (tags->id != CODEC_ID_NONE) {
+ if (!tags->invalid_asf && tags->id == id)
+ return tags->tag;
+ tags++;
+ }
+ return 0;
+}
+
+enum CodecID codec_get_id(const CodecTag *tags, unsigned int tag)
+{
+ while (tags->id != CODEC_ID_NONE) {
+ if( toupper((tag >> 0)&0xFF) == toupper((tags->tag >> 0)&0xFF)
+ && toupper((tag >> 8)&0xFF) == toupper((tags->tag >> 8)&0xFF)
+ && toupper((tag >>16)&0xFF) == toupper((tags->tag >>16)&0xFF)
+ && toupper((tag >>24)&0xFF) == toupper((tags->tag >>24)&0xFF))
+ return tags->id;
+ tags++;
+ }
+ return CODEC_ID_NONE;
+}
+
+unsigned int codec_get_bmp_tag(int id)
+{
+ return codec_get_tag(codec_bmp_tags, id);
+}
+
+unsigned int codec_get_wav_tag(int id)
+{
+ return codec_get_tag(codec_wav_tags, id);
+}
+
+enum CodecID codec_get_bmp_id(unsigned int tag)
+{
+ return codec_get_id(codec_bmp_tags, tag);
+}
+
+enum CodecID codec_get_wav_id(unsigned int tag)
+{
+ return codec_get_id(codec_wav_tags, tag);
+}
+
+#ifdef CONFIG_MUXERS
+offset_t start_tag(ByteIOContext *pb, const char *tag)
+{
+ put_tag(pb, tag);
+ put_le32(pb, 0);
+ return url_ftell(pb);
+}
+
+void end_tag(ByteIOContext *pb, offset_t start)
+{
+ offset_t pos;
+
+ pos = url_ftell(pb);
+ url_fseek(pb, start - 4, SEEK_SET);
+ put_le32(pb, (uint32_t)(pos - start));
+ url_fseek(pb, pos, SEEK_SET);
+}
+
+/* WAVEFORMATEX header */
+/* returns the size or -1 on error */
+int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
+{
+ int bps, blkalign, bytespersec;
+ int hdrsize = 18;
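+    /* 18 = the 16 bytes of the base WAVEFORMAT fields plus the 2-byte cbSize
+     * field; codec-specific extra bytes are added to hdrsize below */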
+
+ if(!enc->codec_tag || enc->codec_tag > 0xffff)
+ enc->codec_tag = codec_get_tag(codec_wav_tags, enc->codec_id);
+ if(!enc->codec_tag || enc->codec_tag > 0xffff)
+ return -1;
+
+ put_le16(pb, enc->codec_tag);
+ put_le16(pb, enc->channels);
+ put_le32(pb, enc->sample_rate);
+ if (enc->codec_id == CODEC_ID_PCM_U8 ||
+ enc->codec_id == CODEC_ID_PCM_ALAW ||
+ enc->codec_id == CODEC_ID_PCM_MULAW) {
+ bps = 8;
+ } else if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
+ bps = 0;
+ } else if (enc->codec_id == CODEC_ID_ADPCM_IMA_WAV || enc->codec_id == CODEC_ID_ADPCM_MS || enc->codec_id == CODEC_ID_ADPCM_G726 || enc->codec_id == CODEC_ID_ADPCM_YAMAHA) { //
+ bps = 4;
+ } else if (enc->codec_id == CODEC_ID_PCM_S24LE) {
+ bps = 24;
+ } else if (enc->codec_id == CODEC_ID_PCM_S32LE) {
+ bps = 32;
+ } else {
+ bps = 16;
+ }
+
+ if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
+        blkalign = enc->frame_size; // this is wrong, but it seems many demuxers don't work if it is set correctly
+ //blkalign = 144 * enc->bit_rate/enc->sample_rate;
+ } else if (enc->codec_id == CODEC_ID_ADPCM_G726) { //
+ blkalign = 1;
+ } else if (enc->block_align != 0) { /* specified by the codec */
+ blkalign = enc->block_align;
+ } else
+ blkalign = enc->channels*bps >> 3;
+ if (enc->codec_id == CODEC_ID_PCM_U8 ||
+ enc->codec_id == CODEC_ID_PCM_S24LE ||
+ enc->codec_id == CODEC_ID_PCM_S32LE ||
+ enc->codec_id == CODEC_ID_PCM_S16LE) {
+ bytespersec = enc->sample_rate * blkalign;
+ } else {
+ bytespersec = enc->bit_rate / 8;
+ }
+ put_le32(pb, bytespersec); /* bytes per second */
+ put_le16(pb, blkalign); /* block align */
+ put_le16(pb, bps); /* bits per sample */
+ if (enc->codec_id == CODEC_ID_MP3) {
+ put_le16(pb, 12); /* wav_extra_size */
+ hdrsize += 12;
+ put_le16(pb, 1); /* wID */
+ put_le32(pb, 2); /* fdwFlags */
+ put_le16(pb, 1152); /* nBlockSize */
+ put_le16(pb, 1); /* nFramesPerBlock */
+ put_le16(pb, 1393); /* nCodecDelay */
+ } else if (enc->codec_id == CODEC_ID_MP2) {
+ put_le16(pb, 22); /* wav_extra_size */
+ hdrsize += 22;
+ put_le16(pb, 2); /* fwHeadLayer */
+ put_le32(pb, enc->bit_rate); /* dwHeadBitrate */
+ put_le16(pb, enc->channels == 2 ? 1 : 8); /* fwHeadMode */
+ put_le16(pb, 0); /* fwHeadModeExt */
+ put_le16(pb, 1); /* wHeadEmphasis */
+ put_le16(pb, 16); /* fwHeadFlags */
+ put_le32(pb, 0); /* dwPTSLow */
+ put_le32(pb, 0); /* dwPTSHigh */
+ } else if (enc->codec_id == CODEC_ID_ADPCM_IMA_WAV) {
+ put_le16(pb, 2); /* wav_extra_size */
+ hdrsize += 2;
+ put_le16(pb, ((enc->block_align - 4 * enc->channels) / (4 * enc->channels)) * 8 + 1); /* wSamplesPerBlock */
+ } else if(enc->extradata_size){
+ put_le16(pb, enc->extradata_size);
+ put_buffer(pb, enc->extradata, enc->extradata_size);
+ hdrsize += enc->extradata_size;
+ if(hdrsize&1){
+ hdrsize++;
+ put_byte(pb, 0);
+ }
+ } else {
+ hdrsize -= 2;
+ }
+
+ return hdrsize;
+}
+
+/* BITMAPINFOHEADER header */
+void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags, int for_asf)
+{
+ put_le32(pb, 40 + enc->extradata_size); /* size */
+ put_le32(pb, enc->width);
+ put_le32(pb, enc->height);
+ put_le16(pb, 1); /* planes */
+
+ put_le16(pb, enc->bits_per_sample ? enc->bits_per_sample : 24); /* depth */
+ /* compression type */
+ put_le32(pb, for_asf ? (enc->codec_tag ? enc->codec_tag : codec_get_asf_tag(tags, enc->codec_id)) : enc->codec_tag); //
+ put_le32(pb, enc->width * enc->height * 3);
+ put_le32(pb, 0);
+ put_le32(pb, 0);
+ put_le32(pb, 0);
+ put_le32(pb, 0);
+
+ put_buffer(pb, enc->extradata, enc->extradata_size);
+
+ if (enc->extradata_size & 1)
+ put_byte(pb, 0);
+}
+#endif //CONFIG_MUXERS
+
+#ifdef CONFIG_DEMUXERS
+/* We could be given one of three possible structures here:
+ * WAVEFORMAT, PCMWAVEFORMAT or WAVEFORMATEX. Each structure
+ * is an expansion of the previous one, with the new fields added
+ * at the bottom: PCMWAVEFORMAT adds 'WORD wBitsPerSample' and
+ * WAVEFORMATEX adds 'WORD cbSize', which basically makes it
+ * an open-ended structure.
+ */
+void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size)
+{
+ int id;
+
+ id = get_le16(pb);
+ codec->codec_type = CODEC_TYPE_AUDIO;
+ codec->codec_tag = id;
+ codec->channels = get_le16(pb);
+ codec->sample_rate = get_le32(pb);
+ codec->bit_rate = get_le32(pb) * 8;
+ codec->block_align = get_le16(pb);
+ if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */
+ codec->bits_per_sample = 8;
+ }else
+ codec->bits_per_sample = get_le16(pb);
+ codec->codec_id = wav_codec_get_id(id, codec->bits_per_sample);
+
+ if (size > 16) { /* We're obviously dealing with WAVEFORMATEX */
+ codec->extradata_size = get_le16(pb);
+ if (codec->extradata_size > 0) {
+ if (codec->extradata_size > size - 18)
+ codec->extradata_size = size - 18;
+ codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(pb, codec->extradata, codec->extradata_size);
+ } else
+ codec->extradata_size = 0;
+
+ /* It is possible for the chunk to contain garbage at the end */
+ if (size - codec->extradata_size - 18 > 0)
+ url_fskip(pb, size - codec->extradata_size - 18);
+ }
+}
+
+
+int wav_codec_get_id(unsigned int tag, int bps)
+{
+ int id;
+ id = codec_get_id(codec_wav_tags, tag);
+ if (id <= 0)
+ return id;
+ /* handle specific u8 codec */
+ if (id == CODEC_ID_PCM_S16LE && bps == 8)
+ id = CODEC_ID_PCM_U8;
+ if (id == CODEC_ID_PCM_S16LE && bps == 24)
+ id = CODEC_ID_PCM_S24LE;
+ if (id == CODEC_ID_PCM_S16LE && bps == 32)
+ id = CODEC_ID_PCM_S32LE;
+ return id;
+}
+#endif // CONFIG_DEMUXERS
+
+void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale)
+{
+ int gcd;
+
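+    /* derive a (scale, rate) pair and sample size for the muxer's stream
+     * header: audio uses frame_size/sample_rate when known, video uses the
+     * codec time base, anything else falls back to bits-per-block/bit_rate */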
+ *au_ssize= stream->block_align;
+ if(stream->frame_size && stream->sample_rate){
+ *au_scale=stream->frame_size;
+ *au_rate= stream->sample_rate;
+ }else if(stream->codec_type == CODEC_TYPE_VIDEO){
+ *au_scale= stream->time_base.num;
+ *au_rate = stream->time_base.den;
+ }else{
+ *au_scale= stream->block_align ? stream->block_align*8 : 8;
+ *au_rate = stream->bit_rate;
+ }
+ gcd= ff_gcd(*au_scale, *au_rate);
+ *au_scale /= gcd;
+ *au_rate /= gcd;
+}
diff --git a/contrib/ffmpeg/libavformat/riff.h b/contrib/ffmpeg/libavformat/riff.h
new file mode 100644
index 000000000..240855a8b
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/riff.h
@@ -0,0 +1,51 @@
+/*
+ * RIFF codec tags
+ * copyright (c) 2000 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FF_RIFF_H
+#define FF_RIFF_H
+
+offset_t start_tag(ByteIOContext *pb, const char *tag);
+void end_tag(ByteIOContext *pb, offset_t start);
+
+typedef struct CodecTag {
+ int id;
+ unsigned int tag;
+ unsigned int invalid_asf : 1;
+} CodecTag;
+
+void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags, int for_asf);
+int put_wav_header(ByteIOContext *pb, AVCodecContext *enc);
+int wav_codec_get_id(unsigned int tag, int bps);
+void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size);
+
+extern const CodecTag codec_bmp_tags[];
+extern const CodecTag codec_wav_tags[];
+
+unsigned int codec_get_tag(const CodecTag *tags, int id);
+enum CodecID codec_get_id(const CodecTag *tags, unsigned int tag);
+unsigned int codec_get_bmp_tag(int id);
+unsigned int codec_get_wav_tag(int id);
+enum CodecID codec_get_bmp_id(unsigned int tag);
+enum CodecID codec_get_wav_id(unsigned int tag);
+unsigned int codec_get_asf_tag(const CodecTag *tags, unsigned int id);
+void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale);
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/rm.c b/contrib/ffmpeg/libavformat/rm.c
new file mode 100644
index 000000000..b4ddf1b02
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rm.c
@@ -0,0 +1,1146 @@
+/*
+ * "Real" compatible muxer and demuxer.
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+/* in ms */
+#define BUFFER_DURATION 0
+
+typedef struct {
+ int nb_packets;
+ int packet_total_size;
+ int packet_max_size;
+ /* codec related output */
+ int bit_rate;
+ float frame_rate;
+ int nb_frames; /* current frame number */
+ int total_frames; /* total number of frames */
+ int num;
+ AVCodecContext *enc;
+} StreamInfo;
+
+typedef struct {
+ StreamInfo streams[2];
+ StreamInfo *audio_stream, *video_stream;
+ int data_pos; /* position of the data after the header */
+ int nb_packets;
+ int old_format;
+ int current_stream;
+ int remaining_len;
+ /// Audio descrambling matrix parameters
+ uint8_t *audiobuf; ///< place to store reordered audio data
+ int64_t audiotimestamp; ///< Audio packet timestamp
+ int sub_packet_cnt; // Subpacket counter, used while reading
+ int sub_packet_size, sub_packet_h, coded_framesize; ///< Descrambling parameters from container
+ int audio_stream_num; ///< Stream number for audio packets
+ int audio_pkt_cnt; ///< Output packet counter
+ int audio_framesize; ///< Audio frame size from container
+ int sub_packet_lengths[16]; ///< Length of each aac subpacket
+} RMContext;
+
+#ifdef CONFIG_MUXERS
+static void put_str(ByteIOContext *s, const char *tag)
+{
+ put_be16(s,strlen(tag));
+ while (*tag) {
+ put_byte(s, *tag++);
+ }
+}
+
+static void put_str8(ByteIOContext *s, const char *tag)
+{
+ put_byte(s, strlen(tag));
+ while (*tag) {
+ put_byte(s, *tag++);
+ }
+}
+
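+/* rv10_write_header() lays out the RealMedia header:
+ *   ".RMF"  file header
+ *   "PROP"  global properties (bit rates, packet counts, duration, data offset)
+ *   "CONT"  content description (title/author/copyright/comment)
+ *   "MDPR"  one media properties chunk per stream, ending with the codec
+ *           specific data (".ra" audio header or "VIDORVxx" video header)
+ *   "DATA"  header of the packet stream that follows
+ * For seekable output rm_write_trailer() seeks back and rewrites this header
+ * once the real packet counts and the index position are known. */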
+static void rv10_write_header(AVFormatContext *ctx,
+ int data_size, int index_pos)
+{
+ RMContext *rm = ctx->priv_data;
+ ByteIOContext *s = &ctx->pb;
+ StreamInfo *stream;
+ unsigned char *data_offset_ptr, *start_ptr;
+ const char *desc, *mimetype;
+ int nb_packets, packet_total_size, packet_max_size, size, packet_avg_size, i;
+ int bit_rate, v, duration, flags, data_pos;
+
+ start_ptr = s->buf_ptr;
+
+ put_tag(s, ".RMF");
+ put_be32(s,18); /* header size */
+ put_be16(s,0);
+ put_be32(s,0);
+ put_be32(s,4 + ctx->nb_streams); /* num headers */
+
+ put_tag(s,"PROP");
+ put_be32(s, 50);
+ put_be16(s, 0);
+ packet_max_size = 0;
+ packet_total_size = 0;
+ nb_packets = 0;
+ bit_rate = 0;
+ duration = 0;
+ for(i=0;i<ctx->nb_streams;i++) {
+ StreamInfo *stream = &rm->streams[i];
+ bit_rate += stream->bit_rate;
+ if (stream->packet_max_size > packet_max_size)
+ packet_max_size = stream->packet_max_size;
+ nb_packets += stream->nb_packets;
+ packet_total_size += stream->packet_total_size;
+ /* select maximum duration */
+ v = (int) (1000.0 * (float)stream->total_frames / stream->frame_rate);
+ if (v > duration)
+ duration = v;
+ }
+ put_be32(s, bit_rate); /* max bit rate */
+ put_be32(s, bit_rate); /* avg bit rate */
+ put_be32(s, packet_max_size); /* max packet size */
+ if (nb_packets > 0)
+ packet_avg_size = packet_total_size / nb_packets;
+ else
+ packet_avg_size = 0;
+ put_be32(s, packet_avg_size); /* avg packet size */
+ put_be32(s, nb_packets); /* num packets */
+ put_be32(s, duration); /* duration */
+ put_be32(s, BUFFER_DURATION); /* preroll */
+ put_be32(s, index_pos); /* index offset */
+ /* computation of the data offset */
+ data_offset_ptr = s->buf_ptr;
+ put_be32(s, 0); /* data offset : will be patched after */
+ put_be16(s, ctx->nb_streams); /* num streams */
+ flags = 1 | 2; /* save allowed & perfect play */
+ if (url_is_streamed(s))
+ flags |= 4; /* live broadcast */
+ put_be16(s, flags);
+
+ /* comments */
+
+ put_tag(s,"CONT");
+ size = strlen(ctx->title) + strlen(ctx->author) + strlen(ctx->copyright) +
+ strlen(ctx->comment) + 4 * 2 + 10;
+ put_be32(s,size);
+ put_be16(s,0);
+ put_str(s, ctx->title);
+ put_str(s, ctx->author);
+ put_str(s, ctx->copyright);
+ put_str(s, ctx->comment);
+
+ for(i=0;i<ctx->nb_streams;i++) {
+ int codec_data_size;
+
+ stream = &rm->streams[i];
+
+ if (stream->enc->codec_type == CODEC_TYPE_AUDIO) {
+ desc = "The Audio Stream";
+ mimetype = "audio/x-pn-realaudio";
+ codec_data_size = 73;
+ } else {
+ desc = "The Video Stream";
+ mimetype = "video/x-pn-realvideo";
+ codec_data_size = 34;
+ }
+
+ put_tag(s,"MDPR");
+ size = 10 + 9 * 4 + strlen(desc) + strlen(mimetype) + codec_data_size;
+ put_be32(s, size);
+ put_be16(s, 0);
+
+ put_be16(s, i); /* stream number */
+ put_be32(s, stream->bit_rate); /* max bit rate */
+ put_be32(s, stream->bit_rate); /* avg bit rate */
+ put_be32(s, stream->packet_max_size); /* max packet size */
+ if (stream->nb_packets > 0)
+ packet_avg_size = stream->packet_total_size /
+ stream->nb_packets;
+ else
+ packet_avg_size = 0;
+ put_be32(s, packet_avg_size); /* avg packet size */
+ put_be32(s, 0); /* start time */
+ put_be32(s, BUFFER_DURATION); /* preroll */
+ /* duration */
+ if (url_is_streamed(s) || !stream->total_frames)
+ put_be32(s, (int)(3600 * 1000));
+ else
+ put_be32(s, (int)(stream->total_frames * 1000 / stream->frame_rate));
+ put_str8(s, desc);
+ put_str8(s, mimetype);
+ put_be32(s, codec_data_size);
+
+ if (stream->enc->codec_type == CODEC_TYPE_AUDIO) {
+ int coded_frame_size, fscode, sample_rate;
+ sample_rate = stream->enc->sample_rate;
+ coded_frame_size = (stream->enc->bit_rate *
+ stream->enc->frame_size) / (8 * sample_rate);
+ /* audio codec info */
+ put_tag(s, ".ra");
+ put_byte(s, 0xfd);
+ put_be32(s, 0x00040000); /* version */
+ put_tag(s, ".ra4");
+ put_be32(s, 0x01b53530); /* stream length */
+ put_be16(s, 4); /* unknown */
+ put_be32(s, 0x39); /* header size */
+
+ switch(sample_rate) {
+ case 48000:
+ case 24000:
+ case 12000:
+ fscode = 1;
+ break;
+ default:
+ case 44100:
+ case 22050:
+ case 11025:
+ fscode = 2;
+ break;
+ case 32000:
+ case 16000:
+ case 8000:
+ fscode = 3;
+ }
+ put_be16(s, fscode); /* codec additional info, for AC3, seems
+ to be a frequency code */
+ /* special hack to compensate rounding errors... */
+ if (coded_frame_size == 557)
+ coded_frame_size--;
+ put_be32(s, coded_frame_size); /* frame length */
+ put_be32(s, 0x51540); /* unknown */
+ put_be32(s, 0x249f0); /* unknown */
+ put_be32(s, 0x249f0); /* unknown */
+ put_be16(s, 0x01);
+ /* frame length : seems to be very important */
+ put_be16(s, coded_frame_size);
+ put_be32(s, 0); /* unknown */
+ put_be16(s, stream->enc->sample_rate); /* sample rate */
+ put_be32(s, 0x10); /* unknown */
+ put_be16(s, stream->enc->channels);
+ put_str8(s, "Int0"); /* codec name */
+ put_str8(s, "dnet"); /* codec name */
+ put_be16(s, 0); /* title length */
+ put_be16(s, 0); /* author length */
+ put_be16(s, 0); /* copyright length */
+ put_byte(s, 0); /* end of header */
+ } else {
+ /* video codec info */
+ put_be32(s,34); /* size */
+ if(stream->enc->codec_id == CODEC_ID_RV10)
+ put_tag(s,"VIDORV10");
+ else
+ put_tag(s,"VIDORV20");
+ put_be16(s, stream->enc->width);
+ put_be16(s, stream->enc->height);
+ put_be16(s, (int) stream->frame_rate); /* frames per second ? */
+ put_be32(s,0); /* unknown meaning */
+ put_be16(s, (int) stream->frame_rate); /* unknown meaning */
+ put_be32(s,0); /* unknown meaning */
+ put_be16(s, 8); /* unknown meaning */
+ /* Seems to be the codec version: only use basic H263. The next
+ versions seem to add a differential DC coding as in
+ MPEG... nothing new under the sun */
+ if(stream->enc->codec_id == CODEC_ID_RV10)
+ put_be32(s,0x10000000);
+ else
+ put_be32(s,0x20103001);
+ //put_be32(s,0x10003000);
+ }
+ }
+
+ /* patch data offset field */
+ data_pos = s->buf_ptr - start_ptr;
+ rm->data_pos = data_pos;
+ data_offset_ptr[0] = data_pos >> 24;
+ data_offset_ptr[1] = data_pos >> 16;
+ data_offset_ptr[2] = data_pos >> 8;
+ data_offset_ptr[3] = data_pos;
+
+ /* data stream */
+ put_tag(s,"DATA");
+ put_be32(s,data_size + 10 + 8);
+ put_be16(s,0);
+
+ put_be32(s, nb_packets); /* number of packets */
+ put_be32(s,0); /* next data header */
+}
+
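+/* Each data packet starts with a 12 byte header (all fields big-endian):
+ * u16 version (0), u16 total length (payload + 12), u16 stream number,
+ * u32 timestamp in milliseconds, u8 reserved, u8 flags (2 = keyframe). */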
+static void write_packet_header(AVFormatContext *ctx, StreamInfo *stream,
+ int length, int key_frame)
+{
+ int timestamp;
+ ByteIOContext *s = &ctx->pb;
+
+ stream->nb_packets++;
+ stream->packet_total_size += length;
+ if (length > stream->packet_max_size)
+ stream->packet_max_size = length;
+
+ put_be16(s,0); /* version */
+ put_be16(s,length + 12);
+ put_be16(s, stream->num); /* stream number */
+ timestamp = (1000 * (float)stream->nb_frames) / stream->frame_rate;
+ put_be32(s, timestamp); /* timestamp */
+ put_byte(s, 0); /* reserved */
+ put_byte(s, key_frame ? 2 : 0); /* flags */
+}
+
+static int rm_write_header(AVFormatContext *s)
+{
+ RMContext *rm = s->priv_data;
+ StreamInfo *stream;
+ int n;
+ AVCodecContext *codec;
+
+ for(n=0;n<s->nb_streams;n++) {
+ s->streams[n]->id = n;
+ codec = s->streams[n]->codec;
+ stream = &rm->streams[n];
+ memset(stream, 0, sizeof(StreamInfo));
+ stream->num = n;
+ stream->bit_rate = codec->bit_rate;
+ stream->enc = codec;
+
+ switch(codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ rm->audio_stream = stream;
+ stream->frame_rate = (float)codec->sample_rate / (float)codec->frame_size;
+ /* XXX: dummy values */
+ stream->packet_max_size = 1024;
+ stream->nb_packets = 0;
+ stream->total_frames = stream->nb_packets;
+ break;
+ case CODEC_TYPE_VIDEO:
+ rm->video_stream = stream;
+ stream->frame_rate = (float)codec->time_base.den / (float)codec->time_base.num;
+ /* XXX: dummy values */
+ stream->packet_max_size = 4096;
+ stream->nb_packets = 0;
+ stream->total_frames = stream->nb_packets;
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ rv10_write_header(s, 0, 0);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int flags)
+{
+ uint8_t *buf1;
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ StreamInfo *stream = rm->audio_stream;
+ int i;
+
+ /* XXX: suppress this malloc */
+ buf1= (uint8_t*) av_malloc( size * sizeof(uint8_t) );
+
+ write_packet_header(s, stream, size, !!(flags & PKT_FLAG_KEY));
+
+ /* for AC3, the words seem to be reversed */
+ for(i=0;i<size;i+=2) {
+ buf1[i] = buf[i+1];
+ buf1[i+1] = buf[i];
+ }
+ put_buffer(pb, buf1, size);
+ put_flush_packet(pb);
+ stream->nb_frames++;
+ av_free(buf1);
+ return 0;
+}
+
+static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int flags)
+{
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ StreamInfo *stream = rm->video_stream;
+ int key_frame = !!(flags & PKT_FLAG_KEY);
+
+ /* XXX: this is incorrect: should be a parameter */
+
+ /* Well, I spent some time finding the meaning of these bits. I am
+ not sure I understood everything, but it works !! */
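+ /* The payload is preceded by 7 extra bytes (hence size + 7 below): two
+ flag bytes, the frame size and the offset, each stored as 0x4000 + value
+ so the demuxer can tell 14-bit from 30-bit numbers, and a trailing
+ one-byte frame counter. */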
+#if 1
+ write_packet_header(s, stream, size + 7, key_frame);
+ /* bit 7: '1' if this is the final packet of a frame split into several packets */
+ put_byte(pb, 0x81);
+ /* bit 7: '1' if I frame. bits 6..0 : sequence number in current
+ frame starting from 1 */
+ if (key_frame) {
+ put_byte(pb, 0x81);
+ } else {
+ put_byte(pb, 0x01);
+ }
+ put_be16(pb, 0x4000 + (size)); /* total frame size */
+ put_be16(pb, 0x4000 + (size)); /* offset from the start or the end */
+#else
+ /* full frame */
+ write_packet_header(s, size + 6);
+ put_byte(pb, 0xc0);
+ put_be16(pb, 0x4000 + size); /* total frame size */
+ put_be16(pb, 0x4000 + packet_number * 126); /* position in stream */
+#endif
+ put_byte(pb, stream->nb_frames & 0xff);
+
+ put_buffer(pb, buf, size);
+ put_flush_packet(pb);
+
+ stream->nb_frames++;
+ return 0;
+}
+
+static int rm_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ if (s->streams[pkt->stream_index]->codec->codec_type ==
+ CODEC_TYPE_AUDIO)
+ return rm_write_audio(s, pkt->data, pkt->size, pkt->flags);
+ else
+ return rm_write_video(s, pkt->data, pkt->size, pkt->flags);
+}
+
+static int rm_write_trailer(AVFormatContext *s)
+{
+ RMContext *rm = s->priv_data;
+ int data_size, index_pos, i;
+ ByteIOContext *pb = &s->pb;
+
+ if (!url_is_streamed(&s->pb)) {
+ /* end of file: finish writing the header */
+ index_pos = url_fseek(pb, 0, SEEK_CUR);
+ data_size = index_pos - rm->data_pos;
+
+ /* index */
+ put_tag(pb, "INDX");
+ put_be32(pb, 10 + 10 * s->nb_streams);
+ put_be16(pb, 0);
+
+ for(i=0;i<s->nb_streams;i++) {
+ put_be32(pb, 0); /* zero indices */
+ put_be16(pb, i); /* stream number */
+ put_be32(pb, 0); /* next index */
+ }
+ /* undocumented end header */
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+
+ url_fseek(pb, 0, SEEK_SET);
+ for(i=0;i<s->nb_streams;i++)
+ rm->streams[i].total_frames = rm->streams[i].nb_frames;
+ rv10_write_header(s, data_size, index_pos);
+ } else {
+ /* undocumented end header */
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/***************************************************/
+
+static void get_str(ByteIOContext *pb, char *buf, int buf_size)
+{
+ int len, i;
+ char *q;
+
+ len = get_be16(pb);
+ q = buf;
+ for(i=0;i<len;i++) {
+ if (i < buf_size - 1)
+ *q++ = get_byte(pb);
+ }
+ *q = '\0';
+}
+
+static void get_str8(ByteIOContext *pb, char *buf, int buf_size)
+{
+ int len, i;
+ char *q;
+
+ len = get_byte(pb);
+ q = buf;
+ for(i=0;i<len;i++) {
+ if (i < buf_size - 1)
+ *q++ = get_byte(pb);
+ }
+ *q = '\0';
+}
+
+static int rm_read_audio_stream_info(AVFormatContext *s, AVStream *st,
+ int read_all)
+{
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ char buf[256];
+ uint32_t version;
+ int i;
+
+ /* ra type header */
+ version = get_be32(pb); /* version */
+ if (((version >> 16) & 0xff) == 3) {
+ int64_t startpos = url_ftell(pb);
+ /* very old version */
+ for(i = 0; i < 14; i++)
+ get_byte(pb);
+ get_str8(pb, s->title, sizeof(s->title));
+ get_str8(pb, s->author, sizeof(s->author));
+ get_str8(pb, s->copyright, sizeof(s->copyright));
+ get_str8(pb, s->comment, sizeof(s->comment));
+ if ((startpos + (version & 0xffff)) >= url_ftell(pb) + 2) {
+ // fourcc (should always be "lpcJ")
+ get_byte(pb);
+ get_str8(pb, buf, sizeof(buf));
+ }
+ // Skip extra header crap (this should never happen)
+ if ((startpos + (version & 0xffff)) > url_ftell(pb))
+ url_fskip(pb, (version & 0xffff) + startpos - url_ftell(pb));
+ st->codec->sample_rate = 8000;
+ st->codec->channels = 1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_RA_144;
+ } else {
+ int flavor, sub_packet_h, coded_framesize, sub_packet_size;
+ /* old version (4) */
+ get_be32(pb); /* .ra4 */
+ get_be32(pb); /* data size */
+ get_be16(pb); /* version2 */
+ get_be32(pb); /* header size */
+ flavor= get_be16(pb); /* add codec info / flavor */
+ rm->coded_framesize = coded_framesize = get_be32(pb); /* coded frame size */
+ get_be32(pb); /* ??? */
+ get_be32(pb); /* ??? */
+ get_be32(pb); /* ??? */
+ rm->sub_packet_h = sub_packet_h = get_be16(pb); /* 1 */
+ st->codec->block_align= get_be16(pb); /* frame size */
+ rm->sub_packet_size = sub_packet_size = get_be16(pb); /* sub packet size */
+ get_be16(pb); /* ??? */
+ if (((version >> 16) & 0xff) == 5) {
+ get_be16(pb); get_be16(pb); get_be16(pb); }
+ st->codec->sample_rate = get_be16(pb);
+ get_be32(pb);
+ st->codec->channels = get_be16(pb);
+ if (((version >> 16) & 0xff) == 5) {
+ get_be32(pb);
+ buf[0] = get_byte(pb);
+ buf[1] = get_byte(pb);
+ buf[2] = get_byte(pb);
+ buf[3] = get_byte(pb);
+ buf[4] = 0;
+ } else {
+ get_str8(pb, buf, sizeof(buf)); /* desc */
+ get_str8(pb, buf, sizeof(buf)); /* desc */
+ }
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ if (!strcmp(buf, "dnet")) {
+ st->codec->codec_id = CODEC_ID_AC3;
+ } else if (!strcmp(buf, "28_8")) {
+ st->codec->codec_id = CODEC_ID_RA_288;
+ st->codec->extradata_size= 0;
+ rm->audio_framesize = st->codec->block_align;
+ st->codec->block_align = coded_framesize;
+
+ if(rm->audio_framesize >= UINT_MAX / sub_packet_h){
+ av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n");
+ return -1;
+ }
+
+ rm->audiobuf = av_malloc(rm->audio_framesize * sub_packet_h);
+ } else if (!strcmp(buf, "cook")) {
+ int codecdata_length, i;
+ get_be16(pb); get_byte(pb);
+ if (((version >> 16) & 0xff) == 5)
+ get_byte(pb);
+ codecdata_length = get_be32(pb);
+ if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
+ av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
+ return -1;
+ }
+
+ st->codec->codec_id = CODEC_ID_COOK;
+ st->codec->extradata_size= codecdata_length;
+ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ for(i = 0; i < codecdata_length; i++)
+ ((uint8_t*)st->codec->extradata)[i] = get_byte(pb);
+ rm->audio_framesize = st->codec->block_align;
+ st->codec->block_align = rm->sub_packet_size;
+
+ if(rm->audio_framesize >= UINT_MAX / sub_packet_h){
+ av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n");
+ return -1;
+ }
+
+ rm->audiobuf = av_malloc(rm->audio_framesize * sub_packet_h);
+ } else if (!strcmp(buf, "raac") || !strcmp(buf, "racp")) {
+ int codecdata_length, i;
+ get_be16(pb); get_byte(pb);
+ if (((version >> 16) & 0xff) == 5)
+ get_byte(pb);
+ st->codec->codec_id = CODEC_ID_AAC;
+ codecdata_length = get_be32(pb);
+ if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
+ av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
+ return -1;
+ }
+ if (codecdata_length >= 1) {
+ st->codec->extradata_size = codecdata_length - 1;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_byte(pb);
+ for(i = 0; i < st->codec->extradata_size; i++)
+ ((uint8_t*)st->codec->extradata)[i] = get_byte(pb);
+ }
+ } else {
+ st->codec->codec_id = CODEC_ID_NONE;
+ pstrcpy(st->codec->codec_name, sizeof(st->codec->codec_name),
+ buf);
+ }
+ if (read_all) {
+ get_byte(pb);
+ get_byte(pb);
+ get_byte(pb);
+
+ get_str8(pb, s->title, sizeof(s->title));
+ get_str8(pb, s->author, sizeof(s->author));
+ get_str8(pb, s->copyright, sizeof(s->copyright));
+ get_str8(pb, s->comment, sizeof(s->comment));
+ }
+ }
+ return 0;
+}
+
+static int rm_read_header_old(AVFormatContext *s, AVFormatParameters *ap)
+{
+ RMContext *rm = s->priv_data;
+ AVStream *st;
+
+ rm->old_format = 1;
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ return rm_read_audio_stream_info(s, st, 1);
+}
+
+static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ RMContext *rm = s->priv_data;
+ AVStream *st;
+ ByteIOContext *pb = &s->pb;
+ unsigned int tag, v;
+ int tag_size, size, codec_data_size, i;
+ int64_t codec_pos;
+ unsigned int start_time, duration;
+ char buf[128];
+ int flags = 0;
+
+ tag = get_le32(pb);
+ if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
+ /* very old .ra format */
+ return rm_read_header_old(s, ap);
+ } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
+ return AVERROR_IO;
+ }
+
+ get_be32(pb); /* header size */
+ get_be16(pb);
+ get_be32(pb);
+ get_be32(pb); /* number of headers */
+
+ for(;;) {
+ if (url_feof(pb))
+ goto fail;
+ tag = get_le32(pb);
+ tag_size = get_be32(pb);
+ get_be16(pb);
+#if 0
+ printf("tag=%c%c%c%c (%08x) size=%d\n",
+ (tag) & 0xff,
+ (tag >> 8) & 0xff,
+ (tag >> 16) & 0xff,
+ (tag >> 24) & 0xff,
+ tag,
+ tag_size);
+#endif
+ if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
+ goto fail;
+ switch(tag) {
+ case MKTAG('P', 'R', 'O', 'P'):
+ /* file header */
+ get_be32(pb); /* max bit rate */
+ get_be32(pb); /* avg bit rate */
+ get_be32(pb); /* max packet size */
+ get_be32(pb); /* avg packet size */
+ get_be32(pb); /* nb packets */
+ get_be32(pb); /* duration */
+ get_be32(pb); /* preroll */
+ get_be32(pb); /* index offset */
+ get_be32(pb); /* data offset */
+ get_be16(pb); /* nb streams */
+ flags = get_be16(pb); /* flags */
+ break;
+ case MKTAG('C', 'O', 'N', 'T'):
+ get_str(pb, s->title, sizeof(s->title));
+ get_str(pb, s->author, sizeof(s->author));
+ get_str(pb, s->copyright, sizeof(s->copyright));
+ get_str(pb, s->comment, sizeof(s->comment));
+ break;
+ case MKTAG('M', 'D', 'P', 'R'):
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto fail;
+ st->id = get_be16(pb);
+ get_be32(pb); /* max bit rate */
+ st->codec->bit_rate = get_be32(pb); /* bit rate */
+ get_be32(pb); /* max packet size */
+ get_be32(pb); /* avg packet size */
+ start_time = get_be32(pb); /* start time */
+ get_be32(pb); /* preroll */
+ duration = get_be32(pb); /* duration */
+ st->start_time = start_time;
+ st->duration = duration;
+ get_str8(pb, buf, sizeof(buf)); /* desc */
+ get_str8(pb, buf, sizeof(buf)); /* mimetype */
+ codec_data_size = get_be32(pb);
+ codec_pos = url_ftell(pb);
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ av_set_pts_info(st, 64, 1, 1000);
+
+ v = get_be32(pb);
+ if (v == MKTAG(0xfd, 'a', 'r', '.')) {
+ /* ra type header */
+ if (rm_read_audio_stream_info(s, st, 0))
+ return -1;
+ } else {
+ int fps, fps2;
+ if (get_le32(pb) != MKTAG('V', 'I', 'D', 'O')) {
+ fail1:
+ av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
+ goto skip;
+ }
+ st->codec->codec_tag = get_le32(pb);
+// av_log(NULL, AV_LOG_DEBUG, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
+ if ( st->codec->codec_tag != MKTAG('R', 'V', '1', '0')
+ && st->codec->codec_tag != MKTAG('R', 'V', '2', '0')
+ && st->codec->codec_tag != MKTAG('R', 'V', '3', '0')
+ && st->codec->codec_tag != MKTAG('R', 'V', '4', '0'))
+ goto fail1;
+ st->codec->width = get_be16(pb);
+ st->codec->height = get_be16(pb);
+ st->codec->time_base.num= 1;
+ fps= get_be16(pb);
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ get_be32(pb);
+ fps2= get_be16(pb);
+ get_be16(pb);
+
+ st->codec->extradata_size= codec_data_size - (url_ftell(pb) - codec_pos);
+
+ if(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
+ //check is redundant as get_buffer() will catch this
+ av_log(s, AV_LOG_ERROR, "st->codec->extradata_size too large\n");
+ return -1;
+ }
+ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
+
+// av_log(NULL, AV_LOG_DEBUG, "fps= %d fps2= %d\n", fps, fps2);
+ st->codec->time_base.den = fps * st->codec->time_base.num;
+ switch(((uint8_t*)st->codec->extradata)[4]>>4){
+ case 1: st->codec->codec_id = CODEC_ID_RV10; break;
+ case 2: st->codec->codec_id = CODEC_ID_RV20; break;
+ case 3: st->codec->codec_id = CODEC_ID_RV30; break;
+ case 4: st->codec->codec_id = CODEC_ID_RV40; break;
+ default: goto fail1;
+ }
+ }
+skip:
+ /* skip codec info */
+ size = url_ftell(pb) - codec_pos;
+ url_fskip(pb, codec_data_size - size);
+ break;
+ case MKTAG('D', 'A', 'T', 'A'):
+ goto header_end;
+ default:
+ /* unknown tag: skip it */
+ url_fskip(pb, tag_size - 10);
+ break;
+ }
+ }
+ header_end:
+ rm->nb_packets = get_be32(pb); /* number of packets */
+ if (!rm->nb_packets && (flags & 4))
+ rm->nb_packets = 3600 * 25;
+ get_be32(pb); /* next data header */
+ return 0;
+
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ av_free(s->streams[i]);
+ }
+ return AVERROR_IO;
+}
+
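+/* Numbers in the video sub-packet header use a variable-length encoding:
+ * a 16-bit value >= 0x4000 directly encodes (value - 0x4000), otherwise it
+ * is the high half of a 32-bit number whose low half follows. E.g. the
+ * bytes 40 05 decode to 5, while 00 01 86 A0 decode to 0x186A0. */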
+static int get_num(ByteIOContext *pb, int *len)
+{
+ int n, n1;
+
+ n = get_be16(pb);
+ (*len)-=2;
+ if (n >= 0x4000) {
+ return n - 0x4000;
+ } else {
+ n1 = get_be16(pb);
+ (*len)-=2;
+ return (n << 16) | n1;
+ }
+}
+
+/* multiple of 20 bytes for ra144 (ugly) */
+#define RAW_PACKET_SIZE 1000
+
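+/* Resynchronize on the next data packet: scan the stream byte by byte for
+ * something that looks like a packet header (a plausible 16-bit length in
+ * the range 12..0xFFFF followed by stream number, timestamp and flags),
+ * skipping INDX chunks on the way. Returns the payload length or -1 on EOF. */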
+static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int len, num, res, i;
+ AVStream *st;
+ uint32_t state=0xFFFFFFFF;
+
+ while(!url_feof(pb)){
+ *pos= url_ftell(pb);
+ if(rm->remaining_len > 0){
+ num= rm->current_stream;
+ len= rm->remaining_len;
+ *timestamp = AV_NOPTS_VALUE;
+ *flags= 0;
+ }else{
+ state= (state<<8) + get_byte(pb);
+
+ if(state == MKBETAG('I', 'N', 'D', 'X')){
+ len = get_be16(pb) - 6;
+ if(len<0)
+ continue;
+ goto skip;
+ }
+
+ if(state > (unsigned)0xFFFF || state < 12)
+ continue;
+ len=state;
+ state= 0xFFFFFFFF;
+
+ num = get_be16(pb);
+ *timestamp = get_be32(pb);
+ res= get_byte(pb); /* reserved */
+ *flags = get_byte(pb); /* flags */
+
+
+ len -= 12;
+ }
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ if (num == st->id)
+ break;
+ }
+ if (i == s->nb_streams) {
+skip:
+ /* skip packet if unknown number */
+ url_fskip(pb, len);
+ rm->remaining_len -= len;
+ continue;
+ }
+ *stream_index= i;
+
+ return len;
+ }
+ return -1;
+}
+
+static int rm_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ int i, len, tmp, j;
+ int64_t timestamp, pos;
+ uint8_t *ptr;
+ int flags;
+
+ if (rm->audio_pkt_cnt) {
+ // If there are queued audio packets, return them first
+ st = s->streams[rm->audio_stream_num];
+ if (st->codec->codec_id == CODEC_ID_AAC)
+ av_get_packet(pb, pkt, rm->sub_packet_lengths[rm->sub_packet_cnt - rm->audio_pkt_cnt]);
+ else {
+ av_new_packet(pkt, st->codec->block_align);
+ memcpy(pkt->data, rm->audiobuf + st->codec->block_align *
+ (rm->sub_packet_h * rm->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt),
+ st->codec->block_align);
+ }
+ rm->audio_pkt_cnt--;
+ pkt->flags = 0;
+ pkt->stream_index = rm->audio_stream_num;
+ } else if (rm->old_format) {
+ st = s->streams[0];
+ if (st->codec->codec_id == CODEC_ID_RA_288) {
+ int x, y;
+
+ for (y = 0; y < rm->sub_packet_h; y++)
+ for (x = 0; x < rm->sub_packet_h/2; x++)
+ if (get_buffer(pb, rm->audiobuf+x*2*rm->audio_framesize+y*rm->coded_framesize, rm->coded_framesize) <= 0)
+ return AVERROR_IO;
+ rm->audio_stream_num = 0;
+ rm->audio_pkt_cnt = rm->sub_packet_h * rm->audio_framesize / st->codec->block_align - 1;
+ // Release first audio packet
+ av_new_packet(pkt, st->codec->block_align);
+ memcpy(pkt->data, rm->audiobuf, st->codec->block_align);
+ pkt->flags |= PKT_FLAG_KEY; // Mark first packet as keyframe
+ pkt->stream_index = 0;
+ } else {
+ /* just read raw bytes */
+ len = RAW_PACKET_SIZE;
+ len= av_get_packet(pb, pkt, len);
+ pkt->stream_index = 0;
+ if (len <= 0) {
+ return AVERROR_IO;
+ }
+ pkt->size = len;
+ }
+ } else {
+ int seq=1;
+resync:
+ len=sync(s, &timestamp, &flags, &i, &pos);
+ if(len<0)
+ return AVERROR_IO;
+ st = s->streams[i];
+
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ int h, pic_num, len2, pos;
+
+ h= get_byte(pb); len--;
+ if(!(h & 0x40)){
+ seq = get_byte(pb); len--;
+ }
+
+ if((h & 0xc0) == 0x40){
+ len2= pos= 0;
+ }else{
+ len2 = get_num(pb, &len);
+ pos = get_num(pb, &len);
+ }
+ /* picture number */
+ pic_num= get_byte(pb); len--;
+ rm->remaining_len= len;
+ rm->current_stream= st->id;
+
+// av_log(NULL, AV_LOG_DEBUG, "%X len:%d pos:%d len2:%d pic_num:%d\n",h, len, pos, len2, pic_num);
+ if(len2 && len2<len)
+ len=len2;
+ rm->remaining_len-= len;
+ av_get_packet(pb, pkt, len);
+ }
+
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ if ((st->codec->codec_id == CODEC_ID_RA_288) ||
+ (st->codec->codec_id == CODEC_ID_COOK)) {
+ int x;
+ int sps = rm->sub_packet_size;
+ int cfs = rm->coded_framesize;
+ int h = rm->sub_packet_h;
+ int y = rm->sub_packet_cnt;
+ int w = rm->audio_framesize;
+
+ if (flags & 2)
+ y = rm->sub_packet_cnt = 0;
+ if (!y)
+ rm->audiotimestamp = timestamp;
+
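+ /* Descramble the audio: the container interleaves the coded frames
+ across sub_packet_h consecutive packets. Each packet scatters its
+ pieces into audiobuf; once all sub_packet_h packets have arrived,
+ the buffer holds audio_framesize * sub_packet_h contiguous bytes,
+ which are then returned as block_align sized packets. */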
+ switch(st->codec->codec_id) {
+ case CODEC_ID_RA_288:
+ for (x = 0; x < h/2; x++)
+ get_buffer(pb, rm->audiobuf+x*2*w+y*cfs, cfs);
+ break;
+ case CODEC_ID_COOK:
+ for (x = 0; x < w/sps; x++)
+ get_buffer(pb, rm->audiobuf+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
+ break;
+ }
+
+ if (++(rm->sub_packet_cnt) < h)
+ goto resync;
+ else {
+ rm->sub_packet_cnt = 0;
+ rm->audio_stream_num = i;
+ rm->audio_pkt_cnt = h * w / st->codec->block_align - 1;
+ // Release first audio packet
+ av_new_packet(pkt, st->codec->block_align);
+ memcpy(pkt->data, rm->audiobuf, st->codec->block_align);
+ timestamp = rm->audiotimestamp;
+ flags = 2; // Mark first packet as keyframe
+ }
+ } else if (st->codec->codec_id == CODEC_ID_AAC) {
+ int x;
+ rm->audio_stream_num = i;
+ rm->sub_packet_cnt = (get_be16(pb) & 0xf0) >> 4;
+ if (rm->sub_packet_cnt) {
+ for (x = 0; x < rm->sub_packet_cnt; x++)
+ rm->sub_packet_lengths[x] = get_be16(pb);
+ // Release first audio packet
+ rm->audio_pkt_cnt = rm->sub_packet_cnt - 1;
+ av_get_packet(pb, pkt, rm->sub_packet_lengths[0]);
+ flags = 2; // Mark first packet as keyframe
+ }
+ } else
+ av_get_packet(pb, pkt, len);
+ }
+
+ if( (st->discard >= AVDISCARD_NONKEY && !(flags&2))
+ || st->discard >= AVDISCARD_ALL){
+ av_free_packet(pkt);
+ goto resync;
+ }
+
+ pkt->stream_index = i;
+
+#if 0
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if(st->codec->codec_id == CODEC_ID_RV20){
+ int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
+ av_log(NULL, AV_LOG_DEBUG, "%d %"PRId64" %d\n", timestamp, timestamp*512LL/25, seq);
+
+ seq |= (timestamp&~0x3FFF);
+ if(seq - timestamp > 0x2000) seq -= 0x4000;
+ if(seq - timestamp < -0x2000) seq += 0x4000;
+ }
+ }
+#endif
+ pkt->pts= timestamp;
+ if(flags&2){
+ pkt->flags |= PKT_FLAG_KEY;
+ if((seq&0x7F) == 1)
+ av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME);
+ }
+ }
+
+ /* for AC3, the bytes need to be swapped */
+ if (st->codec->codec_id == CODEC_ID_AC3) {
+ ptr = pkt->data;
+ for(j=0;j<len;j+=2) {
+ tmp = ptr[0];
+ ptr[0] = ptr[1];
+ ptr[1] = tmp;
+ ptr += 2;
+ }
+ }
+ return 0;
+}
+
+static int rm_read_close(AVFormatContext *s)
+{
+ RMContext *rm = s->priv_data;
+
+ av_free(rm->audiobuf);
+ return 0;
+}
+
+static int rm_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if ((p->buf[0] == '.' && p->buf[1] == 'R' &&
+ p->buf[2] == 'M' && p->buf[3] == 'F' &&
+ p->buf[4] == 0 && p->buf[5] == 0) ||
+ (p->buf[0] == '.' && p->buf[1] == 'r' &&
+ p->buf[2] == 'a' && p->buf[3] == 0xfd))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
+ int64_t *ppos, int64_t pos_limit)
+{
+ RMContext *rm = s->priv_data;
+ int64_t pos, dts;
+ int stream_index2, flags, len, h;
+
+ pos = *ppos;
+
+ if(rm->old_format)
+ return AV_NOPTS_VALUE;
+
+ url_fseek(&s->pb, pos, SEEK_SET);
+ rm->remaining_len=0;
+ for(;;){
+ int seq=1;
+ AVStream *st;
+
+ len=sync(s, &dts, &flags, &stream_index2, &pos);
+ if(len<0)
+ return AV_NOPTS_VALUE;
+
+ st = s->streams[stream_index2];
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ h= get_byte(&s->pb); len--;
+ if(!(h & 0x40)){
+ seq = get_byte(&s->pb); len--;
+ }
+ }
+
+ if((flags&2) && (seq&0x7F) == 1){
+// av_log(s, AV_LOG_DEBUG, "%d %d-%d %"PRId64" %d\n", flags, stream_index2, stream_index, dts, seq);
+ av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME);
+ if(stream_index2 == stream_index)
+ break;
+ }
+
+ url_fskip(&s->pb, len);
+ }
+ *ppos = pos;
+ return dts;
+}
+
+#ifdef CONFIG_RM_DEMUXER
+AVInputFormat rm_demuxer = {
+ "rm",
+ "rm format",
+ sizeof(RMContext),
+ rm_probe,
+ rm_read_header,
+ rm_read_packet,
+ rm_read_close,
+ NULL,
+ rm_read_dts,
+};
+#endif
+#ifdef CONFIG_RM_MUXER
+AVOutputFormat rm_muxer = {
+ "rm",
+ "rm format",
+ "application/vnd.rn-realmedia",
+ "rm,ra",
+ sizeof(RMContext),
+ CODEC_ID_AC3,
+ CODEC_ID_RV10,
+ rm_write_header,
+ rm_write_packet,
+ rm_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/rtp.c b/contrib/ffmpeg/libavformat/rtp.c
new file mode 100644
index 000000000..37a286289
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp.c
@@ -0,0 +1,1099 @@
+/*
+ * RTP input/output format
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "mpegts.h"
+#include "bitstream.h"
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+
+#include "rtp_internal.h"
+#include "rtp_h264.h"
+
+//#define DEBUG
+
+
+/* TODO: - add RTCP statistics reporting (should be optional).
+
+ - add support for h263/mpeg4 packetized output: IDEA: send a
+ buffer to 'rtp_write_packet' that contains all the packets for ONE
+ frame. Each packet should have a four byte header containing
+ the length in big endian format (same trick as
+ 'url_open_dyn_packet_buf')
+*/
+
+/* from http://www.iana.org/assignments/rtp-parameters last updated 05 January 2005 */
+AVRtpPayloadType_t AVRtpPayloadTypes[]=
+{
+ {0, "PCMU", CODEC_TYPE_AUDIO, CODEC_ID_PCM_MULAW, 8000, 1},
+ {1, "Reserved", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {2, "Reserved", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {3, "GSM", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {4, "G723", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {5, "DVI4", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {6, "DVI4", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 16000, 1},
+ {7, "LPC", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {8, "PCMA", CODEC_TYPE_AUDIO, CODEC_ID_PCM_ALAW, 8000, 1},
+ {9, "G722", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {10, "L16", CODEC_TYPE_AUDIO, CODEC_ID_PCM_S16BE, 44100, 2},
+ {11, "L16", CODEC_TYPE_AUDIO, CODEC_ID_PCM_S16BE, 44100, 1},
+ {12, "QCELP", CODEC_TYPE_AUDIO, CODEC_ID_QCELP, 8000, 1},
+ {13, "CN", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {14, "MPA", CODEC_TYPE_AUDIO, CODEC_ID_MP2, 90000, -1},
+ {15, "G728", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {16, "DVI4", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 11025, 1},
+ {17, "DVI4", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 22050, 1},
+ {18, "G729", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {19, "reserved", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {20, "unassigned", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {21, "unassigned", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {22, "unassigned", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {23, "unassigned", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {24, "unassigned", CODEC_TYPE_VIDEO, CODEC_ID_NONE, -1, -1},
+ {25, "CelB", CODEC_TYPE_VIDEO, CODEC_ID_NONE, 90000, -1},
+ {26, "JPEG", CODEC_TYPE_VIDEO, CODEC_ID_MJPEG, 90000, -1},
+ {27, "unassigned", CODEC_TYPE_VIDEO, CODEC_ID_NONE, -1, -1},
+ {28, "nv", CODEC_TYPE_VIDEO, CODEC_ID_NONE, 90000, -1},
+ {29, "unassigned", CODEC_TYPE_VIDEO, CODEC_ID_NONE, -1, -1},
+ {30, "unassigned", CODEC_TYPE_VIDEO, CODEC_ID_NONE, -1, -1},
+ {31, "H261", CODEC_TYPE_VIDEO, CODEC_ID_H261, 90000, -1},
+ {32, "MPV", CODEC_TYPE_VIDEO, CODEC_ID_MPEG1VIDEO, 90000, -1},
+ {33, "MP2T", CODEC_TYPE_DATA, CODEC_ID_MPEG2TS, 90000, -1},
+ {34, "H263", CODEC_TYPE_VIDEO, CODEC_ID_H263, 90000, -1},
+ {35, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {36, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {37, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {38, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {39, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {40, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {41, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {42, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {43, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {44, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {45, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {46, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {47, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {48, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {49, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {50, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {51, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {52, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {53, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {54, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {55, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {56, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {57, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {58, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {59, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {60, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {61, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {62, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {63, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {64, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {65, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {66, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {67, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {68, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {69, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {70, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {71, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {72, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {73, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {74, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {75, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {76, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {77, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {78, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {79, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {80, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {81, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {82, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {83, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {84, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {85, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {86, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {87, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {88, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {89, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {90, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {91, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {92, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {93, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {94, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {95, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {96, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {97, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {98, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {99, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {100, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {101, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {102, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {103, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {104, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {105, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {106, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {107, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {108, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {109, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {110, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {111, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {112, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {113, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {114, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {115, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {116, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {117, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {118, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {119, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {120, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {121, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {122, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {123, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {124, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {125, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {126, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {127, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {-1, "", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1}
+};
+
+/* dynamic payload handler registration */
+RTPDynamicProtocolHandler *RTPFirstDynamicPayloadHandler= NULL;
+
+static RTPDynamicProtocolHandler mp4v_es_handler= {"MP4V-ES", CODEC_TYPE_VIDEO, CODEC_ID_MPEG4};
+static RTPDynamicProtocolHandler mpeg4_generic_handler= {"mpeg4-generic", CODEC_TYPE_AUDIO, CODEC_ID_AAC};
+
+static void register_dynamic_payload_handler(RTPDynamicProtocolHandler *handler)
+{
+ handler->next= RTPFirstDynamicPayloadHandler;
+ RTPFirstDynamicPayloadHandler= handler;
+}
+
+void av_register_rtp_dynamic_payload_handlers()
+{
+ register_dynamic_payload_handler(&mp4v_es_handler);
+ register_dynamic_payload_handler(&mpeg4_generic_handler);
+ register_dynamic_payload_handler(&ff_h264_dynamic_handler);
+}
+
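+/* Fill codec parameters from the static payload type table above,
+ * e.g. payload type 0 ("PCMU") yields CODEC_ID_PCM_MULAW at 8000 Hz mono,
+ * while type 14 ("MPA") only sets CODEC_ID_MP2 and a 90 kHz clock,
+ * leaving the channel count untouched. */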
+int rtp_get_codec_info(AVCodecContext *codec, int payload_type)
+{
+ if (AVRtpPayloadTypes[payload_type].codec_id != CODEC_ID_NONE) {
+ codec->codec_type = AVRtpPayloadTypes[payload_type].codec_type;
+ codec->codec_id = AVRtpPayloadTypes[payload_type].codec_id;
+ if (AVRtpPayloadTypes[payload_type].audio_channels > 0)
+ codec->channels = AVRtpPayloadTypes[payload_type].audio_channels;
+ if (AVRtpPayloadTypes[payload_type].clock_rate > 0)
+ codec->sample_rate = AVRtpPayloadTypes[payload_type].clock_rate;
+ return 0;
+ }
+ return -1;
+}
+
+/* return < 0 if unknown payload type */
+int rtp_get_payload_type(AVCodecContext *codec)
+{
+ int i, payload_type;
+
+ /* compute the payload type */
+ for (payload_type = -1, i = 0; AVRtpPayloadTypes[i].pt >= 0; ++i)
+ if (AVRtpPayloadTypes[i].codec_id == codec->codec_id) {
+ if (codec->codec_id == CODEC_ID_PCM_S16BE)
+ if (codec->channels != AVRtpPayloadTypes[i].audio_channels)
+ continue;
+ payload_type = AVRtpPayloadTypes[i].pt;
+ }
+ return payload_type;
+}
+
+static inline uint32_t decode_be32(const uint8_t *p)
+{
+ return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+}
+
+static inline uint64_t decode_be64(const uint8_t *p)
+{
+ return ((uint64_t)decode_be32(p) << 32) | decode_be32(p + 4);
+}
+
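+/* Minimal RTCP Sender Report parsing: packet type 200 (SR) carries the
+ * 64-bit NTP time at byte offset 8 and the matching RTP timestamp at
+ * offset 16; the pair is later used in finalize_packet() to map RTP
+ * timestamps onto a common pts base. */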
+static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int len)
+{
+ if (buf[1] != 200)
+ return -1;
+ s->last_rtcp_ntp_time = decode_be64(buf + 8);
+ if (s->first_rtcp_ntp_time == AV_NOPTS_VALUE)
+ s->first_rtcp_ntp_time = s->last_rtcp_ntp_time;
+ s->last_rtcp_timestamp = decode_be32(buf + 16);
+ return 0;
+}
+
+#define RTP_SEQ_MOD (1<<16)
+
+/**
+* called when an RTP parse context is opened
+*/
+static void rtp_init_statistics(RTPStatistics *s, uint16_t base_sequence)
+{
+ memset(s, 0, sizeof(RTPStatistics));
+ s->max_seq= base_sequence;
+ s->probation= 1;
+}
+
+/**
+* called whenever there is a large jump in sequence numbers, or when they get out of probation...
+*/
+static void rtp_init_sequence(RTPStatistics *s, uint16_t seq)
+{
+ s->max_seq= seq;
+ s->cycles= 0;
+ s->base_seq= seq -1;
+ s->bad_seq= RTP_SEQ_MOD + 1;
+ s->received= 0;
+ s->expected_prior= 0;
+ s->received_prior= 0;
+ s->jitter= 0;
+ s->transit= 0;
+}
+
+/**
+* returns 1 if we should handle this packet.
+*/
+static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq)
+{
+ uint16_t udelta= seq - s->max_seq;
+ const int MAX_DROPOUT= 3000;
+ const int MAX_MISORDER = 100;
+ const int MIN_SEQUENTIAL = 2;
+
+ /* source is not valid until MIN_SEQUENTIAL packets with sequential sequence numbers have been received */
+ if(s->probation)
+ {
+ if(seq==s->max_seq + 1) {
+ s->probation--;
+ s->max_seq= seq;
+ if(s->probation==0) {
+ rtp_init_sequence(s, seq);
+ s->received++;
+ return 1;
+ }
+ } else {
+ s->probation= MIN_SEQUENTIAL - 1;
+ s->max_seq = seq;
+ }
+ } else if (udelta < MAX_DROPOUT) {
+ // in order, with permissible gap
+ if(seq < s->max_seq) {
+ //sequence number wrapped; count another 64k cycles
+ s->cycles += RTP_SEQ_MOD;
+ }
+ s->max_seq= seq;
+ } else if (udelta <= RTP_SEQ_MOD - MAX_MISORDER) {
+ // sequence made a large jump...
+ if(seq==s->bad_seq) {
+ // two sequential packets-- assume that the other side restarted without telling us; just resync.
+ rtp_init_sequence(s, seq);
+ } else {
+ s->bad_seq= (seq + 1) & (RTP_SEQ_MOD-1);
+ return 0;
+ }
+ } else {
+ // duplicate or reordered packet...
+ }
+ s->received++;
+ return 1;
+}
+
+#if 0
+/**
+* This function is currently unused; without a valid local ntp time, I don't see how we could calculate the
+* difference between the arrival and sent timestamp. As a result, the jitter and transit statistics values
+* never change. I left this in in case someone else can see a way. (rdm)
+*/
+static void rtcp_update_jitter(RTPStatistics *s, uint32_t sent_timestamp, uint32_t arrival_timestamp)
+{
+ uint32_t transit= arrival_timestamp - sent_timestamp;
+ int d;
+ s->transit= transit;
+ d= FFABS(transit - s->transit);
+ s->jitter += d - ((s->jitter + 8)>>4);
+}
+#endif
+
+/**
+ * some RTP servers assume the client is dead if they don't hear from them...
+ * so we send a Receiver Report to the provided ByteIO context
+ * (we don't have access to the rtcp handle from here)
+ */
+int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
+{
+ ByteIOContext pb;
+ uint8_t *buf;
+ int len;
+ int rtcp_bytes;
+ RTPStatistics *stats= &s->statistics;
+ uint32_t lost;
+ uint32_t extended_max;
+ uint32_t expected_interval;
+ uint32_t received_interval;
+ uint32_t lost_interval;
+ uint32_t expected;
+ uint32_t fraction;
+ uint64_t ntp_time= s->last_rtcp_ntp_time; // TODO: Get local ntp time?
+
+ if (!s->rtp_ctx || (count < 1))
+ return -1;
+
+ /* TODO: I think this is way too often; RFC 1889 has an algorithm for this */
+ /* XXX: mpeg pts hardcoded. RTCP send every 0.5 seconds */
+ s->octet_count += count;
+ rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
+ RTCP_TX_RATIO_DEN;
+ rtcp_bytes /= 50; // mmu_man: that's enough for me... VLC sends much less btw !?
+ if (rtcp_bytes < 28)
+ return -1;
+ s->last_octet_count = s->octet_count;
+
+ if (url_open_dyn_buf(&pb) < 0)
+ return -1;
+
+ // Receiver Report
+ put_byte(&pb, (RTP_VERSION << 6) + 1); /* 1 report block */
+ put_byte(&pb, 201);
+ put_be16(&pb, 7); /* length in words - 1 */
+ put_be32(&pb, s->ssrc); // our own SSRC
+ put_be32(&pb, s->ssrc); // XXX: should be the server's here!
+ // some placeholders we should really fill...
+ // RFC 1889/p64
+ extended_max= stats->cycles + stats->max_seq;
+ expected= extended_max - stats->base_seq + 1;
+ lost= expected - stats->received;
+ lost= FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits...
+ expected_interval= expected - stats->expected_prior;
+ stats->expected_prior= expected;
+ received_interval= stats->received - stats->received_prior;
+ stats->received_prior= stats->received;
+ lost_interval= expected_interval - received_interval;
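+ // RFC 1889 fraction lost: losses in this interval scaled to 1/256 units,
+ // e.g. 5 lost out of 100 expected gives (5<<8)/100 = 12 (about 5%).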
+ if (expected_interval==0 || lost_interval<=0) fraction= 0;
+ else fraction = (lost_interval<<8)/expected_interval;
+
+ fraction= (fraction<<24) | lost;
+
+ put_be32(&pb, fraction); /* 8 bits of fraction, 24 bits of total packets lost */
+ put_be32(&pb, extended_max); /* max sequence received */
+ put_be32(&pb, stats->jitter>>4); /* jitter */
+
+ if(s->last_rtcp_ntp_time==AV_NOPTS_VALUE)
+ {
+ put_be32(&pb, 0); /* last SR timestamp */
+ put_be32(&pb, 0); /* delay since last SR */
+ } else {
+ uint32_t middle_32_bits= s->last_rtcp_ntp_time>>16; // this is valid, right? do we need to handle 64 bit values specially?
+ uint32_t delay_since_last= ntp_time - s->last_rtcp_ntp_time;
+
+ put_be32(&pb, middle_32_bits); /* last SR timestamp */
+ put_be32(&pb, delay_since_last); /* delay since last SR */
+ }
+
+ // CNAME
+ put_byte(&pb, (RTP_VERSION << 6) + 1); /* 1 report block */
+ put_byte(&pb, 202);
+ len = strlen(s->hostname);
+ put_be16(&pb, (6 + len + 3) / 4); /* length in words - 1 */
+ put_be32(&pb, s->ssrc);
+ put_byte(&pb, 0x01);
+ put_byte(&pb, len);
+ put_buffer(&pb, s->hostname, len);
+ // padding
+ for (len = (6 + len) % 4; len % 4; len++) {
+ put_byte(&pb, 0);
+ }
+
+ put_flush_packet(&pb);
+ len = url_close_dyn_buf(&pb, &buf);
+ if ((len > 0) && buf) {
+ int result;
+#if defined(DEBUG)
+ printf("sending %d bytes of RR\n", len);
+#endif
+ result= url_write(s->rtp_ctx, buf, len);
+#if defined(DEBUG)
+ printf("result from url_write: %d\n", result);
+#endif
+ av_free(buf);
+ }
+ return 0;
+}
+
+/**
+ * open a new RTP parse context for stream 'st'. 'st' can be NULL for
+ * MPEG2TS streams to indicate that they should be demuxed inside the
+ * rtp demux (otherwise CODEC_ID_MPEG2TS packets are returned)
+ * TODO: change this to not take rtp_payload data, and use the new dynamic payload system.
+ */
+RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext *rtpc, int payload_type, rtp_payload_data_t *rtp_payload_data)
+{
+ RTPDemuxContext *s;
+
+ s = av_mallocz(sizeof(RTPDemuxContext));
+ if (!s)
+ return NULL;
+ s->payload_type = payload_type;
+ s->last_rtcp_ntp_time = AV_NOPTS_VALUE;
+ s->first_rtcp_ntp_time = AV_NOPTS_VALUE;
+ s->ic = s1;
+ s->st = st;
+ s->rtp_payload_data = rtp_payload_data;
+ rtp_init_statistics(&s->statistics, 0); // do we know the initial sequence from sdp?
+ if (!strcmp(AVRtpPayloadTypes[payload_type].enc_name, "MP2T")) {
+ s->ts = mpegts_parse_open(s->ic);
+ if (s->ts == NULL) {
+ av_free(s);
+ return NULL;
+ }
+ } else {
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MPEG1VIDEO:
+ case CODEC_ID_MPEG2VIDEO:
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ case CODEC_ID_MPEG4:
+ case CODEC_ID_H264:
+ st->need_parsing = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ // needed to send back RTCP RR in RTSP sessions
+ s->rtp_ctx = rtpc;
+ gethostname(s->hostname, sizeof(s->hostname));
+ return s;
+}
+
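+/* Parse the mpeg4-generic (RFC 3640) AU header section that precedes the
+ * AAC payload. With the common SDP parameters sizelength=13 and
+ * indexlength=3 each AU header is 16 bits, so an AU-headers-length field
+ * of 32 bits describes two access units. */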
+static int rtp_parse_mp4_au(RTPDemuxContext *s, const uint8_t *buf)
+{
+ int au_headers_length, au_header_size, i;
+ GetBitContext getbitcontext;
+ rtp_payload_data_t *infos;
+
+ infos = s->rtp_payload_data;
+
+ if (infos == NULL)
+ return -1;
+
+ /* decode the first 2 bytes, which store the length in bits of the
+ AU header section */
+ au_headers_length = BE_16(buf);
+
+ if (au_headers_length > RTP_MAX_PACKET_LENGTH)
+ return -1;
+
+ infos->au_headers_length_bytes = (au_headers_length + 7) / 8;
+
+ /* skip AU headers length section (2 bytes) */
+ buf += 2;
+
+ init_get_bits(&getbitcontext, buf, infos->au_headers_length_bytes * 8);
+
+ /* XXX: Wrong if optional additional sections are present (cts, dts etc...) */
+ au_header_size = infos->sizelength + infos->indexlength;
+ if (au_header_size <= 0 || (au_headers_length % au_header_size != 0))
+ return -1;
+
+ infos->nb_au_headers = au_headers_length / au_header_size;
+ infos->au_headers = av_malloc(sizeof(struct AUHeaders) * infos->nb_au_headers);
+
+ /* XXX: We handle multiple AU sections as only one (need to fix this for interleaving).
+ In my tests, the faad decoder doesn't behave correctly when sending each AU one by one
+ but does when sending the whole thing as one big packet... */
+ infos->au_headers[0].size = 0;
+ infos->au_headers[0].index = 0;
+ for (i = 0; i < infos->nb_au_headers; ++i) {
+ infos->au_headers[0].size += get_bits_long(&getbitcontext, infos->sizelength);
+ infos->au_headers[0].index = get_bits_long(&getbitcontext, infos->indexlength);
+ }
+
+ infos->nb_au_headers = 1;
+
+ return 0;
+}
+
+/**
+ * This was the second switch in rtp_parse packet. Normalizes time, if required, sets stream_index, etc.
+ */
+static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestamp)
+{
+ switch(s->st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ case CODEC_ID_MPEG1VIDEO:
+ if (s->last_rtcp_ntp_time != AV_NOPTS_VALUE) {
+ int64_t addend;
+
+ int delta_timestamp;
+ /* XXX: is it really necessary to unify the timestamp base ? */
+ /* compute pts from timestamp with received ntp_time */
+ delta_timestamp = timestamp - s->last_rtcp_timestamp;
+ /* convert to 90 kHz without overflow */
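+ /* The NTP timestamp counts whole seconds in its upper 32 bits and
+ fractions of 1/2^32 s in the lower 32, so
+ 90 kHz ticks = ntp_diff * 90000 / 2^32 = ((ntp_diff >> 14) * 5625) >> 14,
+ because 90000 / 2^32 == 5625 / 2^28. */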
+ addend = (s->last_rtcp_ntp_time - s->first_rtcp_ntp_time) >> 14;
+ addend = (addend * 5625) >> 14;
+ pkt->pts = addend + delta_timestamp;
+ }
+ break;
+ case CODEC_ID_AAC:
+ case CODEC_ID_H264:
+ case CODEC_ID_MPEG4:
+ pkt->pts = timestamp;
+ break;
+ default:
+ /* no timestamp info yet */
+ break;
+ }
+ pkt->stream_index = s->st->index;
+}
+
+/**
+ * Parse an RTP or RTCP packet directly sent as a buffer.
+ * @param s RTP parse context.
+ * @param pkt returned packet
+ * @param buf input buffer or NULL to read the next packets
+ * @param len buffer len
+ * @return 0 if a packet is returned, 1 if a packet is returned and more can follow
+ * (use buf as NULL to read the next). -1 if no packet (error or no more packet).
+ */
+int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
+ const uint8_t *buf, int len)
+{
+ unsigned int ssrc, h;
+ int payload_type, seq, ret;
+ AVStream *st;
+ uint32_t timestamp;
+ int rv= 0;
+
+ if (!buf) {
+ /* return the next packets, if any */
+ if(s->st && s->parse_packet) {
+ timestamp= 0; ///< Should not be used if buf is NULL, but should be set to the timestamp of the packet returned....
+ rv= s->parse_packet(s, pkt, &timestamp, NULL, 0);
+ finalize_packet(s, pkt, timestamp);
+ return rv;
+ } else {
+ // TODO: Move to a dynamic packet handler (like above)
+ if (s->read_buf_index >= s->read_buf_size)
+ return -1;
+ ret = mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index,
+ s->read_buf_size - s->read_buf_index);
+ if (ret < 0)
+ return -1;
+ s->read_buf_index += ret;
+ if (s->read_buf_index < s->read_buf_size)
+ return 1;
+ else
+ return 0;
+ }
+ }
+
+ if (len < 12)
+ return -1;
+
+ if ((buf[0] & 0xc0) != (RTP_VERSION << 6))
+ return -1;
+ if (buf[1] >= 200 && buf[1] <= 204) {
+ rtcp_parse_packet(s, buf, len);
+ return -1;
+ }
+ payload_type = buf[1] & 0x7f;
+ seq = (buf[2] << 8) | buf[3];
+ timestamp = decode_be32(buf + 4);
+ ssrc = decode_be32(buf + 8);
+ /* store the ssrc in the RTPDemuxContext */
+ s->ssrc = ssrc;
+
+ /* NOTE: we can handle only one payload type */
+ if (s->payload_type != payload_type)
+ return -1;
+
+ st = s->st;
+ // only do something with this if all the rtp checks pass...
+ if(!rtp_valid_packet_in_sequence(&s->statistics, seq))
+ {
+ av_log(st?st->codec:NULL, AV_LOG_ERROR, "RTP: PT=%02x: bad cseq %04x expected=%04x\n",
+ payload_type, seq, ((s->seq + 1) & 0xffff));
+ return -1;
+ }
+
+ s->seq = seq;
+ len -= 12;
+ buf += 12;
+
+ if (!st) {
+ /* specific MPEG2TS demux support */
+ ret = mpegts_parse_packet(s->ts, pkt, buf, len);
+ if (ret < 0)
+ return -1;
+ if (ret < len) {
+ s->read_buf_size = len - ret;
+ memcpy(s->buf, buf + ret, s->read_buf_size);
+ s->read_buf_index = 0;
+ return 1;
+ }
+ } else {
+ // at this point, the RTP header has been stripped; this is ASSUMING that there is only 1 CSRC, which isn't wise.
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ /* better than nothing: skip mpeg audio RTP header */
+ if (len <= 4)
+ return -1;
+ h = decode_be32(buf);
+ len -= 4;
+ buf += 4;
+ av_new_packet(pkt, len);
+ memcpy(pkt->data, buf, len);
+ break;
+ case CODEC_ID_MPEG1VIDEO:
+ /* better than nothing: skip mpeg video RTP header */
+ if (len <= 4)
+ return -1;
+ h = decode_be32(buf);
+ buf += 4;
+ len -= 4;
+ if (h & (1 << 26)) {
+ /* mpeg2 */
+ if (len <= 4)
+ return -1;
+ buf += 4;
+ len -= 4;
+ }
+ av_new_packet(pkt, len);
+ memcpy(pkt->data, buf, len);
+ break;
+ // moved from below, verbatim. this is because this section handles packets, and the lower switch handles
+ // timestamps.
+ // TODO: Put this into a dynamic packet handler...
+ case CODEC_ID_AAC:
+ if (rtp_parse_mp4_au(s, buf))
+ return -1;
+ {
+ rtp_payload_data_t *infos = s->rtp_payload_data;
+ if (infos == NULL)
+ return -1;
+ buf += infos->au_headers_length_bytes + 2;
+ len -= infos->au_headers_length_bytes + 2;
+
+            /* XXX: FIXME: we only handle the case where rtp_parse_mp4_au() defines
+               a single au_header */
+ av_new_packet(pkt, infos->au_headers[0].size);
+ memcpy(pkt->data, buf, infos->au_headers[0].size);
+ buf += infos->au_headers[0].size;
+ len -= infos->au_headers[0].size;
+ }
+ s->read_buf_size = len;
+ s->buf_ptr = buf;
+ rv= 0;
+ break;
+ default:
+ if(s->parse_packet) {
+ rv= s->parse_packet(s, pkt, &timestamp, buf, len);
+ } else {
+ av_new_packet(pkt, len);
+ memcpy(pkt->data, buf, len);
+ }
+ break;
+ }
+
+        // now handle the timestamp...
+ finalize_packet(s, pkt, timestamp);
+ }
+ return rv;
+}
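+
+#if 0
+/* Illustrative sketch (not part of the original code): how a caller might drive
+   rtp_parse_packet() given the return convention documented above. The
+   consume_packet() callback is hypothetical. */
+static void demux_one_datagram(RTPDemuxContext *s, const uint8_t *buf, int len,
+                               void (*consume_packet)(AVPacket *pkt))
+{
+    AVPacket pkt;
+    int rv = rtp_parse_packet(s, &pkt, buf, len);   /* first call passes the datagram */
+    while (rv >= 0) {
+        consume_packet(&pkt);
+        if (rv == 0)
+            break;                                  /* nothing more buffered */
+        rv = rtp_parse_packet(s, &pkt, NULL, 0);    /* drain the remaining packets */
+    }
+}
+#endif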
+
+void rtp_parse_close(RTPDemuxContext *s)
+{
+ // TODO: fold this into the protocol specific data fields.
+ if (!strcmp(AVRtpPayloadTypes[s->payload_type].enc_name, "MP2T")) {
+ mpegts_parse_close(s->ts);
+ }
+ av_free(s);
+}
+
+/* rtp output */
+
+static int rtp_write_header(AVFormatContext *s1)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ int payload_type, max_packet_size, n;
+ AVStream *st;
+
+ if (s1->nb_streams != 1)
+ return -1;
+ st = s1->streams[0];
+
+ payload_type = rtp_get_payload_type(st->codec);
+ if (payload_type < 0)
+ payload_type = RTP_PT_PRIVATE; /* private payload type */
+ s->payload_type = payload_type;
+
+// the following two FIXMEs could be set based on the current time; there's normally no info leak, as RTP will likely be transmitted immediately
+ s->base_timestamp = 0; /* FIXME: was random(), what should this be? */
+ s->timestamp = s->base_timestamp;
+ s->ssrc = 0; /* FIXME: was random(), what should this be? */
+ s->first_packet = 1;
+
+ max_packet_size = url_fget_max_packet_size(&s1->pb);
+ if (max_packet_size <= 12)
+ return AVERROR_IO;
+ s->max_payload_size = max_packet_size - 12;
+
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ s->buf_ptr = s->buf + 4;
+ s->cur_timestamp = 0;
+ break;
+ case CODEC_ID_MPEG1VIDEO:
+ s->cur_timestamp = 0;
+ break;
+ case CODEC_ID_MPEG2TS:
+ n = s->max_payload_size / TS_PACKET_SIZE;
+ if (n < 1)
+ n = 1;
+ s->max_payload_size = n * TS_PACKET_SIZE;
+ s->buf_ptr = s->buf;
+ break;
+ default:
+ s->buf_ptr = s->buf;
+ break;
+ }
+
+ return 0;
+}
+
+/* send an rtcp sender report packet */
+static void rtcp_send_sr(AVFormatContext *s1, int64_t ntp_time)
+{
+ RTPDemuxContext *s = s1->priv_data;
+#if defined(DEBUG)
+ printf("RTCP: %02x %"PRIx64" %x\n", s->payload_type, ntp_time, s->timestamp);
+#endif
+ put_byte(&s1->pb, (RTP_VERSION << 6));
+ put_byte(&s1->pb, 200);
+ put_be16(&s1->pb, 6); /* length in words - 1 */
+ put_be32(&s1->pb, s->ssrc);
+ put_be64(&s1->pb, ntp_time);
+ put_be32(&s1->pb, s->timestamp);
+ put_be32(&s1->pb, s->packet_count);
+ put_be32(&s1->pb, s->octet_count);
+ put_flush_packet(&s1->pb);
+}
+
+/* send an rtp packet. sequence number is incremented, but the caller
+ must update the timestamp itself */
+static void rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m)
+{
+ RTPDemuxContext *s = s1->priv_data;
+
+#ifdef DEBUG
+ printf("rtp_send_data size=%d\n", len);
+#endif
+
+ /* build the RTP header */
+ put_byte(&s1->pb, (RTP_VERSION << 6));
+ put_byte(&s1->pb, (s->payload_type & 0x7f) | ((m & 0x01) << 7));
+ put_be16(&s1->pb, s->seq);
+ put_be32(&s1->pb, s->timestamp);
+ put_be32(&s1->pb, s->ssrc);
+
+ put_buffer(&s1->pb, buf1, len);
+ put_flush_packet(&s1->pb);
+
+ s->seq++;
+ s->octet_count += len;
+ s->packet_count++;
+}
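+
+#if 0
+/* Illustrative sketch (not part of the original code): the 12-byte RTP fixed
+   header that rtp_send_data() emits above, per RFC 3550, written into a plain
+   byte array instead of the ByteIOContext. */
+static void rtp_header_sketch(uint8_t h[12], int m, int payload_type,
+                              uint16_t seq, uint32_t timestamp, uint32_t ssrc)
+{
+    h[0]  = RTP_VERSION << 6;                          /* V=2, P=0, X=0, CC=0 */
+    h[1]  = (payload_type & 0x7f) | ((m & 0x01) << 7); /* marker bit + payload type */
+    h[2]  = seq >> 8;        h[3]  = seq;              /* sequence number */
+    h[4]  = timestamp >> 24; h[5]  = timestamp >> 16;
+    h[6]  = timestamp >> 8;  h[7]  = timestamp;        /* RTP timestamp */
+    h[8]  = ssrc >> 24;      h[9]  = ssrc >> 16;
+    h[10] = ssrc >> 8;       h[11] = ssrc;             /* synchronization source */
+}
+#endif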
+
+/* send an integer number of samples and compute time stamp and fill
+ the rtp send buffer before sending. */
+static void rtp_send_samples(AVFormatContext *s1,
+ const uint8_t *buf1, int size, int sample_size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ int len, max_packet_size, n;
+
+ max_packet_size = (s->max_payload_size / sample_size) * sample_size;
+    /* not needed, but who knows */
+ if ((size % sample_size) != 0)
+ av_abort();
+ while (size > 0) {
+ len = (max_packet_size - (s->buf_ptr - s->buf));
+ if (len > size)
+ len = size;
+
+ /* copy data */
+ memcpy(s->buf_ptr, buf1, len);
+ s->buf_ptr += len;
+ buf1 += len;
+ size -= len;
+ n = (s->buf_ptr - s->buf);
+ /* if buffer full, then send it */
+ if (n >= max_packet_size) {
+ rtp_send_data(s1, s->buf, n, 0);
+ s->buf_ptr = s->buf;
+ /* update timestamp */
+ s->timestamp += n / sample_size;
+ }
+ }
+}
+
+/* NOTE: we suppose that exactly one frame is given as argument here */
+/* XXX: test it */
+static void rtp_send_mpegaudio(AVFormatContext *s1,
+ const uint8_t *buf1, int size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ AVStream *st = s1->streams[0];
+ int len, count, max_packet_size;
+
+ max_packet_size = s->max_payload_size;
+
+ /* test if we must flush because not enough space */
+ len = (s->buf_ptr - s->buf);
+ if ((len + size) > max_packet_size) {
+ if (len > 4) {
+ rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
+ s->buf_ptr = s->buf + 4;
+            /* 90 kHz time stamp */
+ s->timestamp = s->base_timestamp +
+ (s->cur_timestamp * 90000LL) / st->codec->sample_rate;
+ }
+ }
+
+ /* add the packet */
+ if (size > max_packet_size) {
+ /* big packet: fragment */
+ count = 0;
+ while (size > 0) {
+ len = max_packet_size - 4;
+ if (len > size)
+ len = size;
+ /* build fragmented packet */
+ s->buf[0] = 0;
+ s->buf[1] = 0;
+ s->buf[2] = count >> 8;
+ s->buf[3] = count;
+ memcpy(s->buf + 4, buf1, len);
+ rtp_send_data(s1, s->buf, len + 4, 0);
+ size -= len;
+ buf1 += len;
+ count += len;
+ }
+ } else {
+ if (s->buf_ptr == s->buf + 4) {
+ /* no fragmentation possible */
+ s->buf[0] = 0;
+ s->buf[1] = 0;
+ s->buf[2] = 0;
+ s->buf[3] = 0;
+ }
+ memcpy(s->buf_ptr, buf1, size);
+ s->buf_ptr += size;
+ }
+ s->cur_timestamp += st->codec->frame_size;
+}
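+
+/* Note (added for clarity, not in the original code): the 4 bytes written into
+   s->buf[0..3] above form the RFC 2250 MPEG audio payload header, i.e. 16 bits
+   that must be zero followed by a 16-bit fragmentation offset; whole frames use
+   0,0,0,0 while fragments carry the running byte offset in 'count'. */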
+
+/* NOTE: a single frame must be passed with sequence header if
+ needed. XXX: use slices. */
+static void rtp_send_mpegvideo(AVFormatContext *s1,
+ const uint8_t *buf1, int size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ AVStream *st = s1->streams[0];
+ int len, h, max_packet_size;
+ uint8_t *q;
+
+ max_packet_size = s->max_payload_size;
+
+ while (size > 0) {
+ /* XXX: more correct headers */
+ h = 0;
+ if (st->codec->sub_id == 2)
+ h |= 1 << 26; /* mpeg 2 indicator */
+ q = s->buf;
+ *q++ = h >> 24;
+ *q++ = h >> 16;
+ *q++ = h >> 8;
+ *q++ = h;
+
+ if (st->codec->sub_id == 2) {
+ h = 0;
+ *q++ = h >> 24;
+ *q++ = h >> 16;
+ *q++ = h >> 8;
+ *q++ = h;
+ }
+
+ len = max_packet_size - (q - s->buf);
+ if (len > size)
+ len = size;
+
+ memcpy(q, buf1, len);
+ q += len;
+
+        /* 90 kHz time stamp */
+ s->timestamp = s->base_timestamp +
+ av_rescale((int64_t)s->cur_timestamp * st->codec->time_base.num, 90000, st->codec->time_base.den); //FIXME pass timestamps
+ rtp_send_data(s1, s->buf, q - s->buf, (len == size));
+
+ buf1 += len;
+ size -= len;
+ }
+ s->cur_timestamp++;
+}
+
+static void rtp_send_raw(AVFormatContext *s1,
+ const uint8_t *buf1, int size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ AVStream *st = s1->streams[0];
+ int len, max_packet_size;
+
+ max_packet_size = s->max_payload_size;
+
+ while (size > 0) {
+ len = max_packet_size;
+ if (len > size)
+ len = size;
+
+        /* 90 kHz time stamp */
+ s->timestamp = s->base_timestamp +
+ av_rescale((int64_t)s->cur_timestamp * st->codec->time_base.num, 90000, st->codec->time_base.den); //FIXME pass timestamps
+ rtp_send_data(s1, buf1, len, (len == size));
+
+ buf1 += len;
+ size -= len;
+ }
+ s->cur_timestamp++;
+}
+
+/* NOTE: size is assumed to be an integer multiple of TS_PACKET_SIZE */
+static void rtp_send_mpegts_raw(AVFormatContext *s1,
+ const uint8_t *buf1, int size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ int len, out_len;
+
+ while (size >= TS_PACKET_SIZE) {
+ len = s->max_payload_size - (s->buf_ptr - s->buf);
+ if (len > size)
+ len = size;
+ memcpy(s->buf_ptr, buf1, len);
+ buf1 += len;
+ size -= len;
+ s->buf_ptr += len;
+
+ out_len = s->buf_ptr - s->buf;
+ if (out_len >= s->max_payload_size) {
+ rtp_send_data(s1, s->buf, out_len, 0);
+ s->buf_ptr = s->buf;
+ }
+ }
+}
+
+/* write an RTP packet. 'buf1' must contain a single specific frame. */
+static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ AVStream *st = s1->streams[0];
+ int rtcp_bytes;
+ int64_t ntp_time;
+ int size= pkt->size;
+ uint8_t *buf1= pkt->data;
+
+#ifdef DEBUG
+ printf("%d: write len=%d\n", pkt->stream_index, size);
+#endif
+
+ /* XXX: mpeg pts hardcoded. RTCP send every 0.5 seconds */
+ rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
+ RTCP_TX_RATIO_DEN;
+ if (s->first_packet || rtcp_bytes >= 28) {
+ /* compute NTP time */
+ /* XXX: 90 kHz timestamp hardcoded */
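+        /* note (added for clarity): pts is in 90 kHz units and the NTP timestamp
+           is 32.32 fixed point seconds, so ntp = pts / 90000 * 2^32, which equals
+           pts * 2^28 / 5625 because 90000 = 5625 * 16 */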
+ ntp_time = (pkt->pts << 28) / 5625;
+ rtcp_send_sr(s1, ntp_time);
+ s->last_octet_count = s->octet_count;
+ s->first_packet = 0;
+ }
+
+ switch(st->codec->codec_id) {
+ case CODEC_ID_PCM_MULAW:
+ case CODEC_ID_PCM_ALAW:
+ case CODEC_ID_PCM_U8:
+ case CODEC_ID_PCM_S8:
+ rtp_send_samples(s1, buf1, size, 1 * st->codec->channels);
+ break;
+ case CODEC_ID_PCM_U16BE:
+ case CODEC_ID_PCM_U16LE:
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_S16LE:
+ rtp_send_samples(s1, buf1, size, 2 * st->codec->channels);
+ break;
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ rtp_send_mpegaudio(s1, buf1, size);
+ break;
+ case CODEC_ID_MPEG1VIDEO:
+ rtp_send_mpegvideo(s1, buf1, size);
+ break;
+ case CODEC_ID_MPEG2TS:
+ rtp_send_mpegts_raw(s1, buf1, size);
+ break;
+ default:
+        /* better than nothing: send the raw codec data */
+ rtp_send_raw(s1, buf1, size);
+ break;
+ }
+ return 0;
+}
+
+static int rtp_write_trailer(AVFormatContext *s1)
+{
+ // RTPDemuxContext *s = s1->priv_data;
+ return 0;
+}
+
+AVOutputFormat rtp_muxer = {
+ "rtp",
+ "RTP output format",
+ NULL,
+ NULL,
+ sizeof(RTPDemuxContext),
+ CODEC_ID_PCM_MULAW,
+ CODEC_ID_NONE,
+ rtp_write_header,
+ rtp_write_packet,
+ rtp_write_trailer,
+};
diff --git a/contrib/ffmpeg/libavformat/rtp.h b/contrib/ffmpeg/libavformat/rtp.h
new file mode 100644
index 000000000..60ccc50ee
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp.h
@@ -0,0 +1,118 @@
+/*
+ * RTP definitions
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef RTP_H
+#define RTP_H
+
+#define RTP_MIN_PACKET_LENGTH 12
+#define RTP_MAX_PACKET_LENGTH 1500 /* XXX: remove this define */
+
+int rtp_init(void);
+int rtp_get_codec_info(AVCodecContext *codec, int payload_type);
+int rtp_get_payload_type(AVCodecContext *codec);
+
+typedef struct RTPDemuxContext RTPDemuxContext;
+typedef struct rtp_payload_data_s rtp_payload_data_s;
+RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext *rtpc, int payload_type, rtp_payload_data_s *rtp_payload_data);
+int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
+ const uint8_t *buf, int len);
+void rtp_parse_close(RTPDemuxContext *s);
+
+extern AVOutputFormat rtp_muxer;
+extern AVInputFormat rtp_demuxer;
+
+int rtp_get_local_port(URLContext *h);
+int rtp_set_remote_url(URLContext *h, const char *uri);
+void rtp_get_file_handles(URLContext *h, int *prtp_fd, int *prtcp_fd);
+
+extern URLProtocol rtp_protocol;
+
+#define RTP_PT_PRIVATE 96
+#define RTP_VERSION 2
+#define RTP_MAX_SDES 256 /* maximum text length for SDES */
+
+/* RTCP packets use 0.5% of the bandwidth */
+#define RTCP_TX_RATIO_NUM 5
+#define RTCP_TX_RATIO_DEN 1000
+
+/* Structure listing useful variables for parsing an RTP packet payload */
+typedef struct rtp_payload_data_s
+{
+ int sizelength;
+ int indexlength;
+ int indexdeltalength;
+ int profile_level_id;
+ int streamtype;
+ int objecttype;
+ char *mode;
+
+ /* mpeg 4 AU headers */
+ struct AUHeaders {
+ int size;
+ int index;
+ int cts_flag;
+ int cts;
+ int dts_flag;
+ int dts;
+ int rap_flag;
+ int streamstate;
+ } *au_headers;
+ int nb_au_headers;
+ int au_headers_length_bytes;
+ int cur_au_index;
+} rtp_payload_data_t;
+
+typedef struct AVRtpPayloadType_s
+{
+ int pt;
+ const char enc_name[50]; /* XXX: why 50 ? */
+ enum CodecType codec_type;
+ enum CodecID codec_id;
+ int clock_rate;
+ int audio_channels;
+} AVRtpPayloadType_t;
+
+#if 0
+typedef enum {
+ RTCP_SR = 200,
+ RTCP_RR = 201,
+ RTCP_SDES = 202,
+ RTCP_BYE = 203,
+ RTCP_APP = 204
+} rtcp_type_t;
+
+typedef enum {
+ RTCP_SDES_END = 0,
+ RTCP_SDES_CNAME = 1,
+ RTCP_SDES_NAME = 2,
+ RTCP_SDES_EMAIL = 3,
+ RTCP_SDES_PHONE = 4,
+ RTCP_SDES_LOC = 5,
+ RTCP_SDES_TOOL = 6,
+ RTCP_SDES_NOTE = 7,
+ RTCP_SDES_PRIV = 8,
+ RTCP_SDES_IMG = 9,
+ RTCP_SDES_DOOR = 10,
+ RTCP_SDES_SOURCE = 11
+} rtcp_sdes_type_t;
+#endif
+
+extern AVRtpPayloadType_t AVRtpPayloadTypes[];
+#endif /* RTP_H */
diff --git a/contrib/ffmpeg/libavformat/rtp_h264.c b/contrib/ffmpeg/libavformat/rtp_h264.c
new file mode 100644
index 000000000..2568e9ea5
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp_h264.c
@@ -0,0 +1,419 @@
+/*
+ * RTP H264 Protocol (RFC3984)
+ * Copyright (c) 2006 Ryan Martell.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rtp_h264.c
+ * @brief H.264 / RTP Code (RFC3984)
+ * @author Ryan Martell <rdm4@martellventures.com>
+ *
+ * @note Notes:
+ * This currently supports packetization mode:
+ * Single NAL Unit Mode (0), or
+ * Non-Interleaved Mode (1). It currently does not support
+ * Interleaved Mode (2) (this requires implementing STAP-B, MTAP16, MTAP24 and FU-B packet types).
+ *
+ * @note TODO:
+ * 1) RTCP sender reports for UDP streams are required.
+ *
+ */
+
+#include "avformat.h"
+#include "mpegts.h"
+#include "bitstream.h"
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <assert.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+
+#include "rtp_internal.h"
+#include "rtp_h264.h"
+#include "base64.h"
+
+/**
+ RTP/H264 specific private data.
+*/
+typedef struct h264_rtp_extra_data {
+ unsigned long cookie; ///< sanity check, to make sure we get the pointer we're expecting.
+
+ //sdp setup parameters
+ uint8_t profile_idc; ///< from the sdp setup parameters.
+ uint8_t profile_iop; ///< from the sdp setup parameters.
+ uint8_t level_idc; ///< from the sdp setup parameters.
+ int packetization_mode; ///< from the sdp setup parameters.
+#ifdef DEBUG
+ int packet_types_received[32];
+#endif
+} h264_rtp_extra_data;
+
+#define MAGIC_COOKIE (0xdeadbeef) ///< Cookie for the extradata; to verify we are what we think we are, and that we haven't been freed.
+#define DEAD_COOKIE (0xdeaddead) ///< Cookie for the extradata; once it is freed.
+
+/* ---------------- private code */
+static void sdp_parse_fmtp_config_h264(AVStream * stream,
+ h264_rtp_extra_data * h264_data,
+ char *attr, char *value)
+{
+ AVCodecContext *codec = stream->codec;
+ assert(codec->codec_id == CODEC_ID_H264);
+ assert(h264_data != NULL);
+
+    if (!strcmp(attr, "packetization-mode")) {
+        av_log(NULL, AV_LOG_DEBUG, "H.264/RTP Packetization Mode: %d\n", atoi(value));
+        h264_data->packetization_mode = atoi(value);
+ /*
+ Packetization Mode:
+ 0 or not present: Single NAL mode (Only nals from 1-23 are allowed)
+ 1: Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed.
+ 2: Interleaved Mode: 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), and 29 (FU-B) are allowed.
+ */
+ if (h264_data->packetization_mode > 1)
+ av_log(stream, AV_LOG_ERROR,
+ "H.264/RTP Interleaved RTP mode is not supported yet.");
+ } else if (!strcmp(attr, "profile-level-id")) {
+ if (strlen(value) == 6) {
+ char buffer[3];
+ // 6 characters=3 bytes, in hex.
+ uint8_t profile_idc;
+ uint8_t profile_iop;
+ uint8_t level_idc;
+
+ buffer[0] = value[0]; buffer[1] = value[1]; buffer[2] = '\0';
+ profile_idc = strtol(buffer, NULL, 16);
+ buffer[0] = value[2]; buffer[1] = value[3];
+ profile_iop = strtol(buffer, NULL, 16);
+ buffer[0] = value[4]; buffer[1] = value[5];
+ level_idc = strtol(buffer, NULL, 16);
+
+ // set the parameters...
+ av_log(NULL, AV_LOG_DEBUG,
+ "H.264/RTP Profile IDC: %x Profile IOP: %x Level: %x\n",
+ profile_idc, profile_iop, level_idc);
+ h264_data->profile_idc = profile_idc;
+ h264_data->profile_iop = profile_iop;
+ h264_data->level_idc = level_idc;
+ }
+ } else if (!strcmp(attr, "sprop-parameter-sets")) {
+ uint8_t start_sequence[]= { 0, 0, 1 };
+ codec->extradata_size= 0;
+ codec->extradata= NULL;
+
+ while (*value) {
+ char base64packet[1024];
+ uint8_t decoded_packet[1024];
+ uint32_t packet_size;
+ char *dst = base64packet;
+
+ while (*value && *value != ','
+ && (dst - base64packet) < sizeof(base64packet) - 1) {
+ *dst++ = *value++;
+ }
+ *dst++ = '\0';
+
+ if (*value == ',')
+ value++;
+
+ packet_size= av_base64_decode(decoded_packet, base64packet, sizeof(decoded_packet));
+ if (packet_size) {
+ uint8_t *dest= av_malloc(packet_size+sizeof(start_sequence)+codec->extradata_size);
+ if(dest)
+ {
+ if(codec->extradata_size)
+ {
+ // av_realloc?
+ memcpy(dest, codec->extradata, codec->extradata_size);
+ av_free(codec->extradata);
+ }
+
+ memcpy(dest+codec->extradata_size, start_sequence, sizeof(start_sequence));
+ memcpy(dest+codec->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
+
+ codec->extradata= dest;
+ codec->extradata_size+= sizeof(start_sequence)+packet_size;
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "H.264/RTP Unable to allocate memory for extradata!");
+ }
+ }
+ }
+ av_log(NULL, AV_LOG_DEBUG, "H.264/RTP Extradata set to %p (size: %d)!", codec->extradata, codec->extradata_size);
+ }
+}
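+
+/* Worked example (added for clarity; the values are hypothetical, not taken from
+   any particular stream): an SDP line such as
+     a=fmtp:96 packetization-mode=1; profile-level-id=42A01E; sprop-parameter-sets=<base64 SPS>,<base64 PPS>
+   reaches this function one attribute/value pair at a time and yields
+   packetization_mode = 1, profile_idc = 0x42, profile_iop = 0xA0, level_idc = 0x1E,
+   and extradata made of each decoded parameter set prefixed by a 00 00 01 start
+   sequence. */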
+
+// return 0 on a packet with none left, 1 on a packet with more to follow, -1 on error or partial packet...
+static int h264_handle_packet(RTPDemuxContext * s,
+ AVPacket * pkt,
+ uint32_t * timestamp,
+ const uint8_t * buf,
+ int len)
+{
+    h264_rtp_extra_data *data = s->dynamic_protocol_context;
+ uint8_t nal = buf[0];
+ uint8_t type = (nal & 0x1f);
+ int result= 0;
+ uint8_t start_sequence[]= {0, 0, 1};
+
+ assert(data);
+ assert(data->cookie == MAGIC_COOKIE);
+ assert(buf);
+
+ if (type >= 1 && type <= 23)
+ type = 1; // simplify the case. (these are all the nal types used internally by the h264 codec)
+ switch (type) {
+ case 0: // undefined;
+ result= -1;
+ break;
+
+ case 1:
+ av_new_packet(pkt, len+sizeof(start_sequence));
+ memcpy(pkt->data, start_sequence, sizeof(start_sequence));
+ memcpy(pkt->data+sizeof(start_sequence), buf, len);
+#ifdef DEBUG
+ data->packet_types_received[nal & 0x1f]++;
+#endif
+ break;
+
+ case 24: // STAP-A (one packet, multiple nals)
+ // consume the STAP-A NAL
+ buf++;
+ len--;
+ // first we are going to figure out the total size....
+ {
+ int pass= 0;
+ int total_length= 0;
+ uint8_t *dst= NULL;
+
+ for(pass= 0; pass<2; pass++) {
+ const uint8_t *src= buf;
+ int src_len= len;
+
+ do {
+                    uint16_t nal_size = BE_16(src); // this is going to be a problem if unaligned (can it be?)
+
+ // consume the length of the aggregate...
+ src += 2;
+ src_len -= 2;
+
+ if (nal_size <= src_len) {
+ if(pass==0) {
+ // counting...
+ total_length+= sizeof(start_sequence)+nal_size;
+ } else {
+ // copying
+ assert(dst);
+ memcpy(dst, start_sequence, sizeof(start_sequence));
+ dst+= sizeof(start_sequence);
+ memcpy(dst, src, nal_size);
+#ifdef DEBUG
+ data->packet_types_received[*src & 0x1f]++;
+#endif
+ dst+= nal_size;
+ }
+ } else {
+ av_log(NULL, AV_LOG_ERROR,
+ "nal size exceeds length: %d %d\n", nal_size, src_len);
+ }
+
+ // eat what we handled...
+ src += nal_size;
+ src_len -= nal_size;
+
+ if (src_len < 0)
+ av_log(NULL, AV_LOG_ERROR,
+ "Consumed more bytes than we got! (%d)\n", src_len);
+ } while (src_len > 2); // because there could be rtp padding..
+
+ if(pass==0) {
+ // now we know the total size of the packet (with the start sequences added)
+ av_new_packet(pkt, total_length);
+ dst= pkt->data;
+ } else {
+ assert(dst-pkt->data==total_length);
+ }
+ }
+ }
+ break;
+
+ case 25: // STAP-B
+ case 26: // MTAP-16
+ case 27: // MTAP-24
+ case 29: // FU-B
+ av_log(NULL, AV_LOG_ERROR,
+                   "Unhandled type (%d) (see the RFC for implementation details)\n",
+ type);
+ result= -1;
+ break;
+
+ case 28: // FU-A (fragmented nal)
+ buf++;
+ len--; // skip the fu_indicator
+ {
+ // these are the same as above, we just redo them here for clarity...
+ uint8_t fu_indicator = nal;
+ uint8_t fu_header = *buf; // read the fu_header.
+ uint8_t start_bit = (fu_header & 0x80) >> 7;
+// uint8_t end_bit = (fu_header & 0x40) >> 6;
+ uint8_t nal_type = (fu_header & 0x1f);
+ uint8_t reconstructed_nal;
+
+ // reconstruct this packet's true nal; only the data follows..
+ reconstructed_nal = fu_indicator & (0xe0); // the original nal forbidden bit and NRI are stored in this packet's nal;
+ reconstructed_nal |= (nal_type & 0x1f);
+
+ // skip the fu_header...
+ buf++;
+ len--;
+
+#ifdef DEBUG
+ if (start_bit)
+ data->packet_types_received[nal_type & 0x1f]++;
+#endif
+ if(start_bit) {
+ // copy in the start sequence, and the reconstructed nal....
+ av_new_packet(pkt, sizeof(start_sequence)+sizeof(nal)+len);
+ memcpy(pkt->data, start_sequence, sizeof(start_sequence));
+ pkt->data[sizeof(start_sequence)]= reconstructed_nal;
+ memcpy(pkt->data+sizeof(start_sequence)+sizeof(nal), buf, len);
+ } else {
+ av_new_packet(pkt, len);
+ memcpy(pkt->data, buf, len);
+ }
+ }
+ break;
+
+ case 30: // undefined
+ case 31: // undefined
+ default:
+ av_log(NULL, AV_LOG_ERROR, "Undefined type (%d)", type);
+ result= -1;
+ break;
+ }
+
+ return result;
+}
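+
+/* Worked example (added for clarity, values hypothetical): for the first FU-A
+   fragment of an IDR slice the payload might start with fu_indicator = 0x7c
+   (F=0, NRI=3, type=28) and fu_header = 0x85 (S=1, E=0, type=5), so the
+   reconstructed NAL byte is (0x7c & 0xe0) | (0x85 & 0x1f) = 0x65 and the output
+   packet begins with 00 00 01 65 followed by the fragment data. */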
+
+/* ---------------- public code */
+static void *h264_new_extradata()
+{
+ h264_rtp_extra_data *data =
+ av_mallocz(sizeof(h264_rtp_extra_data) +
+ FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (data) {
+ data->cookie = MAGIC_COOKIE;
+ }
+
+ return data;
+}
+
+static void h264_free_extradata(void *d)
+{
+ h264_rtp_extra_data *data = (h264_rtp_extra_data *) d;
+#ifdef DEBUG
+ int ii;
+
+ for (ii = 0; ii < 32; ii++) {
+ if (data->packet_types_received[ii])
+ av_log(NULL, AV_LOG_DEBUG, "Received %d packets of type %d\n",
+ data->packet_types_received[ii], ii);
+ }
+#endif
+
+ assert(data);
+ assert(data->cookie == MAGIC_COOKIE);
+
+ // avoid stale pointers (assert)
+ data->cookie = DEAD_COOKIE;
+
+ // and clear out this...
+ av_free(data);
+}
+
+static int parse_h264_sdp_line(AVStream * stream, void *data,
+ const char *line)
+{
+ AVCodecContext *codec = stream->codec;
+ h264_rtp_extra_data *h264_data = (h264_rtp_extra_data *) data;
+ const char *p = line;
+
+ assert(h264_data->cookie == MAGIC_COOKIE);
+
+ if (strstart(p, "framesize:", &p)) {
+ char buf1[50];
+ char *dst = buf1;
+
+ // remove the protocol identifier..
+ while (*p && *p == ' ') p++; // strip spaces.
+ while (*p && *p != ' ') p++; // eat protocol identifier
+ while (*p && *p == ' ') p++; // strip trailing spaces.
+        while (*p && *p != '-' && (dst - buf1) < sizeof(buf1) - 1) {
+ *dst++ = *p++;
+ }
+ *dst = '\0';
+
+ // a='framesize:96 320-240'
+ // set our parameters..
+ codec->width = atoi(buf1);
+ codec->height = atoi(p + 1); // skip the -
+ codec->pix_fmt = PIX_FMT_YUV420P;
+ } else if (strstart(p, "fmtp:", &p)) {
+ char attr[256];
+ char value[4096];
+
+ // remove the protocol identifier..
+ while (*p && *p == ' ') p++; // strip spaces.
+ while (*p && *p != ' ') p++; // eat protocol identifier
+ while (*p && *p == ' ') p++; // strip trailing spaces.
+
+ /* loop on each attribute */
+ while (rtsp_next_attr_and_value
+ (&p, attr, sizeof(attr), value, sizeof(value))) {
+ /* grab the codec extra_data from the config parameter of the fmtp line */
+ sdp_parse_fmtp_config_h264(stream, h264_data, attr, value);
+ }
+ } else if (strstart(p, "cliprect:", &p)) {
+ // could use this if we wanted.
+ }
+
+ av_set_pts_info(stream, 33, 1, 90000); // 33 should be right, because the pts is 64 bit? (done elsewhere; this is a one time thing)
+
+ return 0; // keep processing it the normal way...
+}
+
+/**
+This is the handler structure for extending the dynamic RTP protocols (it keeps everything static).
+*/
+RTPDynamicProtocolHandler ff_h264_dynamic_handler = {
+ "H264",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_H264,
+ parse_h264_sdp_line,
+ h264_new_extradata,
+ h264_free_extradata,
+ h264_handle_packet
+};
diff --git a/contrib/ffmpeg/libavformat/rtp_h264.h b/contrib/ffmpeg/libavformat/rtp_h264.h
new file mode 100644
index 000000000..19508574d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp_h264.h
@@ -0,0 +1,26 @@
+/*
+ * RTP H264 Protocol (RFC3984)
+ * Copyright (c) 2006 Ryan Martell.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef RTP_H264_H
+#define RTP_H264_H
+
+extern RTPDynamicProtocolHandler ff_h264_dynamic_handler;
+#endif /* RTP_H264_H */
diff --git a/contrib/ffmpeg/libavformat/rtp_internal.h b/contrib/ffmpeg/libavformat/rtp_internal.h
new file mode 100644
index 000000000..3edcf49c8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp_internal.h
@@ -0,0 +1,110 @@
+/*
+ * RTP definitions
+ * Copyright (c) 2006 Ryan Martell <rdm4@martellventures.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+// this is a bit of a misnomer, because rtp & rtsp internal structures and prototypes are in here.
+#ifndef RTP_INTERNAL_H
+#define RTP_INTERNAL_H
+
+// these statistics are used for rtcp receiver reports...
+typedef struct {
+ uint16_t max_seq; ///< highest sequence number seen
+ uint32_t cycles; ///< shifted count of sequence number cycles
+ uint32_t base_seq; ///< base sequence number
+ uint32_t bad_seq; ///< last bad sequence number + 1
+    int probation;       ///< sequential packets until source is valid
+ int received; ///< packets received
+ int expected_prior; ///< packets expected in last interval
+ int received_prior; ///< packets received in last interval
+ uint32_t transit; ///< relative transit time for previous packet
+ uint32_t jitter; ///< estimated jitter.
+} RTPStatistics;
+
+
+typedef int (*DynamicPayloadPacketHandlerProc) (struct RTPDemuxContext * s,
+ AVPacket * pkt,
+ uint32_t *timestamp,
+ const uint8_t * buf,
+ int len);
+
+typedef struct RTPDynamicProtocolHandler_s {
+ // fields from AVRtpDynamicPayloadType_s
+ const char enc_name[50]; /* XXX: still why 50 ? ;-) */
+ enum CodecType codec_type;
+ enum CodecID codec_id;
+
+ // may be null
+ int (*parse_sdp_a_line) (AVStream * stream,
+ void *protocol_data,
+ const char *line); ///< Parse the a= line from the sdp field
+ void *(*open) (); ///< allocate any data needed by the rtp parsing for this dynamic data.
+ void (*close)(void *protocol_data); ///< free any data needed by the rtp parsing for this dynamic data.
+ DynamicPayloadPacketHandlerProc parse_packet; ///< parse handler for this dynamic packet.
+
+ struct RTPDynamicProtocolHandler_s *next;
+} RTPDynamicProtocolHandler;
+
+// moved out of rtp.c, because the h264 decoder needs to know about this structure..
+struct RTPDemuxContext {
+ AVFormatContext *ic;
+ AVStream *st;
+ int payload_type;
+ uint32_t ssrc;
+ uint16_t seq;
+ uint32_t timestamp;
+ uint32_t base_timestamp;
+ uint32_t cur_timestamp;
+ int max_payload_size;
+ struct MpegTSContext *ts; /* only used for MP2T payloads */
+ int read_buf_index;
+ int read_buf_size;
+ /* used to send back RTCP RR */
+ URLContext *rtp_ctx;
+ char hostname[256];
+
+ RTPStatistics statistics; ///< Statistics for this stream (used by RTCP receiver reports)
+
+ /* rtcp sender statistics receive */
+ int64_t last_rtcp_ntp_time; // TODO: move into statistics
+ int64_t first_rtcp_ntp_time; // TODO: move into statistics
+ uint32_t last_rtcp_timestamp; // TODO: move into statistics
+
+ /* rtcp sender statistics */
+ unsigned int packet_count; // TODO: move into statistics (outgoing)
+ unsigned int octet_count; // TODO: move into statistics (outgoing)
+ unsigned int last_octet_count; // TODO: move into statistics (outgoing)
+ int first_packet;
+ /* buffer for output */
+ uint8_t buf[RTP_MAX_PACKET_LENGTH];
+ uint8_t *buf_ptr;
+
+ /* special infos for au headers parsing */
+ rtp_payload_data_t *rtp_payload_data; // TODO: Move into dynamic payload handlers
+
+ /* dynamic payload stuff */
+ DynamicPayloadPacketHandlerProc parse_packet; ///< This is also copied from the dynamic protocol handler structure
+    void *dynamic_protocol_context;        ///< This is a copy of the value set up during sdp parsing in rtsp.c; don't free it here.
+};
+
+extern RTPDynamicProtocolHandler *RTPFirstDynamicPayloadHandler;
+
+int rtsp_next_attr_and_value(const char **p, char *attr, int attr_size, char *value, int value_size); ///< from rtsp.c, but used by rtp dynamic protocol handlers.
+#endif /* RTP_INTERNAL_H */
+
diff --git a/contrib/ffmpeg/libavformat/rtpproto.c b/contrib/ffmpeg/libavformat/rtpproto.c
new file mode 100644
index 000000000..d31c509c2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtpproto.c
@@ -0,0 +1,303 @@
+/*
+ * RTP network protocol
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#include <unistd.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+#include <fcntl.h>
+
+#define RTP_TX_BUF_SIZE (64 * 1024)
+#define RTP_RX_BUF_SIZE (128 * 1024)
+
+typedef struct RTPContext {
+ URLContext *rtp_hd, *rtcp_hd;
+ int rtp_fd, rtcp_fd;
+} RTPContext;
+
+/**
+ * If no filename is given to av_open_input_file because you want to
+ * get the local port first, then you must call this function to set
+ * the remote server address.
+ *
+ * @param h RTP URL context
+ * @param uri URI of the remote server
+ * @return zero if no error.
+ */
+int rtp_set_remote_url(URLContext *h, const char *uri)
+{
+ RTPContext *s = h->priv_data;
+ char hostname[256];
+ int port;
+
+ char buf[1024];
+ char path[1024];
+
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
+ path, sizeof(path), uri);
+
+ snprintf(buf, sizeof(buf), "udp://%s:%d%s", hostname, port, path);
+ udp_set_remote_url(s->rtp_hd, buf);
+
+ snprintf(buf, sizeof(buf), "udp://%s:%d%s", hostname, port + 1, path);
+ udp_set_remote_url(s->rtcp_hd, buf);
+ return 0;
+}
+
+
+/* add an option to a url of the form:
+   "http://host:port/path?option1=val1&option2=val2..." */
+static void url_add_option(char *buf, int buf_size, const char *fmt, ...)
+{
+ char buf1[1024];
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (strchr(buf, '?'))
+ pstrcat(buf, buf_size, "&");
+ else
+ pstrcat(buf, buf_size, "?");
+ vsnprintf(buf1, sizeof(buf1), fmt, ap);
+ pstrcat(buf, buf_size, buf1);
+ va_end(ap);
+}
+
+static void build_udp_url(char *buf, int buf_size,
+ const char *hostname, int port,
+ int local_port, int multicast, int ttl)
+{
+ snprintf(buf, buf_size, "udp://%s:%d", hostname, port);
+ if (local_port >= 0)
+ url_add_option(buf, buf_size, "localport=%d", local_port);
+ if (multicast)
+ url_add_option(buf, buf_size, "multicast=1", multicast);
+ if (ttl >= 0)
+ url_add_option(buf, buf_size, "ttl=%d", ttl);
+}
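+
+/* Example (added for clarity, host and ports hypothetical):
+   build_udp_url(buf, sizeof(buf), "239.0.0.1", 5004, 5004, 1, 16) produces
+   "udp://239.0.0.1:5004?localport=5004&multicast=1&ttl=16". */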
+
+/*
+ * url syntax: rtp://host:port[?option=val...]
+ * option: 'multicast=1' : enable multicast
+ * 'ttl=n' : set the ttl value (for multicast only)
+ * 'localport=n' : set the local port to n
+ *
+ */
+static int rtp_open(URLContext *h, const char *uri, int flags)
+{
+ RTPContext *s;
+ int port, is_output, is_multicast, ttl, local_port;
+ char hostname[256];
+ char buf[1024];
+ char path[1024];
+ const char *p;
+
+ is_output = (flags & URL_WRONLY);
+
+ s = av_mallocz(sizeof(RTPContext));
+ if (!s)
+ return -ENOMEM;
+ h->priv_data = s;
+
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
+ path, sizeof(path), uri);
+ /* extract parameters */
+ is_multicast = 0;
+ ttl = -1;
+ local_port = -1;
+ p = strchr(uri, '?');
+ if (p) {
+ is_multicast = find_info_tag(buf, sizeof(buf), "multicast", p);
+ if (find_info_tag(buf, sizeof(buf), "ttl", p)) {
+ ttl = strtol(buf, NULL, 10);
+ }
+ if (find_info_tag(buf, sizeof(buf), "localport", p)) {
+ local_port = strtol(buf, NULL, 10);
+ }
+ }
+
+ build_udp_url(buf, sizeof(buf),
+ hostname, port, local_port, is_multicast, ttl);
+ if (url_open(&s->rtp_hd, buf, flags) < 0)
+ goto fail;
+ local_port = udp_get_local_port(s->rtp_hd);
+    /* XXX: need to open another connection if the port is not even */
+
+    /* well, we should remove localport from the path */
+
+ build_udp_url(buf, sizeof(buf),
+ hostname, port + 1, local_port + 1, is_multicast, ttl);
+ if (url_open(&s->rtcp_hd, buf, flags) < 0)
+ goto fail;
+
+    /* just to ease handle access. XXX: need to remove direct handle
+       access */
+ s->rtp_fd = udp_get_file_handle(s->rtp_hd);
+ s->rtcp_fd = udp_get_file_handle(s->rtcp_hd);
+
+ h->max_packet_size = url_get_max_packet_size(s->rtp_hd);
+ h->is_streamed = 1;
+ return 0;
+
+ fail:
+ if (s->rtp_hd)
+ url_close(s->rtp_hd);
+ if (s->rtcp_hd)
+ url_close(s->rtcp_hd);
+ av_free(s);
+ return AVERROR_IO;
+}
+
+static int rtp_read(URLContext *h, uint8_t *buf, int size)
+{
+ RTPContext *s = h->priv_data;
+ struct sockaddr_in from;
+ socklen_t from_len;
+ int len, fd_max, n;
+ fd_set rfds;
+#if 0
+ for(;;) {
+ from_len = sizeof(from);
+ len = recvfrom (s->rtp_fd, buf, size, 0,
+ (struct sockaddr *)&from, &from_len);
+ if (len < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ return AVERROR_IO;
+ }
+ break;
+ }
+#else
+ for(;;) {
+ /* build fdset to listen to RTP and RTCP packets */
+ FD_ZERO(&rfds);
+ fd_max = s->rtp_fd;
+ FD_SET(s->rtp_fd, &rfds);
+ if (s->rtcp_fd > fd_max)
+ fd_max = s->rtcp_fd;
+ FD_SET(s->rtcp_fd, &rfds);
+ n = select(fd_max + 1, &rfds, NULL, NULL, NULL);
+ if (n > 0) {
+ /* first try RTCP */
+ if (FD_ISSET(s->rtcp_fd, &rfds)) {
+ from_len = sizeof(from);
+ len = recvfrom (s->rtcp_fd, buf, size, 0,
+ (struct sockaddr *)&from, &from_len);
+ if (len < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ return AVERROR_IO;
+ }
+ break;
+ }
+ /* then RTP */
+ if (FD_ISSET(s->rtp_fd, &rfds)) {
+ from_len = sizeof(from);
+ len = recvfrom (s->rtp_fd, buf, size, 0,
+ (struct sockaddr *)&from, &from_len);
+ if (len < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ return AVERROR_IO;
+ }
+ break;
+ }
+ }
+ }
+#endif
+ return len;
+}
+
+static int rtp_write(URLContext *h, uint8_t *buf, int size)
+{
+ RTPContext *s = h->priv_data;
+ int ret;
+ URLContext *hd;
+
+ if (buf[1] >= 200 && buf[1] <= 204) {
+ /* RTCP payload type */
+ hd = s->rtcp_hd;
+ } else {
+ /* RTP payload type */
+ hd = s->rtp_hd;
+ }
+
+ ret = url_write(hd, buf, size);
+#if 0
+ {
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 10 * 1000000;
+ nanosleep(&ts, NULL);
+ }
+#endif
+ return ret;
+}
+
+static int rtp_close(URLContext *h)
+{
+ RTPContext *s = h->priv_data;
+
+ url_close(s->rtp_hd);
+ url_close(s->rtcp_hd);
+ av_free(s);
+ return 0;
+}
+
+/**
+ * Return the local port used by the RTP connection
+ * @param h RTP URL context
+ * @return the local port number
+ */
+int rtp_get_local_port(URLContext *h)
+{
+ RTPContext *s = h->priv_data;
+ return udp_get_local_port(s->rtp_hd);
+}
+
+/**
+ * Return the rtp and rtcp file handles for select() usage to wait for several RTP
+ * streams at the same time.
+ * @param h media file context
+ */
+void rtp_get_file_handles(URLContext *h, int *prtp_fd, int *prtcp_fd)
+{
+ RTPContext *s = h->priv_data;
+
+ *prtp_fd = s->rtp_fd;
+ *prtcp_fd = s->rtcp_fd;
+}
+
+URLProtocol rtp_protocol = {
+ "rtp",
+ rtp_open,
+ rtp_read,
+ rtp_write,
+ NULL, /* seek */
+ rtp_close,
+};
diff --git a/contrib/ffmpeg/libavformat/rtsp.c b/contrib/ffmpeg/libavformat/rtsp.c
new file mode 100644
index 000000000..787cdd685
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtsp.c
@@ -0,0 +1,1493 @@
+/*
+ * RTSP/SDP client
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#include <unistd.h> /* for select() prototype */
+#include <sys/time.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+
+#include "rtp_internal.h"
+
+//#define DEBUG
+//#define DEBUG_RTP_TCP
+
+enum RTSPClientState {
+ RTSP_STATE_IDLE,
+ RTSP_STATE_PLAYING,
+ RTSP_STATE_PAUSED,
+};
+
+typedef struct RTSPState {
+    URLContext *rtsp_hd; /* RTSP TCP connection handle */
+ int nb_rtsp_streams;
+ struct RTSPStream **rtsp_streams;
+
+ enum RTSPClientState state;
+ int64_t seek_timestamp;
+
+ /* XXX: currently we use unbuffered input */
+ // ByteIOContext rtsp_gb;
+ int seq; /* RTSP command sequence number */
+ char session_id[512];
+ enum RTSPProtocol protocol;
+ char last_reply[2048]; /* XXX: allocate ? */
+ RTPDemuxContext *cur_rtp;
+} RTSPState;
+
+typedef struct RTSPStream {
+ URLContext *rtp_handle; /* RTP stream handle */
+ RTPDemuxContext *rtp_ctx; /* RTP parse context */
+
+ int stream_index; /* corresponding stream index, if any. -1 if none (MPEG2TS case) */
+ int interleaved_min, interleaved_max; /* interleave ids, if TCP transport */
+ char control_url[1024]; /* url for this stream (from SDP) */
+
+ int sdp_port; /* port (from SDP content - not used in RTSP) */
+ struct in_addr sdp_ip; /* IP address (from SDP content - not used in RTSP) */
+ int sdp_ttl; /* IP TTL (from SDP content - not used in RTSP) */
+ int sdp_payload_type; /* payload type - only used in SDP */
+ rtp_payload_data_t rtp_payload_data; /* rtp payload parsing infos from SDP */
+
+ RTPDynamicProtocolHandler *dynamic_handler; ///< Only valid if it's a dynamic protocol. (This is the handler structure)
+ void *dynamic_protocol_context; ///< Only valid if it's a dynamic protocol. (This is any private data associated with the dynamic protocol)
+} RTSPStream;
+
+static int rtsp_read_play(AVFormatContext *s);
+
+/* XXX: currently, the only way to change the protocols is to
+   change this variable */
+
+int rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_UDP);
+
+FFRTSPCallback *ff_rtsp_callback = NULL;
+
+static int rtsp_probe(AVProbeData *p)
+{
+ if (strstart(p->filename, "rtsp:", NULL))
+ return AVPROBE_SCORE_MAX;
+ return 0;
+}
+
+static int redir_isspace(int c)
+{
+ return (c == ' ' || c == '\t' || c == '\n' || c == '\r');
+}
+
+static void skip_spaces(const char **pp)
+{
+ const char *p;
+ p = *pp;
+ while (redir_isspace(*p))
+ p++;
+ *pp = p;
+}
+
+static void get_word_sep(char *buf, int buf_size, const char *sep,
+ const char **pp)
+{
+ const char *p;
+ char *q;
+
+ p = *pp;
+ if (*p == '/')
+ p++;
+ skip_spaces(&p);
+ q = buf;
+ while (!strchr(sep, *p) && *p != '\0') {
+ if ((q - buf) < buf_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ if (buf_size > 0)
+ *q = '\0';
+ *pp = p;
+}
+
+static void get_word(char *buf, int buf_size, const char **pp)
+{
+ const char *p;
+ char *q;
+
+ p = *pp;
+ skip_spaces(&p);
+ q = buf;
+ while (!redir_isspace(*p) && *p != '\0') {
+ if ((q - buf) < buf_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ if (buf_size > 0)
+ *q = '\0';
+ *pp = p;
+}
+
+/* parse the rtpmap description: <codec_name>/<clock_rate>[/<other
+ params>] */
+static int sdp_parse_rtpmap(AVCodecContext *codec, RTSPStream *rtsp_st, int payload_type, const char *p)
+{
+ char buf[256];
+ int i;
+ AVCodec *c;
+ const char *c_name;
+
+    /* Loop over AVRtpDynamicPayloadTypes[] and AVRtpPayloadTypes[] and
+       see if we can handle this kind of payload */
+ get_word_sep(buf, sizeof(buf), "/", &p);
+ if (payload_type >= RTP_PT_PRIVATE) {
+ RTPDynamicProtocolHandler *handler= RTPFirstDynamicPayloadHandler;
+ while(handler) {
+ if (!strcmp(buf, handler->enc_name) && (codec->codec_type == handler->codec_type)) {
+ codec->codec_id = handler->codec_id;
+ rtsp_st->dynamic_handler= handler;
+ if(handler->open) {
+ rtsp_st->dynamic_protocol_context= handler->open();
+ }
+ break;
+ }
+ handler= handler->next;
+ }
+ } else {
+        /* We are in a standard case (from http://www.iana.org/assignments/rtp-parameters) */
+ /* search into AVRtpPayloadTypes[] */
+ for (i = 0; AVRtpPayloadTypes[i].pt >= 0; ++i)
+ if (!strcmp(buf, AVRtpPayloadTypes[i].enc_name) && (codec->codec_type == AVRtpPayloadTypes[i].codec_type)){
+ codec->codec_id = AVRtpPayloadTypes[i].codec_id;
+ break;
+ }
+ }
+
+ c = avcodec_find_decoder(codec->codec_id);
+ if (c && c->name)
+ c_name = c->name;
+ else
+ c_name = (char *)NULL;
+
+ if (c_name) {
+ get_word_sep(buf, sizeof(buf), "/", &p);
+ i = atoi(buf);
+ switch (codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ av_log(codec, AV_LOG_DEBUG, " audio codec set to : %s\n", c_name);
+ codec->sample_rate = RTSP_DEFAULT_AUDIO_SAMPLERATE;
+ codec->channels = RTSP_DEFAULT_NB_AUDIO_CHANNELS;
+ if (i > 0) {
+ codec->sample_rate = i;
+ get_word_sep(buf, sizeof(buf), "/", &p);
+ i = atoi(buf);
+ if (i > 0)
+ codec->channels = i;
+ // TODO: there is a bug here; if it is a mono stream, and less than 22000Hz, faad upconverts to stereo and twice the
+ // frequency. No problem, but the sample rate is being set here by the sdp line. Upcoming patch forthcoming. (rdm)
+ }
+ av_log(codec, AV_LOG_DEBUG, " audio samplerate set to : %i\n", codec->sample_rate);
+ av_log(codec, AV_LOG_DEBUG, " audio channels set to : %i\n", codec->channels);
+ break;
+ case CODEC_TYPE_VIDEO:
+ av_log(codec, AV_LOG_DEBUG, " video codec set to : %s\n", c_name);
+ break;
+ default:
+ break;
+ }
+ return 0;
+ }
+
+ return -1;
+}
+
+/* return the length and optionally the data */
+static int hex_to_data(uint8_t *data, const char *p)
+{
+ int c, len, v;
+
+ len = 0;
+ v = 1;
+ for(;;) {
+ skip_spaces(&p);
+        if (*p == '\0')
+ break;
+ c = toupper((unsigned char)*p++);
+ if (c >= '0' && c <= '9')
+ c = c - '0';
+ else if (c >= 'A' && c <= 'F')
+ c = c - 'A' + 10;
+ else
+ break;
+ v = (v << 4) | c;
+ if (v & 0x100) {
+ if (data)
+ data[len] = v;
+ len++;
+ v = 1;
+ }
+ }
+ return len;
+}
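+
+/* Example (added for clarity): for value = "1388" the first pass
+   hex_to_data(NULL, value) returns 2, and the second pass stores
+   data[0] = 0x13 and data[1] = 0x88. */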
+
+static void sdp_parse_fmtp_config(AVCodecContext *codec, char *attr, char *value)
+{
+ switch (codec->codec_id) {
+ case CODEC_ID_MPEG4:
+ case CODEC_ID_AAC:
+ if (!strcmp(attr, "config")) {
+            /* decode the hex encoded parameter */
+ int len = hex_to_data(NULL, value);
+ codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!codec->extradata)
+ return;
+ codec->extradata_size = len;
+ hex_to_data(codec->extradata, value);
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+typedef struct attrname_map
+{
+ const char *str;
+ uint16_t type;
+ uint32_t offset;
+} attrname_map_t;
+
+/* All known fmtp parameters and the corresponding RTPAttrTypeEnum */
+#define ATTR_NAME_TYPE_INT 0
+#define ATTR_NAME_TYPE_STR 1
+static attrname_map_t attr_names[]=
+{
+ {"SizeLength", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, sizelength)},
+ {"IndexLength", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, indexlength)},
+ {"IndexDeltaLength", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, indexdeltalength)},
+ {"profile-level-id", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, profile_level_id)},
+ {"StreamType", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, streamtype)},
+ {"mode", ATTR_NAME_TYPE_STR, offsetof(rtp_payload_data_t, mode)},
+ {NULL, -1, -1},
+};
+
+/** Parse an attribute/value pair from the fmtp 'a=' line of an SDP response. This is broken out as a function
+* because it is used in rtp_h264.c, which is forthcoming.
+*/
+int rtsp_next_attr_and_value(const char **p, char *attr, int attr_size, char *value, int value_size)
+{
+ skip_spaces(p);
+ if(**p)
+ {
+ get_word_sep(attr, attr_size, "=", p);
+ if (**p == '=')
+ (*p)++;
+ get_word_sep(value, value_size, ";", p);
+ if (**p == ';')
+ (*p)++;
+ return 1;
+ }
+ return 0;
+}
+
+/* parse an SDP line and save stream attributes */
+static void sdp_parse_fmtp(AVStream *st, const char *p)
+{
+ char attr[256];
+ char value[4096];
+ int i;
+
+ RTSPStream *rtsp_st = st->priv_data;
+ AVCodecContext *codec = st->codec;
+ rtp_payload_data_t *rtp_payload_data = &rtsp_st->rtp_payload_data;
+
+ /* loop on each attribute */
+ while(rtsp_next_attr_and_value(&p, attr, sizeof(attr), value, sizeof(value)))
+ {
+ /* grab the codec extra_data from the config parameter of the fmtp line */
+ sdp_parse_fmtp_config(codec, attr, value);
+ /* Looking for a known attribute */
+ for (i = 0; attr_names[i].str; ++i) {
+ if (!strcasecmp(attr, attr_names[i].str)) {
+ if (attr_names[i].type == ATTR_NAME_TYPE_INT)
+ *(int *)((char *)rtp_payload_data + attr_names[i].offset) = atoi(value);
+ else if (attr_names[i].type == ATTR_NAME_TYPE_STR)
+ *(char **)((char *)rtp_payload_data + attr_names[i].offset) = av_strdup(value);
+ }
+ }
+ }
+}
+
+/** Parse a string \p p in the form of "Range: npt=xx-xx", and determine the start
+ * and end times.
+ * Used for seeking in the RTP stream.
+ */
+static void rtsp_parse_range_npt(const char *p, int64_t *start, int64_t *end)
+{
+ char buf[256];
+
+ skip_spaces(&p);
+ if (!stristart(p, "npt=", &p))
+ return;
+
+ *start = AV_NOPTS_VALUE;
+ *end = AV_NOPTS_VALUE;
+
+ get_word_sep(buf, sizeof(buf), "-", &p);
+ *start = parse_date(buf, 1);
+ if (*p == '-') {
+ p++;
+ get_word_sep(buf, sizeof(buf), "-", &p);
+ *end = parse_date(buf, 1);
+ }
+// av_log(NULL, AV_LOG_DEBUG, "Range Start: %lld\n", *start);
+// av_log(NULL, AV_LOG_DEBUG, "Range End: %lld\n", *end);
+}
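+
+/* Example (added for clarity, times hypothetical): a "Range: npt=0-34.56" header
+   makes this set *start to 0 and *end to 34560000, since parse_date() with the
+   duration flag returns values in AV_TIME_BASE (microsecond) units. */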
+
+typedef struct SDPParseState {
+ /* SDP only */
+ struct in_addr default_ip;
+ int default_ttl;
+} SDPParseState;
+
+static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
+ int letter, const char *buf)
+{
+ RTSPState *rt = s->priv_data;
+ char buf1[64], st_type[64];
+ const char *p;
+ int codec_type, payload_type, i;
+ AVStream *st;
+ RTSPStream *rtsp_st;
+ struct in_addr sdp_ip;
+ int ttl;
+
+#ifdef DEBUG
+ printf("sdp: %c='%s'\n", letter, buf);
+#endif
+
+ p = buf;
+ switch(letter) {
+ case 'c':
+ get_word(buf1, sizeof(buf1), &p);
+ if (strcmp(buf1, "IN") != 0)
+ return;
+ get_word(buf1, sizeof(buf1), &p);
+ if (strcmp(buf1, "IP4") != 0)
+ return;
+ get_word_sep(buf1, sizeof(buf1), "/", &p);
+ if (inet_aton(buf1, &sdp_ip) == 0)
+ return;
+ ttl = 16;
+ if (*p == '/') {
+ p++;
+ get_word_sep(buf1, sizeof(buf1), "/", &p);
+ ttl = atoi(buf1);
+ }
+ if (s->nb_streams == 0) {
+ s1->default_ip = sdp_ip;
+ s1->default_ttl = ttl;
+ } else {
+ st = s->streams[s->nb_streams - 1];
+ rtsp_st = st->priv_data;
+ rtsp_st->sdp_ip = sdp_ip;
+ rtsp_st->sdp_ttl = ttl;
+ }
+ break;
+ case 's':
+ pstrcpy(s->title, sizeof(s->title), p);
+ break;
+ case 'i':
+ if (s->nb_streams == 0) {
+ pstrcpy(s->comment, sizeof(s->comment), p);
+ break;
+ }
+ break;
+ case 'm':
+ /* new stream */
+ get_word(st_type, sizeof(st_type), &p);
+ if (!strcmp(st_type, "audio")) {
+ codec_type = CODEC_TYPE_AUDIO;
+ } else if (!strcmp(st_type, "video")) {
+ codec_type = CODEC_TYPE_VIDEO;
+ } else {
+ return;
+ }
+ rtsp_st = av_mallocz(sizeof(RTSPStream));
+ if (!rtsp_st)
+ return;
+ rtsp_st->stream_index = -1;
+ dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);
+
+ rtsp_st->sdp_ip = s1->default_ip;
+ rtsp_st->sdp_ttl = s1->default_ttl;
+
+ get_word(buf1, sizeof(buf1), &p); /* port */
+ rtsp_st->sdp_port = atoi(buf1);
+
+ get_word(buf1, sizeof(buf1), &p); /* protocol (ignored) */
+
+ /* XXX: handle list of formats */
+ get_word(buf1, sizeof(buf1), &p); /* format list */
+ rtsp_st->sdp_payload_type = atoi(buf1);
+
+ if (!strcmp(AVRtpPayloadTypes[rtsp_st->sdp_payload_type].enc_name, "MP2T")) {
+ /* no corresponding stream */
+ } else {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return;
+ st->priv_data = rtsp_st;
+ rtsp_st->stream_index = st->index;
+ st->codec->codec_type = codec_type;
+ if (rtsp_st->sdp_payload_type < RTP_PT_PRIVATE) {
+ /* if standard payload type, we can find the codec right now */
+ rtp_get_codec_info(st->codec, rtsp_st->sdp_payload_type);
+ }
+ }
+ /* put a default control url */
+ pstrcpy(rtsp_st->control_url, sizeof(rtsp_st->control_url), s->filename);
+ break;
+ case 'a':
+ if (strstart(p, "control:", &p) && s->nb_streams > 0) {
+ char proto[32];
+ /* get the control url */
+ st = s->streams[s->nb_streams - 1];
+ rtsp_st = st->priv_data;
+
+ /* XXX: may need to add full url resolution */
+ url_split(proto, sizeof(proto), NULL, 0, NULL, 0, NULL, NULL, 0, p);
+ if (proto[0] == '\0') {
+ /* relative control URL */
+ pstrcat(rtsp_st->control_url, sizeof(rtsp_st->control_url), "/");
+ pstrcat(rtsp_st->control_url, sizeof(rtsp_st->control_url), p);
+ } else {
+ pstrcpy(rtsp_st->control_url, sizeof(rtsp_st->control_url), p);
+ }
+ } else if (strstart(p, "rtpmap:", &p)) {
+ /* NOTE: rtpmap is only supported AFTER the 'm=' tag */
+ get_word(buf1, sizeof(buf1), &p);
+ payload_type = atoi(buf1);
+ for(i = 0; i < s->nb_streams;i++) {
+ st = s->streams[i];
+ rtsp_st = st->priv_data;
+ if (rtsp_st->sdp_payload_type == payload_type) {
+ sdp_parse_rtpmap(st->codec, rtsp_st, payload_type, p);
+ }
+ }
+ } else if (strstart(p, "fmtp:", &p)) {
+ /* NOTE: fmtp is only supported AFTER the 'a=rtpmap:xxx' tag */
+ get_word(buf1, sizeof(buf1), &p);
+ payload_type = atoi(buf1);
+ for(i = 0; i < s->nb_streams;i++) {
+ st = s->streams[i];
+ rtsp_st = st->priv_data;
+ if (rtsp_st->sdp_payload_type == payload_type) {
+ if(rtsp_st->dynamic_handler && rtsp_st->dynamic_handler->parse_sdp_a_line) {
+ if(!rtsp_st->dynamic_handler->parse_sdp_a_line(st, rtsp_st->dynamic_protocol_context, buf)) {
+ sdp_parse_fmtp(st, p);
+ }
+ } else {
+ sdp_parse_fmtp(st, p);
+ }
+ }
+ }
+ } else if(strstart(p, "framesize:", &p)) {
+ // let dynamic protocol handlers have a stab at the line.
+ get_word(buf1, sizeof(buf1), &p);
+ payload_type = atoi(buf1);
+ for(i = 0; i < s->nb_streams;i++) {
+ st = s->streams[i];
+ rtsp_st = st->priv_data;
+ if (rtsp_st->sdp_payload_type == payload_type) {
+ if(rtsp_st->dynamic_handler && rtsp_st->dynamic_handler->parse_sdp_a_line) {
+ rtsp_st->dynamic_handler->parse_sdp_a_line(st, rtsp_st->dynamic_protocol_context, buf);
+ }
+ }
+ }
+ } else if(strstart(p, "range:", &p)) {
+ int64_t start, end;
+
+ // this is so that seeking on a streamed file can work.
+ rtsp_parse_range_npt(p, &start, &end);
+ s->start_time= start;
+ s->duration= (end==AV_NOPTS_VALUE)?AV_NOPTS_VALUE:end-start; // AV_NOPTS_VALUE means live broadcast (and can't seek)
+ }
+ break;
+ }
+}
+
+static int sdp_parse(AVFormatContext *s, const char *content)
+{
+ const char *p;
+ int letter;
+ char buf[1024], *q;
+ SDPParseState sdp_parse_state, *s1 = &sdp_parse_state;
+
+ memset(s1, 0, sizeof(SDPParseState));
+ p = content;
+ for(;;) {
+ skip_spaces(&p);
+ letter = *p;
+ if (letter == '\0')
+ break;
+ p++;
+ if (*p != '=')
+ goto next_line;
+ p++;
+ /* get the content */
+ q = buf;
+ while (*p != '\n' && *p != '\r' && *p != '\0') {
+ if ((q - buf) < sizeof(buf) - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ sdp_parse_line(s, s1, letter, buf);
+ next_line:
+ while (*p != '\n' && *p != '\0')
+ p++;
+ if (*p == '\n')
+ p++;
+ }
+ return 0;
+}
+
+static void rtsp_parse_range(int *min_ptr, int *max_ptr, const char **pp)
+{
+ const char *p;
+ int v;
+
+ p = *pp;
+ skip_spaces(&p);
+ v = strtol(p, (char **)&p, 10);
+ if (*p == '-') {
+ p++;
+ *min_ptr = v;
+ v = strtol(p, (char **)&p, 10);
+ *max_ptr = v;
+ } else {
+ *min_ptr = v;
+ *max_ptr = v;
+ }
+ *pp = p;
+}
+
+/* XXX: only one transport specification is parsed */
+static void rtsp_parse_transport(RTSPHeader *reply, const char *p)
+{
+ char transport_protocol[16];
+ char profile[16];
+ char lower_transport[16];
+ char parameter[16];
+ RTSPTransportField *th;
+ char buf[256];
+
+ reply->nb_transports = 0;
+
+ for(;;) {
+ skip_spaces(&p);
+ if (*p == '\0')
+ break;
+
+ th = &reply->transports[reply->nb_transports];
+
+ get_word_sep(transport_protocol, sizeof(transport_protocol),
+ "/", &p);
+ if (*p == '/')
+ p++;
+ get_word_sep(profile, sizeof(profile), "/;,", &p);
+ lower_transport[0] = '\0';
+ if (*p == '/') {
+ p++;
+ get_word_sep(lower_transport, sizeof(lower_transport),
+ ";,", &p);
+ }
+ if (!strcasecmp(lower_transport, "TCP"))
+ th->protocol = RTSP_PROTOCOL_RTP_TCP;
+ else
+ th->protocol = RTSP_PROTOCOL_RTP_UDP;
+
+ if (*p == ';')
+ p++;
+ /* get each parameter */
+ while (*p != '\0' && *p != ',') {
+ get_word_sep(parameter, sizeof(parameter), "=;,", &p);
+ if (!strcmp(parameter, "port")) {
+ if (*p == '=') {
+ p++;
+ rtsp_parse_range(&th->port_min, &th->port_max, &p);
+ }
+ } else if (!strcmp(parameter, "client_port")) {
+ if (*p == '=') {
+ p++;
+ rtsp_parse_range(&th->client_port_min,
+ &th->client_port_max, &p);
+ }
+ } else if (!strcmp(parameter, "server_port")) {
+ if (*p == '=') {
+ p++;
+ rtsp_parse_range(&th->server_port_min,
+ &th->server_port_max, &p);
+ }
+ } else if (!strcmp(parameter, "interleaved")) {
+ if (*p == '=') {
+ p++;
+ rtsp_parse_range(&th->interleaved_min,
+ &th->interleaved_max, &p);
+ }
+ } else if (!strcmp(parameter, "multicast")) {
+ if (th->protocol == RTSP_PROTOCOL_RTP_UDP)
+ th->protocol = RTSP_PROTOCOL_RTP_UDP_MULTICAST;
+ } else if (!strcmp(parameter, "ttl")) {
+ if (*p == '=') {
+ p++;
+ th->ttl = strtol(p, (char **)&p, 10);
+ }
+ } else if (!strcmp(parameter, "destination")) {
+ struct in_addr ipaddr;
+
+ if (*p == '=') {
+ p++;
+ get_word_sep(buf, sizeof(buf), ";,", &p);
+ if (inet_aton(buf, &ipaddr))
+ th->destination = ntohl(ipaddr.s_addr);
+ }
+ }
+ while (*p != ';' && *p != '\0' && *p != ',')
+ p++;
+ if (*p == ';')
+ p++;
+ }
+ if (*p == ',')
+ p++;
+
+ reply->nb_transports++;
+ }
+}
+
+void rtsp_parse_line(RTSPHeader *reply, const char *buf)
+{
+ const char *p;
+
+ /* NOTE: use a case-insensitive match to cope with broken servers */
+ p = buf;
+ if (stristart(p, "Session:", &p)) {
+ get_word_sep(reply->session_id, sizeof(reply->session_id), ";", &p);
+ } else if (stristart(p, "Content-Length:", &p)) {
+ reply->content_length = strtol(p, NULL, 10);
+ } else if (stristart(p, "Transport:", &p)) {
+ rtsp_parse_transport(reply, p);
+ } else if (stristart(p, "CSeq:", &p)) {
+ reply->seq = strtol(p, NULL, 10);
+ } else if (stristart(p, "Range:", &p)) {
+ rtsp_parse_range_npt(p, &reply->range_start, &reply->range_end);
+ }
+}
+
+static int url_readbuf(URLContext *h, unsigned char *buf, int size)
+{
+ int ret, len;
+
+ len = 0;
+ while (len < size) {
+ ret = url_read(h, buf+len, size-len);
+ if (ret < 1)
+ return ret;
+ len += ret;
+ }
+ return len;
+}
+
+/* skip an RTP/TCP interleaved packet */
+static void rtsp_skip_packet(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ int ret, len, len1;
+ uint8_t buf[1024];
+
+ ret = url_readbuf(rt->rtsp_hd, buf, 3);
+ if (ret != 3)
+ return;
+ len = (buf[1] << 8) | buf[2];
+#ifdef DEBUG
+ printf("skipping RTP packet len=%d\n", len);
+#endif
+ /* skip payload */
+ while (len > 0) {
+ len1 = len;
+ if (len1 > sizeof(buf))
+ len1 = sizeof(buf);
+ ret = url_readbuf(rt->rtsp_hd, buf, len1);
+ if (ret != len1)
+ return;
+ len -= len1;
+ }
+}
+
+static void rtsp_send_cmd(AVFormatContext *s,
+ const char *cmd, RTSPHeader *reply,
+ unsigned char **content_ptr)
+{
+ RTSPState *rt = s->priv_data;
+ char buf[4096], buf1[1024], *q;
+ unsigned char ch;
+ const char *p;
+ int content_length, line_count;
+ unsigned char *content = NULL;
+
+ memset(reply, 0, sizeof(RTSPHeader));
+
+ rt->seq++;
+ pstrcpy(buf, sizeof(buf), cmd);
+ snprintf(buf1, sizeof(buf1), "CSeq: %d\r\n", rt->seq);
+ pstrcat(buf, sizeof(buf), buf1);
+ if (rt->session_id[0] != '\0' && !strstr(cmd, "\nIf-Match:")) {
+ snprintf(buf1, sizeof(buf1), "Session: %s\r\n", rt->session_id);
+ pstrcat(buf, sizeof(buf), buf1);
+ }
+ pstrcat(buf, sizeof(buf), "\r\n");
+#ifdef DEBUG
+ printf("Sending:\n%s--\n", buf);
+#endif
+ url_write(rt->rtsp_hd, buf, strlen(buf));
+
+ /* parse reply (XXX: use buffers) */
+ line_count = 0;
+ rt->last_reply[0] = '\0';
+ for(;;) {
+ q = buf;
+ for(;;) {
+ if (url_readbuf(rt->rtsp_hd, &ch, 1) != 1)
+ break;
+ if (ch == '\n')
+ break;
+ if (ch == '$') {
+ /* XXX: only parse it if first char on line ? */
+ rtsp_skip_packet(s);
+ } else if (ch != '\r') {
+ if ((q - buf) < sizeof(buf) - 1)
+ *q++ = ch;
+ }
+ }
+ *q = '\0';
+#ifdef DEBUG
+ printf("line='%s'\n", buf);
+#endif
+ /* test if last line */
+ if (buf[0] == '\0')
+ break;
+ p = buf;
+ if (line_count == 0) {
+ /* get reply code */
+ get_word(buf1, sizeof(buf1), &p);
+ get_word(buf1, sizeof(buf1), &p);
+ reply->status_code = atoi(buf1);
+ } else {
+ rtsp_parse_line(reply, p);
+ pstrcat(rt->last_reply, sizeof(rt->last_reply), p);
+ pstrcat(rt->last_reply, sizeof(rt->last_reply), "\n");
+ }
+ line_count++;
+ }
+
+ if (rt->session_id[0] == '\0' && reply->session_id[0] != '\0')
+ pstrcpy(rt->session_id, sizeof(rt->session_id), reply->session_id);
+
+ content_length = reply->content_length;
+ if (content_length > 0) {
+ /* leave some room for a trailing '\0' (useful for simple parsing) */
+ content = av_malloc(content_length + 1);
+ (void)url_readbuf(rt->rtsp_hd, content, content_length);
+ content[content_length] = '\0';
+ }
+ if (content_ptr)
+ *content_ptr = content;
+}
+
+/* useful for modules: set RTSP callback function */
+
+void rtsp_set_callback(FFRTSPCallback *rtsp_cb)
+{
+ ff_rtsp_callback = rtsp_cb;
+}
+
+
+/* close and free RTSP streams */
+static void rtsp_close_streams(RTSPState *rt)
+{
+ int i;
+ RTSPStream *rtsp_st;
+
+ for(i=0;i<rt->nb_rtsp_streams;i++) {
+ rtsp_st = rt->rtsp_streams[i];
+ if (rtsp_st) {
+ if (rtsp_st->rtp_ctx)
+ rtp_parse_close(rtsp_st->rtp_ctx);
+ if (rtsp_st->rtp_handle)
+ url_close(rtsp_st->rtp_handle);
+ if (rtsp_st->dynamic_handler && rtsp_st->dynamic_protocol_context)
+ rtsp_st->dynamic_handler->close(rtsp_st->dynamic_protocol_context);
+ }
+ av_free(rtsp_st);
+ }
+ av_free(rt->rtsp_streams);
+}
+
+static int rtsp_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ RTSPState *rt = s->priv_data;
+ char host[1024], path[1024], tcpname[1024], cmd[2048];
+ URLContext *rtsp_hd;
+ int port, i, j, ret, err;
+ RTSPHeader reply1, *reply = &reply1;
+ unsigned char *content = NULL;
+ RTSPStream *rtsp_st;
+ int protocol_mask;
+ AVStream *st;
+
+ /* extract hostname and port */
+ url_split(NULL, 0, NULL, 0,
+ host, sizeof(host), &port, path, sizeof(path), s->filename);
+ if (port < 0)
+ port = RTSP_DEFAULT_PORT;
+
+ /* open the TCP connection */
+ snprintf(tcpname, sizeof(tcpname), "tcp://%s:%d", host, port);
+ if (url_open(&rtsp_hd, tcpname, URL_RDWR) < 0)
+ return AVERROR_IO;
+ rt->rtsp_hd = rtsp_hd;
+ rt->seq = 0;
+
+ /* describe the stream */
+ snprintf(cmd, sizeof(cmd),
+ "DESCRIBE %s RTSP/1.0\r\n"
+ "Accept: application/sdp\r\n",
+ s->filename);
+ rtsp_send_cmd(s, cmd, reply, &content);
+ if (!content) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ if (reply->status_code != RTSP_STATUS_OK) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ /* now that we have the SDP description, parse it */
+ ret = sdp_parse(s, (const char *)content);
+ av_freep(&content);
+ if (ret < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ protocol_mask = rtsp_default_protocols;
+
+ /* for each stream, make the setup request */
+ /* XXX: we assume the same server is used for the control of each
+ RTSP stream */
+
+ for(j = RTSP_RTP_PORT_MIN, i = 0; i < rt->nb_rtsp_streams; ++i) {
+ char transport[2048];
+
+ rtsp_st = rt->rtsp_streams[i];
+
+ /* compute available transports */
+ transport[0] = '\0';
+
+ /* RTP/UDP */
+ if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP)) {
+ char buf[256];
+
+ /* first try in specified port range */
+ if (RTSP_RTP_PORT_MIN != 0) {
+ while(j <= RTSP_RTP_PORT_MAX) {
+ snprintf(buf, sizeof(buf), "rtp://?localport=%d", j);
+ /* we will use two ports per RTP stream (RTP and RTCP), so advance
+ to the next pair even if the open fails, to avoid looping forever */
+ j += 2;
+ if (url_open(&rtsp_st->rtp_handle, buf, URL_RDWR) == 0)
+ goto rtp_opened;
+ }
+ }
+
+/* then try on any port
+** if (url_open(&rtsp_st->rtp_handle, "rtp://", URL_RDONLY) < 0) {
+** err = AVERROR_INVALIDDATA;
+** goto fail;
+** }
+*/
+
+ rtp_opened:
+ port = rtp_get_local_port(rtsp_st->rtp_handle);
+ if (transport[0] != '\0')
+ pstrcat(transport, sizeof(transport), ",");
+ snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1,
+ "RTP/AVP/UDP;unicast;client_port=%d-%d",
+ port, port + 1);
+ }
+
+ /* RTP/TCP */
+ else if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_TCP)) {
+ if (transport[0] != '\0')
+ pstrcat(transport, sizeof(transport), ",");
+ snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1,
+ "RTP/AVP/TCP");
+ }
+
+ else if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP_MULTICAST)) {
+ if (transport[0] != '\0')
+ pstrcat(transport, sizeof(transport), ",");
+ snprintf(transport + strlen(transport),
+ sizeof(transport) - strlen(transport) - 1,
+ "RTP/AVP/UDP;multicast");
+ }
+ snprintf(cmd, sizeof(cmd),
+ "SETUP %s RTSP/1.0\r\n"
+ "Transport: %s\r\n",
+ rtsp_st->control_url, transport);
+ rtsp_send_cmd(s, cmd, reply, NULL);
+ if (reply->status_code != RTSP_STATUS_OK ||
+ reply->nb_transports != 1) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ /* XXX: same protocol for all streams is required */
+ if (i > 0) {
+ if (reply->transports[0].protocol != rt->protocol) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ } else {
+ rt->protocol = reply->transports[0].protocol;
+ }
+
+ /* close the RTP connection if it was not chosen */
+ if (reply->transports[0].protocol != RTSP_PROTOCOL_RTP_UDP &&
+ (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP))) {
+ url_close(rtsp_st->rtp_handle);
+ rtsp_st->rtp_handle = NULL;
+ }
+
+ switch(reply->transports[0].protocol) {
+ case RTSP_PROTOCOL_RTP_TCP:
+ rtsp_st->interleaved_min = reply->transports[0].interleaved_min;
+ rtsp_st->interleaved_max = reply->transports[0].interleaved_max;
+ break;
+
+ case RTSP_PROTOCOL_RTP_UDP:
+ {
+ char url[1024];
+
+ /* XXX: also use address if specified */
+ snprintf(url, sizeof(url), "rtp://%s:%d",
+ host, reply->transports[0].server_port_min);
+ if (rtp_set_remote_url(rtsp_st->rtp_handle, url) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+ break;
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ {
+ char url[1024];
+ int ttl;
+
+ ttl = reply->transports[0].ttl;
+ if (!ttl)
+ ttl = 16;
+ snprintf(url, sizeof(url), "rtp://%s:%d?multicast=1&ttl=%d",
+ host,
+ reply->transports[0].server_port_min,
+ ttl);
+ if (url_open(&rtsp_st->rtp_handle, url, URL_RDWR) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+ break;
+ }
+ /* open the RTP context */
+ st = NULL;
+ if (rtsp_st->stream_index >= 0)
+ st = s->streams[rtsp_st->stream_index];
+ if (!st)
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+ rtsp_st->rtp_ctx = rtp_parse_open(s, st, rtsp_st->rtp_handle, rtsp_st->sdp_payload_type, &rtsp_st->rtp_payload_data);
+
+ if (!rtsp_st->rtp_ctx) {
+ err = AVERROR_NOMEM;
+ goto fail;
+ } else {
+ if(rtsp_st->dynamic_handler) {
+ rtsp_st->rtp_ctx->dynamic_protocol_context= rtsp_st->dynamic_protocol_context;
+ rtsp_st->rtp_ctx->parse_packet= rtsp_st->dynamic_handler->parse_packet;
+ }
+ }
+ }
+
+ /* use callback if available to extend setup */
+ if (ff_rtsp_callback) {
+ if (ff_rtsp_callback(RTSP_ACTION_CLIENT_SETUP, rt->session_id,
+ NULL, 0, rt->last_reply) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+
+
+ rt->state = RTSP_STATE_IDLE;
+ rt->seek_timestamp = 0; /* default is to start stream at position
+ zero */
+ if (ap->initial_pause) {
+ /* do not start immediately */
+ } else {
+ if (rtsp_read_play(s) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+ return 0;
+ fail:
+ rtsp_close_streams(rt);
+ av_freep(&content);
+ url_close(rt->rtsp_hd);
+ return err;
+}
+
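+/* RTSP interleaved binary data is framed as a '$' byte, a one-byte channel id
+ * and a two-byte big-endian payload length, followed by the RTP packet itself;
+ * tcp_read_packet() resynchronizes on '$' and returns the matching stream. */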
+static int tcp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
+ uint8_t *buf, int buf_size)
+{
+ RTSPState *rt = s->priv_data;
+ int id, len, i, ret;
+ RTSPStream *rtsp_st;
+
+#ifdef DEBUG_RTP_TCP
+ printf("tcp_read_packet:\n");
+#endif
+ redo:
+ for(;;) {
+ ret = url_readbuf(rt->rtsp_hd, buf, 1);
+#ifdef DEBUG_RTP_TCP
+ printf("ret=%d c=%02x [%c]\n", ret, buf[0], buf[0]);
+#endif
+ if (ret != 1)
+ return -1;
+ if (buf[0] == '$')
+ break;
+ }
+ ret = url_readbuf(rt->rtsp_hd, buf, 3);
+ if (ret != 3)
+ return -1;
+ id = buf[0];
+ len = (buf[1] << 8) | buf[2];
+#ifdef DEBUG_RTP_TCP
+ printf("id=%d len=%d\n", id, len);
+#endif
+ if (len > buf_size || len < 12)
+ goto redo;
+ /* get the data */
+ ret = url_readbuf(rt->rtsp_hd, buf, len);
+ if (ret != len)
+ return -1;
+
+ /* find the matching stream */
+ for(i = 0; i < rt->nb_rtsp_streams; i++) {
+ rtsp_st = rt->rtsp_streams[i];
+ if (id >= rtsp_st->interleaved_min &&
+ id <= rtsp_st->interleaved_max)
+ goto found;
+ }
+ goto redo;
+ found:
+ *prtsp_st = rtsp_st;
+ return len;
+}
+
+static int udp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
+ uint8_t *buf, int buf_size)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPStream *rtsp_st;
+ fd_set rfds;
+ int fd1, fd2, fd_max, n, i, ret;
+ struct timeval tv;
+
+ for(;;) {
+ if (url_interrupt_cb())
+ return -1;
+ FD_ZERO(&rfds);
+ fd_max = -1;
+ for(i = 0; i < rt->nb_rtsp_streams; i++) {
+ rtsp_st = rt->rtsp_streams[i];
+ /* currently, we cannot probe the RTCP handle because of blocking restrictions */
+ rtp_get_file_handles(rtsp_st->rtp_handle, &fd1, &fd2);
+ if (fd1 > fd_max)
+ fd_max = fd1;
+ FD_SET(fd1, &rfds);
+ }
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+ n = select(fd_max + 1, &rfds, NULL, NULL, &tv);
+ if (n > 0) {
+ for(i = 0; i < rt->nb_rtsp_streams; i++) {
+ rtsp_st = rt->rtsp_streams[i];
+ rtp_get_file_handles(rtsp_st->rtp_handle, &fd1, &fd2);
+ if (FD_ISSET(fd1, &rfds)) {
+ ret = url_read(rtsp_st->rtp_handle, buf, buf_size);
+ if (ret > 0) {
+ *prtsp_st = rtsp_st;
+ return ret;
+ }
+ }
+ }
+ }
+ }
+}
+
+static int rtsp_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPStream *rtsp_st;
+ int ret, len;
+ uint8_t buf[RTP_MAX_PACKET_LENGTH];
+
+ /* get next frames from the same RTP packet */
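+ /* rtp_parse_packet() returns 1 while more frames can be extracted from
+ * the previously received packet, so rt->cur_rtp is drained here before
+ * another packet is read from the network */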
+ if (rt->cur_rtp) {
+ ret = rtp_parse_packet(rt->cur_rtp, pkt, NULL, 0);
+ if (ret == 0) {
+ rt->cur_rtp = NULL;
+ return 0;
+ } else if (ret == 1) {
+ return 0;
+ } else {
+ rt->cur_rtp = NULL;
+ }
+ }
+
+ /* read next RTP packet */
+ redo:
+ switch(rt->protocol) {
+ default:
+ case RTSP_PROTOCOL_RTP_TCP:
+ len = tcp_read_packet(s, &rtsp_st, buf, sizeof(buf));
+ break;
+ case RTSP_PROTOCOL_RTP_UDP:
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ len = udp_read_packet(s, &rtsp_st, buf, sizeof(buf));
+ /* rtsp_st is only valid when a packet was actually read */
+ if (len >= 0 && rtsp_st->rtp_ctx)
+ rtp_check_and_send_back_rr(rtsp_st->rtp_ctx, len);
+ break;
+ }
+ if (len < 0)
+ return AVERROR_IO;
+ ret = rtp_parse_packet(rtsp_st->rtp_ctx, pkt, buf, len);
+ if (ret < 0)
+ goto redo;
+ if (ret == 1) {
+ /* more packets may follow, so we save the RTP context */
+ rt->cur_rtp = rtsp_st->rtp_ctx;
+ }
+ return 0;
+}
+
+static int rtsp_read_play(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPHeader reply1, *reply = &reply1;
+ char cmd[1024];
+
+ av_log(s, AV_LOG_DEBUG, "hello state=%d\n", rt->state);
+
+ if (rt->state == RTSP_STATE_PAUSED) {
+ snprintf(cmd, sizeof(cmd),
+ "PLAY %s RTSP/1.0\r\n",
+ s->filename);
+ } else {
+ snprintf(cmd, sizeof(cmd),
+ "PLAY %s RTSP/1.0\r\n"
+ "Range: npt=%0.3f-\r\n",
+ s->filename,
+ (double)rt->seek_timestamp / AV_TIME_BASE);
+ }
+ rtsp_send_cmd(s, cmd, reply, NULL);
+ if (reply->status_code != RTSP_STATUS_OK) {
+ return -1;
+ } else {
+ rt->state = RTSP_STATE_PLAYING;
+ return 0;
+ }
+}
+
+/* pause the stream */
+static int rtsp_read_pause(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPHeader reply1, *reply = &reply1;
+ char cmd[1024];
+
+ if (rt->state != RTSP_STATE_PLAYING)
+ return 0;
+
+ snprintf(cmd, sizeof(cmd),
+ "PAUSE %s RTSP/1.0\r\n",
+ s->filename);
+ rtsp_send_cmd(s, cmd, reply, NULL);
+ if (reply->status_code != RTSP_STATUS_OK) {
+ return -1;
+ } else {
+ rt->state = RTSP_STATE_PAUSED;
+ return 0;
+ }
+}
+
+static int rtsp_read_seek(AVFormatContext *s, int stream_index,
+ int64_t timestamp, int flags)
+{
+ RTSPState *rt = s->priv_data;
+
+ rt->seek_timestamp = timestamp;
+ switch(rt->state) {
+ default:
+ case RTSP_STATE_IDLE:
+ break;
+ case RTSP_STATE_PLAYING:
+ if (rtsp_read_play(s) != 0)
+ return -1;
+ break;
+ case RTSP_STATE_PAUSED:
+ rt->state = RTSP_STATE_IDLE;
+ break;
+ }
+ return 0;
+}
+
+static int rtsp_read_close(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPHeader reply1, *reply = &reply1;
+ char cmd[1024];
+
+#if 0
+ /* NOTE: it is valid to flush the buffer here */
+ if (rt->protocol == RTSP_PROTOCOL_RTP_TCP) {
+ url_fclose(&rt->rtsp_gb);
+ }
+#endif
+ snprintf(cmd, sizeof(cmd),
+ "TEARDOWN %s RTSP/1.0\r\n",
+ s->filename);
+ rtsp_send_cmd(s, cmd, reply, NULL);
+
+ if (ff_rtsp_callback) {
+ ff_rtsp_callback(RTSP_ACTION_CLIENT_TEARDOWN, rt->session_id,
+ NULL, 0, NULL);
+ }
+
+ rtsp_close_streams(rt);
+ url_close(rt->rtsp_hd);
+ return 0;
+}
+
+AVInputFormat rtsp_demuxer = {
+ "rtsp",
+ "RTSP input format",
+ sizeof(RTSPState),
+ rtsp_probe,
+ rtsp_read_header,
+ rtsp_read_packet,
+ rtsp_read_close,
+ rtsp_read_seek,
+ .flags = AVFMT_NOFILE,
+ .read_play = rtsp_read_play,
+ .read_pause = rtsp_read_pause,
+};
+
+static int sdp_probe(AVProbeData *p1)
+{
+ const char *p = p1->buf, *p_end = p1->buf + p1->buf_size;
+
+ /* we look for a line beginning "c=IN IP4" */
+ while (p < p_end && *p != '\0') {
+ if (p + sizeof("c=IN IP4") - 1 < p_end && strstart(p, "c=IN IP4", NULL))
+ return AVPROBE_SCORE_MAX / 2;
+
+ while(p < p_end - 1 && *p != '\n') p++;
+ if (++p >= p_end)
+ break;
+ if (*p == '\r')
+ p++;
+ }
+ return 0;
+}
+
+#define SDP_MAX_SIZE 8192
+
+static int sdp_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPStream *rtsp_st;
+ int size, i, err;
+ char *content;
+ char url[1024];
+ AVStream *st;
+
+ /* read the whole sdp file */
+ /* XXX: better loading */
+ content = av_malloc(SDP_MAX_SIZE);
+ size = get_buffer(&s->pb, content, SDP_MAX_SIZE - 1);
+ if (size <= 0) {
+ av_free(content);
+ return AVERROR_INVALIDDATA;
+ }
+ content[size] = '\0';
+
+ sdp_parse(s, content);
+ av_free(content);
+
+ /* open each RTP stream */
+ for(i=0;i<rt->nb_rtsp_streams;i++) {
+ rtsp_st = rt->rtsp_streams[i];
+
+ snprintf(url, sizeof(url), "rtp://%s:%d?multicast=1&ttl=%d",
+ inet_ntoa(rtsp_st->sdp_ip),
+ rtsp_st->sdp_port,
+ rtsp_st->sdp_ttl);
+ if (url_open(&rtsp_st->rtp_handle, url, URL_RDWR) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ /* open the RTP context */
+ st = NULL;
+ if (rtsp_st->stream_index >= 0)
+ st = s->streams[rtsp_st->stream_index];
+ if (!st)
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+ rtsp_st->rtp_ctx = rtp_parse_open(s, st, rtsp_st->rtp_handle, rtsp_st->sdp_payload_type, &rtsp_st->rtp_payload_data);
+ if (!rtsp_st->rtp_ctx) {
+ err = AVERROR_NOMEM;
+ goto fail;
+ } else {
+ if(rtsp_st->dynamic_handler) {
+ rtsp_st->rtp_ctx->dynamic_protocol_context= rtsp_st->dynamic_protocol_context;
+ rtsp_st->rtp_ctx->parse_packet= rtsp_st->dynamic_handler->parse_packet;
+ }
+ }
+ }
+ return 0;
+ fail:
+ rtsp_close_streams(rt);
+ return err;
+}
+
+static int sdp_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ return rtsp_read_packet(s, pkt);
+}
+
+static int sdp_read_close(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ rtsp_close_streams(rt);
+ return 0;
+}
+
+#ifdef CONFIG_SDP_DEMUXER
+AVInputFormat sdp_demuxer = {
+ "sdp",
+ "SDP",
+ sizeof(RTSPState),
+ sdp_probe,
+ sdp_read_header,
+ sdp_read_packet,
+ sdp_read_close,
+};
+#endif
+
+/* dummy redirector format (used directly in av_open_input_file now) */
+static int redir_probe(AVProbeData *pd)
+{
+ const char *p;
+ p = pd->buf;
+ while (redir_isspace(*p))
+ p++;
+ if (strstart(p, "http://", NULL) ||
+ strstart(p, "rtsp://", NULL))
+ return AVPROBE_SCORE_MAX;
+ return 0;
+}
+
+/* called from utils.c */
+int redir_open(AVFormatContext **ic_ptr, ByteIOContext *f)
+{
+ char buf[4096], *q;
+ int c;
+ AVFormatContext *ic = NULL;
+
+ /* parse each URL and try to open it */
+ c = url_fgetc(f);
+ while (c != URL_EOF) {
+ /* skip spaces */
+ for(;;) {
+ if (!redir_isspace(c))
+ break;
+ c = url_fgetc(f);
+ }
+ if (c == URL_EOF)
+ break;
+ /* record url */
+ q = buf;
+ for(;;) {
+ if (c == URL_EOF || redir_isspace(c))
+ break;
+ if ((q - buf) < sizeof(buf) - 1)
+ *q++ = c;
+ c = url_fgetc(f);
+ }
+ *q = '\0';
+ //printf("URL='%s'\n", buf);
+ /* try to open the media file */
+ if (av_open_input_file(&ic, buf, NULL, 0, NULL) == 0)
+ break;
+ }
+ *ic_ptr = ic;
+ if (!ic)
+ return AVERROR_IO;
+ else
+ return 0;
+}
+
+AVInputFormat redir_demuxer = {
+ "redir",
+ "Redirector format",
+ 0,
+ redir_probe,
+ NULL,
+ NULL,
+ NULL,
+};
diff --git a/contrib/ffmpeg/libavformat/rtsp.h b/contrib/ffmpeg/libavformat/rtsp.h
new file mode 100644
index 000000000..c08aaa6ac
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtsp.h
@@ -0,0 +1,98 @@
+/*
+ * RTSP definitions
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef RTSP_H
+#define RTSP_H
+
+/* RTSP handling */
+enum RTSPStatusCode {
+#define DEF(n, c, s) c = n,
+#include "rtspcodes.h"
+#undef DEF
+};
+
+enum RTSPProtocol {
+ RTSP_PROTOCOL_RTP_UDP = 0,
+ RTSP_PROTOCOL_RTP_TCP = 1,
+ RTSP_PROTOCOL_RTP_UDP_MULTICAST = 2,
+};
+
+#define RTSP_DEFAULT_PORT 554
+#define RTSP_MAX_TRANSPORTS 8
+#define RTSP_TCP_MAX_PACKET_SIZE 1472
+#define RTSP_DEFAULT_NB_AUDIO_CHANNELS 2
+#define RTSP_DEFAULT_AUDIO_SAMPLERATE 44100
+#define RTSP_RTP_PORT_MIN 5000
+#define RTSP_RTP_PORT_MAX 10000
+
+typedef struct RTSPTransportField {
+ int interleaved_min, interleaved_max; /* interleave ids, if TCP transport */
+ int port_min, port_max; /* RTP ports */
+ int client_port_min, client_port_max; /* RTP ports */
+ int server_port_min, server_port_max; /* RTP ports */
+ int ttl; /* ttl value */
+ uint32_t destination; /* destination IP address */
+ enum RTSPProtocol protocol;
+} RTSPTransportField;
+
+typedef struct RTSPHeader {
+ int content_length;
+ enum RTSPStatusCode status_code; /* response code from server */
+ int nb_transports;
+ /* in AV_TIME_BASE unit, AV_NOPTS_VALUE if not used */
+ int64_t range_start, range_end;
+ RTSPTransportField transports[RTSP_MAX_TRANSPORTS];
+ int seq; /* sequence number */
+ char session_id[512];
+} RTSPHeader;
+
+/* the callback can be used to extend the connection setup/teardown step */
+enum RTSPCallbackAction {
+ RTSP_ACTION_SERVER_SETUP,
+ RTSP_ACTION_SERVER_TEARDOWN,
+ RTSP_ACTION_CLIENT_SETUP,
+ RTSP_ACTION_CLIENT_TEARDOWN,
+};
+
+typedef struct RTSPActionServerSetup {
+ uint32_t ipaddr;
+ char transport_option[512];
+} RTSPActionServerSetup;
+
+typedef int FFRTSPCallback(enum RTSPCallbackAction action,
+ const char *session_id,
+ char *buf, int buf_size,
+ void *arg);
+
+void rtsp_set_callback(FFRTSPCallback *rtsp_cb);
+
+int rtsp_init(void);
+void rtsp_parse_line(RTSPHeader *reply, const char *buf);
+
+extern int rtsp_default_protocols;
+extern int rtsp_rtp_port_min;
+extern int rtsp_rtp_port_max;
+extern FFRTSPCallback *ff_rtsp_callback;
+extern AVInputFormat rtsp_demuxer;
+
+int rtsp_pause(AVFormatContext *s);
+int rtsp_resume(AVFormatContext *s);
+
+#endif /* RTSP_H */
diff --git a/contrib/ffmpeg/libavformat/rtspcodes.h b/contrib/ffmpeg/libavformat/rtspcodes.h
new file mode 100644
index 000000000..f7aab31c9
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtspcodes.h
@@ -0,0 +1,31 @@
+/*
+ * RTSP definitions
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+DEF(200, RTSP_STATUS_OK, "OK")
+DEF(405, RTSP_STATUS_METHOD, "Method Not Allowed")
+DEF(453, RTSP_STATUS_BANDWIDTH, "Not Enough Bandwidth")
+DEF(454, RTSP_STATUS_SESSION, "Session Not Found")
+DEF(455, RTSP_STATUS_STATE, "Method Not Valid in This State")
+DEF(459, RTSP_STATUS_AGGREGATE, "Aggregate operation not allowed")
+DEF(460, RTSP_STATUS_ONLY_AGGREGATE, "Only aggregate operation allowed")
+DEF(461, RTSP_STATUS_TRANSPORT, "Unsupported transport")
+DEF(500, RTSP_STATUS_INTERNAL, "Internal Server Error")
+DEF(503, RTSP_STATUS_SERVICE, "Service Unavailable")
+DEF(505, RTSP_STATUS_VERSION, "RTSP Version not supported")
diff --git a/contrib/ffmpeg/libavformat/segafilm.c b/contrib/ffmpeg/libavformat/segafilm.c
new file mode 100644
index 000000000..4feb97262
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/segafilm.c
@@ -0,0 +1,310 @@
+/*
+ * Sega FILM Format (CPK) Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file segafilm.c
+ * Sega FILM (.cpk) file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * For more information regarding the Sega FILM file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#include "avformat.h"
+
+#define FILM_TAG MKBETAG('F', 'I', 'L', 'M')
+#define FDSC_TAG MKBETAG('F', 'D', 'S', 'C')
+#define STAB_TAG MKBETAG('S', 'T', 'A', 'B')
+#define CVID_TAG MKBETAG('c', 'v', 'i', 'd')
+
+typedef struct {
+ int stream;
+ offset_t sample_offset;
+ unsigned int sample_size;
+ int64_t pts;
+ int keyframe;
+} film_sample_t;
+
+typedef struct FilmDemuxContext {
+ int video_stream_index;
+ int audio_stream_index;
+
+ unsigned int audio_type;
+ unsigned int audio_samplerate;
+ unsigned int audio_bits;
+ unsigned int audio_channels;
+
+ unsigned int video_type;
+ unsigned int sample_count;
+ film_sample_t *sample_table;
+ unsigned int current_sample;
+
+ unsigned int base_clock;
+ unsigned int version;
+ int cvid_extra_bytes; /* the number of bytes thrown into the Cinepak
+ * chunk header to throw off decoders */
+
+ /* buffer used for interleaving stereo PCM data */
+ unsigned char *stereo_buffer;
+ int stereo_buffer_size;
+} FilmDemuxContext;
+
+static int film_probe(AVProbeData *p)
+{
+ if (p->buf_size < 4)
+ return 0;
+
+ if (BE_32(&p->buf[0]) != FILM_TAG)
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int film_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ unsigned char scratch[256];
+ int i;
+ unsigned int data_offset;
+ unsigned int audio_frame_counter;
+
+ film->sample_table = NULL;
+ film->stereo_buffer = NULL;
+ film->stereo_buffer_size = 0;
+
+ /* load the main FILM header */
+ if (get_buffer(pb, scratch, 16) != 16)
+ return AVERROR_IO;
+ data_offset = BE_32(&scratch[4]);
+ film->version = BE_32(&scratch[8]);
+
+ /* load the FDSC chunk */
+ if (film->version == 0) {
+ /* special case for Lemmings .film files; 20-byte header */
+ if (get_buffer(pb, scratch, 20) != 20)
+ return AVERROR_IO;
+ /* make some assumptions about the audio parameters */
+ film->audio_type = CODEC_ID_PCM_S8;
+ film->audio_samplerate = 22050;
+ film->audio_channels = 1;
+ film->audio_bits = 8;
+ } else {
+ /* normal Saturn .cpk files; 32-byte header */
+ if (get_buffer(pb, scratch, 32) != 32)
+ return AVERROR_IO;
+ film->audio_samplerate = BE_16(&scratch[24]);
+ film->audio_channels = scratch[21];
+ film->audio_bits = scratch[22];
+ if (film->audio_bits == 8)
+ film->audio_type = CODEC_ID_PCM_S8;
+ else if (film->audio_bits == 16)
+ film->audio_type = CODEC_ID_PCM_S16BE;
+ else
+ film->audio_type = 0;
+ }
+
+ if (BE_32(&scratch[0]) != FDSC_TAG)
+ return AVERROR_INVALIDDATA;
+
+ film->cvid_extra_bytes = 0;
+ if (BE_32(&scratch[8]) == CVID_TAG) {
+ film->video_type = CODEC_ID_CINEPAK;
+ if (film->version)
+ film->cvid_extra_bytes = 2;
+ else
+ film->cvid_extra_bytes = 6; /* Lemmings 3DO case */
+ } else
+ film->video_type = 0;
+
+ /* initialize the decoder streams */
+ if (film->video_type) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ film->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = film->video_type;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = BE_32(&scratch[16]);
+ st->codec->height = BE_32(&scratch[12]);
+ }
+
+ if (film->audio_type) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ film->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = film->audio_type;
+ st->codec->codec_tag = 1;
+ st->codec->channels = film->audio_channels;
+ st->codec->bits_per_sample = film->audio_bits;
+ st->codec->sample_rate = film->audio_samplerate;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ st->codec->block_align = st->codec->channels *
+ st->codec->bits_per_sample / 8;
+ }
+
+ /* load the sample table */
+ if (get_buffer(pb, scratch, 16) != 16)
+ return AVERROR_IO;
+ if (BE_32(&scratch[0]) != STAB_TAG)
+ return AVERROR_INVALIDDATA;
+ film->base_clock = BE_32(&scratch[8]);
+ film->sample_count = BE_32(&scratch[12]);
+ if(film->sample_count >= UINT_MAX / sizeof(film_sample_t))
+ return -1;
+ film->sample_table = av_malloc(film->sample_count * sizeof(film_sample_t));
+
+ for(i=0; i<s->nb_streams; i++)
+ av_set_pts_info(s->streams[i], 33, 1, film->base_clock);
+
+ audio_frame_counter = 0;
+ for (i = 0; i < film->sample_count; i++) {
+ /* load the next sample record and transfer it to an internal struct */
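+ /* a pts field of 0xFFFFFFFF marks an audio sample; otherwise the low
+ * 31 bits are the video pts and the top bit flags a non-keyframe */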
+ if (get_buffer(pb, scratch, 16) != 16) {
+ av_free(film->sample_table);
+ return AVERROR_IO;
+ }
+ film->sample_table[i].sample_offset =
+ data_offset + BE_32(&scratch[0]);
+ film->sample_table[i].sample_size = BE_32(&scratch[4]);
+ if (BE_32(&scratch[8]) == 0xFFFFFFFF) {
+ film->sample_table[i].stream = film->audio_stream_index;
+ film->sample_table[i].pts = audio_frame_counter;
+ film->sample_table[i].pts *= film->base_clock;
+ film->sample_table[i].pts /= film->audio_samplerate;
+
+ audio_frame_counter += (film->sample_table[i].sample_size /
+ (film->audio_channels * film->audio_bits / 8));
+ } else {
+ film->sample_table[i].stream = film->video_stream_index;
+ film->sample_table[i].pts = BE_32(&scratch[8]) & 0x7FFFFFFF;
+ film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
+ }
+ }
+
+ film->current_sample = 0;
+
+ return 0;
+}
+
+static int film_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ film_sample_t *sample;
+ int ret = 0;
+ int i;
+ int left, right;
+
+ if (film->current_sample >= film->sample_count)
+ return AVERROR_IO;
+
+ sample = &film->sample_table[film->current_sample];
+
+ /* position the stream (will probably be there anyway) */
+ url_fseek(pb, sample->sample_offset, SEEK_SET);
+
+ /* do a special song and dance when loading FILM Cinepak chunks */
+ if ((sample->stream == film->video_stream_index) &&
+ (film->video_type == CODEC_ID_CINEPAK)) {
+ if (av_new_packet(pkt, sample->sample_size - film->cvid_extra_bytes))
+ return AVERROR_NOMEM;
+ if(pkt->size < 10)
+ return -1;
+ pkt->pos= url_ftell(pb);
+ ret = get_buffer(pb, pkt->data, 10);
+ /* skip the non-spec CVID bytes */
+ url_fseek(pb, film->cvid_extra_bytes, SEEK_CUR);
+ ret += get_buffer(pb, pkt->data + 10,
+ sample->sample_size - 10 - film->cvid_extra_bytes);
+ if (ret != sample->sample_size - film->cvid_extra_bytes)
+ ret = AVERROR_IO;
+ } else if ((sample->stream == film->audio_stream_index) &&
+ (film->audio_channels == 2)) {
+ /* stereo PCM needs to be interleaved */
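+ /* FILM stores a stereo block as all left-channel samples followed by
+ * all right-channel samples; the loop below interleaves them into
+ * alternating left/right samples (two bytes at a time for 16-bit audio) */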
+
+ if (av_new_packet(pkt, sample->sample_size))
+ return AVERROR_NOMEM;
+
+ /* make sure the interleave buffer is large enough */
+ if (sample->sample_size > film->stereo_buffer_size) {
+ av_free(film->stereo_buffer);
+ film->stereo_buffer_size = sample->sample_size;
+ film->stereo_buffer = av_malloc(film->stereo_buffer_size);
+ }
+
+ pkt->pos= url_ftell(pb);
+ ret = get_buffer(pb, film->stereo_buffer, sample->sample_size);
+ if (ret != sample->sample_size)
+ ret = AVERROR_IO;
+
+ left = 0;
+ right = sample->sample_size / 2;
+ for (i = 0; i < sample->sample_size; ) {
+ if (film->audio_bits == 8) {
+ pkt->data[i++] = film->stereo_buffer[left++];
+ pkt->data[i++] = film->stereo_buffer[right++];
+ } else {
+ pkt->data[i++] = film->stereo_buffer[left++];
+ pkt->data[i++] = film->stereo_buffer[left++];
+ pkt->data[i++] = film->stereo_buffer[right++];
+ pkt->data[i++] = film->stereo_buffer[right++];
+ }
+ }
+ } else {
+ ret= av_get_packet(pb, pkt, sample->sample_size);
+ if (ret != sample->sample_size)
+ ret = AVERROR_IO;
+ }
+
+ pkt->stream_index = sample->stream;
+ pkt->pts = sample->pts;
+
+ film->current_sample++;
+
+ return ret;
+}
+
+static int film_read_close(AVFormatContext *s)
+{
+ FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
+
+ av_free(film->sample_table);
+ av_free(film->stereo_buffer);
+
+ return 0;
+}
+
+AVInputFormat segafilm_demuxer = {
+ "film_cpk",
+ "Sega FILM/CPK format",
+ sizeof(FilmDemuxContext),
+ film_probe,
+ film_read_header,
+ film_read_packet,
+ film_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/sgi.c b/contrib/ffmpeg/libavformat/sgi.c
new file mode 100644
index 000000000..bf0297e81
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/sgi.c
@@ -0,0 +1,460 @@
+/*
+ * SGI image format
+ * Todd Kirby <doubleshot@pacbell.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "avio.h"
+
+/* #define DEBUG */
+
+/* sgi image file signature */
+#define SGI_MAGIC 474
+
+#define SGI_HEADER_SIZE 512
+
+#define SGI_GRAYSCALE 1
+#define SGI_RGB 3
+#define SGI_RGBA 4
+
+#define SGI_SINGLE_CHAN 2
+#define SGI_MULTI_CHAN 3
+
+typedef struct SGIInfo{
+ short magic;
+ char rle;
+ char bytes_per_channel;
+ unsigned short dimension;
+ unsigned short xsize;
+ unsigned short ysize;
+ unsigned short zsize;
+} SGIInfo;
+
+
+static int sgi_probe(AVProbeData *pd)
+{
+ /* test for sgi magic */
+ if (pd->buf_size >= 2 && BE_16(&pd->buf[0]) == SGI_MAGIC) {
+ return AVPROBE_SCORE_MAX;
+ } else {
+ return 0;
+ }
+}
+
+/* read sgi header fields */
+static void read_sgi_header(ByteIOContext *f, SGIInfo *info)
+{
+ info->magic = (unsigned short) get_be16(f);
+ info->rle = get_byte(f);
+ info->bytes_per_channel = get_byte(f);
+ info->dimension = (unsigned short)get_be16(f);
+ info->xsize = (unsigned short) get_be16(f);
+ info->ysize = (unsigned short) get_be16(f);
+ info->zsize = (unsigned short) get_be16(f);
+
+ if(info->zsize > 4096)
+ info->zsize= 0;
+
+#ifdef DEBUG
+ printf("sgi header fields:\n");
+ printf(" magic: %d\n", info->magic);
+ printf(" rle: %d\n", info->rle);
+ printf(" bpc: %d\n", info->bytes_per_channel);
+ printf(" dim: %d\n", info->dimension);
+ printf(" xsize: %d\n", info->xsize);
+ printf(" ysize: %d\n", info->ysize);
+ printf(" zsize: %d\n", info->zsize);
+#endif
+
+ return;
+}
+
+
+/* read an uncompressed sgi image */
+static int read_uncompressed_sgi(const SGIInfo *si,
+ AVPicture *pict, ByteIOContext *f)
+{
+ int x, y, z, chan_offset, ret = 0;
+ uint8_t *dest_row;
+
+ /* skip header */
+ url_fseek(f, SGI_HEADER_SIZE, SEEK_SET);
+
+ pict->linesize[0] = si->xsize;
+
+ for (z = 0; z < si->zsize; z++) {
+
+#ifndef WORDS_BIGENDIAN
+ /* rgba -> bgra for rgba32 on little endian cpus */
+ if (si->zsize == 4 && z != 3)
+ chan_offset = 2 - z;
+ else
+#endif
+ chan_offset = z;
+
+ for (y = si->ysize - 1; y >= 0; y--) {
+ dest_row = pict->data[0] + (y * si->xsize * si->zsize);
+
+ for (x = 0; x < si->xsize; x++) {
+ dest_row[chan_offset] = get_byte(f);
+ dest_row += si->zsize;
+ }
+ }
+ }
+
+ return ret;
+}
+
+
+/* expand an rle row into a channel */
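+/* Each opcode byte encodes a count in its low 7 bits: if the high bit is set,
+ * 'count' literal bytes follow; otherwise the next byte is repeated 'count'
+ * times. A count of zero terminates the row. */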
+static int expand_rle_row(ByteIOContext *f, unsigned char *optr,
+ int chan_offset, int pixelstride)
+{
+ unsigned char pixel, count;
+ int length = 0;
+
+#ifndef WORDS_BIGENDIAN
+ /* rgba -> bgra for rgba32 on little endian cpus */
+ if (pixelstride == 4 && chan_offset != 3) {
+ chan_offset = 2 - chan_offset;
+ }
+#endif
+
+ optr += chan_offset;
+
+ while (1) {
+ pixel = get_byte(f);
+
+ if (!(count = (pixel & 0x7f))) {
+ return length;
+ }
+ if (pixel & 0x80) {
+ while (count--) {
+ *optr = get_byte(f);
+ length++;
+ optr += pixelstride;
+ }
+ } else {
+ pixel = get_byte(f);
+
+ while (count--) {
+ *optr = pixel;
+ length++;
+ optr += pixelstride;
+ }
+ }
+ }
+}
+
+
+/* read a run length encoded sgi image */
+static int read_rle_sgi(const SGIInfo *sgi_info,
+ AVPicture *pict, ByteIOContext *f)
+{
+ uint8_t *dest_row;
+ unsigned long *start_table;
+ int y, z, xsize, ysize, zsize, tablen;
+ long start_offset;
+ int ret = 0;
+
+ xsize = sgi_info->xsize;
+ ysize = sgi_info->ysize;
+ zsize = sgi_info->zsize;
+
+ /* skip header */
+ url_fseek(f, SGI_HEADER_SIZE, SEEK_SET);
+
+ /* size of rle offset and length tables */
+ tablen = ysize * zsize * sizeof(long);
+
+ start_table = (unsigned long *)av_malloc(tablen);
+
+ if (!get_buffer(f, (uint8_t *)start_table, tablen)) {
+ ret = AVERROR_IO;
+ goto fail;
+ }
+
+ /* skip run length table */
+ url_fseek(f, tablen, SEEK_CUR);
+
+ for (z = 0; z < zsize; z++) {
+ for (y = 0; y < ysize; y++) {
+ dest_row = pict->data[0] + (ysize - 1 - y) * (xsize * zsize);
+
+ start_offset = BE_32(&start_table[y + z * ysize]);
+
+ /* don't seek if already at the next rle start offset */
+ if (url_ftell(f) != start_offset) {
+ url_fseek(f, start_offset, SEEK_SET);
+ }
+
+ if (expand_rle_row(f, dest_row, z, zsize) != xsize) {
+ ret = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ av_free(start_table);
+
+ return ret;
+}
+
+
+static int sgi_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ SGIInfo sgi_info, *s = &sgi_info;
+ AVImageInfo info1, *info = &info1;
+ int ret;
+
+ read_sgi_header(f, s);
+
+ if (s->bytes_per_channel != 1) {
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* check for supported image dimensions */
+ if (s->dimension != 2 && s->dimension != 3) {
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (s->zsize == SGI_GRAYSCALE) {
+ info->pix_fmt = PIX_FMT_GRAY8;
+ } else if (s->zsize == SGI_RGB) {
+ info->pix_fmt = PIX_FMT_RGB24;
+ } else if (s->zsize == SGI_RGBA) {
+ info->pix_fmt = PIX_FMT_RGBA32;
+ } else {
+ return AVERROR_INVALIDDATA;
+ }
+
+ info->width = s->xsize;
+ info->height = s->ysize;
+
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ return ret;
+
+ if (s->rle) {
+ return read_rle_sgi(s, &info->pict, f);
+ } else {
+ return read_uncompressed_sgi(s, &info->pict, f);
+ }
+
+ return 0; /* not reached */
+}
+
+#ifdef CONFIG_MUXERS
+static void write_sgi_header(ByteIOContext *f, const SGIInfo *info)
+{
+ int i;
+
+ put_be16(f, SGI_MAGIC);
+ put_byte(f, info->rle);
+ put_byte(f, info->bytes_per_channel);
+ put_be16(f, info->dimension);
+ put_be16(f, info->xsize);
+ put_be16(f, info->ysize);
+ put_be16(f, info->zsize);
+
+ /* The rest are constant in this implementation */
+ put_be32(f, 0L); /* pixmin */
+ put_be32(f, 255L); /* pixmax */
+ put_be32(f, 0L); /* dummy */
+
+ /* name */
+ for (i = 0; i < 80; i++) {
+ put_byte(f, 0);
+ }
+
+ put_be32(f, 0L); /* colormap */
+
+ /* The rest of the 512 byte header is unused. */
+ for (i = 0; i < 404; i++) {
+ put_byte(f, 0);
+ }
+}
+
+
+static int rle_row(ByteIOContext *f, char *row, int stride, int rowsize)
+{
+ int length, count, i, x;
+ char *start, repeat = 0;
+
+ for (x = rowsize, length = 0; x > 0;) {
+ start = row;
+ row += (2 * stride);
+ x -= 2;
+
+ while (x > 0 && (row[-2 * stride] != row[-1 * stride] ||
+ row[-1 * stride] != row[0])) {
+ row += stride;
+ x--;
+ };
+
+ row -= (2 * stride);
+ x += 2;
+
+ count = (row - start) / stride;
+ while (count > 0) {
+ i = count > 126 ? 126 : count;
+ count -= i;
+
+ put_byte(f, 0x80 | i);
+ length++;
+
+ while (i > 0) {
+ put_byte(f, *start);
+ start += stride;
+ i--;
+ length++;
+ };
+ };
+
+ if (x <= 0) {
+ break;
+ }
+
+ start = row;
+ repeat = row[0];
+
+ row += stride;
+ x--;
+
+ while (x > 0 && *row == repeat) {
+ row += stride;
+ x--;
+ };
+
+ count = (row - start) / stride;
+ while (count > 0) {
+ i = count > 126 ? 126 : count;
+ count -= i;
+
+ put_byte(f, i);
+ length++;
+
+ put_byte(f, repeat);
+ length++;
+ };
+ };
+
+ length++;
+
+ put_byte(f, 0);
+ return (length);
+}
+
+
+static int sgi_write(ByteIOContext *pb, AVImageInfo *info)
+{
+ SGIInfo sgi_info, *si = &sgi_info;
+ long *offsettab, *lengthtab;
+ int i, y, z;
+ int tablesize, chan_offset;
+ uint8_t *srcrow;
+
+ si->xsize = info->width;
+ si->ysize = info->height;
+ si->rle = 1;
+ si->bytes_per_channel = 1;
+
+ switch(info->pix_fmt) {
+ case PIX_FMT_GRAY8:
+ si->dimension = SGI_SINGLE_CHAN;
+ si->zsize = SGI_GRAYSCALE;
+ break;
+ case PIX_FMT_RGB24:
+ si->dimension = SGI_MULTI_CHAN;
+ si->zsize = SGI_RGB;
+ break;
+ case PIX_FMT_RGBA32:
+ si->dimension = SGI_MULTI_CHAN;
+ si->zsize = SGI_RGBA;
+ break;
+ default:
+ return AVERROR_INVALIDDATA;
+ }
+
+ write_sgi_header(pb, si);
+
+ tablesize = si->zsize * si->ysize * sizeof(long);
+
+ /* skip rle offset and length tables, write them at the end. */
+ url_fseek(pb, tablesize * 2, SEEK_CUR);
+ put_flush_packet(pb);
+
+ lengthtab = av_malloc(tablesize);
+ offsettab = av_malloc(tablesize);
+
+ for (z = 0; z < si->zsize; z++) {
+
+#ifndef WORDS_BIGENDIAN
+ /* rgba -> bgra for rgba32 on little endian cpus */
+ if (si->zsize == 4 && z != 3)
+ chan_offset = 2 - z;
+ else
+#endif
+ chan_offset = z;
+
+ srcrow = info->pict.data[0] + chan_offset;
+
+ for (y = si->ysize -1; y >= 0; y--) {
+ offsettab[(z * si->ysize) + y] = url_ftell(pb);
+ lengthtab[(z * si->ysize) + y] = rle_row(pb, srcrow,
+ si->zsize, si->xsize);
+ srcrow += info->pict.linesize[0];
+ }
+ }
+
+ url_fseek(pb, 512, SEEK_SET);
+
+ /* write offset table */
+ for (i = 0; i < (si->ysize * si->zsize); i++) {
+ put_be32(pb, offsettab[i]);
+ }
+
+ /* write length table */
+ for (i = 0; i < (si->ysize * si->zsize); i++) {
+ put_be32(pb, lengthtab[i]);
+ }
+
+ put_flush_packet(pb);
+
+ av_free(lengthtab);
+ av_free(offsettab);
+
+ return 0;
+}
+#endif // CONFIG_MUXERS
+
+AVImageFormat sgi_image_format = {
+ "sgi",
+ "sgi,rgb,rgba,bw",
+ sgi_probe,
+ sgi_read,
+ (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_RGBA32),
+#ifdef CONFIG_MUXERS
+ sgi_write,
+#else
+ NULL,
+#endif // CONFIG_MUXERS
+};
diff --git a/contrib/ffmpeg/libavformat/sierravmd.c b/contrib/ffmpeg/libavformat/sierravmd.c
new file mode 100644
index 000000000..92dbce91d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/sierravmd.c
@@ -0,0 +1,302 @@
+/*
+ * Sierra VMD Format Demuxer
+ * Copyright (c) 2004 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file sierravmd.c
+ * Sierra VMD file demuxer
+ * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
+ * for more information on the Sierra VMD file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#include "avformat.h"
+
+#define VMD_HEADER_SIZE 0x0330
+#define BYTES_PER_FRAME_RECORD 16
+
+typedef struct {
+ int stream_index;
+ offset_t frame_offset;
+ unsigned int frame_size;
+ int64_t pts;
+ int keyframe;
+ unsigned char frame_record[BYTES_PER_FRAME_RECORD];
+} vmd_frame_t;
+
+typedef struct VmdDemuxContext {
+ int video_stream_index;
+ int audio_stream_index;
+
+ unsigned int frame_count;
+ unsigned int frames_per_block;
+ vmd_frame_t *frame_table;
+ unsigned int current_frame;
+
+ int sample_rate;
+ int64_t audio_sample_counter;
+ int skiphdr;
+
+ unsigned char vmd_header[VMD_HEADER_SIZE];
+} VmdDemuxContext;
+
+static int vmd_probe(AVProbeData *p)
+{
+ if (p->buf_size < 2)
+ return 0;
+
+ /* check if the first 2 bytes of the file contain the appropriate size
+ * of a VMD header chunk */
+ if (LE_16(&p->buf[0]) != VMD_HEADER_SIZE - 2)
+ return 0;
+
+ /* only return half certainty since this check is a bit sketchy */
+ return AVPROBE_SCORE_MAX / 2;
+}
+
+static int vmd_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ VmdDemuxContext *vmd = (VmdDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st, *vst;
+ unsigned int toc_offset;
+ unsigned char *raw_frame_table;
+ int raw_frame_table_size;
+ offset_t current_offset;
+ int i, j;
+ unsigned int total_frames;
+ int64_t pts_inc = 1;
+ int64_t current_video_pts = 0, current_audio_pts = 0;
+ unsigned char chunk[BYTES_PER_FRAME_RECORD];
+ int num, den;
+ int sound_buffers;
+
+ /* fetch the main header, including the 2 header length bytes */
+ url_fseek(pb, 0, SEEK_SET);
+ if (get_buffer(pb, vmd->vmd_header, VMD_HEADER_SIZE) != VMD_HEADER_SIZE)
+ return AVERROR_IO;
+
+ /* start up the decoders */
+ vst = av_new_stream(s, 0);
+ if (!vst)
+ return AVERROR_NOMEM;
+ av_set_pts_info(vst, 33, 1, 10);
+ vmd->video_stream_index = vst->index;
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = CODEC_ID_VMDVIDEO;
+ vst->codec->codec_tag = 0; /* no fourcc */
+ vst->codec->width = LE_16(&vmd->vmd_header[12]);
+ vst->codec->height = LE_16(&vmd->vmd_header[14]);
+ vst->codec->extradata_size = VMD_HEADER_SIZE;
+ vst->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
+ memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE);
+
+ /* if sample rate is 0, assume no audio */
+ vmd->sample_rate = LE_16(&vmd->vmd_header[804]);
+ if (vmd->sample_rate) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ vmd->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_VMDAUDIO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->channels = (vmd->vmd_header[811] & 0x80) ? 2 : 1;
+ st->codec->sample_rate = vmd->sample_rate;
+ st->codec->block_align = LE_16(&vmd->vmd_header[806]);
+ if (st->codec->block_align & 0x8000) {
+ st->codec->bits_per_sample = 16;
+ st->codec->block_align = -(st->codec->block_align - 0x10000);
+ } else {
+ st->codec->bits_per_sample = 8;
+ }
+ st->codec->bit_rate = st->codec->sample_rate *
+ st->codec->bits_per_sample * st->codec->channels;
+
+ /* calculate pts */
+ num = st->codec->block_align;
+ den = st->codec->sample_rate * st->codec->channels;
+ av_reduce(&den, &num, den, num, (1UL<<31)-1);
+ av_set_pts_info(vst, 33, num, den);
+ av_set_pts_info(st, 33, num, den);
+ pts_inc = num;
+ }
+
+ toc_offset = LE_32(&vmd->vmd_header[812]);
+ vmd->frame_count = LE_16(&vmd->vmd_header[6]);
+ vmd->frames_per_block = LE_16(&vmd->vmd_header[18]);
+ url_fseek(pb, toc_offset, SEEK_SET);
+
+ raw_frame_table = NULL;
+ vmd->frame_table = NULL;
+ sound_buffers = LE_16(&vmd->vmd_header[808]);
+ raw_frame_table_size = vmd->frame_count * 6;
+ raw_frame_table = av_malloc(raw_frame_table_size);
+ if(vmd->frame_count * vmd->frames_per_block >= UINT_MAX / sizeof(vmd_frame_t)){
+ av_log(s, AV_LOG_ERROR, "vmd->frame_count * vmd->frames_per_block too large\n");
+ return -1;
+ }
+ vmd->frame_table = av_malloc((vmd->frame_count * vmd->frames_per_block + sound_buffers) * sizeof(vmd_frame_t));
+ if (!raw_frame_table || !vmd->frame_table) {
+ av_free(raw_frame_table);
+ av_free(vmd->frame_table);
+ return AVERROR_NOMEM;
+ }
+ if (get_buffer(pb, raw_frame_table, raw_frame_table_size) !=
+ raw_frame_table_size) {
+ av_free(raw_frame_table);
+ av_free(vmd->frame_table);
+ return AVERROR_IO;
+ }
+
+ total_frames = 0;
+ for (i = 0; i < vmd->frame_count; i++) {
+
+ current_offset = LE_32(&raw_frame_table[6 * i + 2]);
+
+ /* handle each entry in index block */
+ for (j = 0; j < vmd->frames_per_block; j++) {
+ int type;
+ uint32_t size;
+
+ get_buffer(pb, chunk, BYTES_PER_FRAME_RECORD);
+ type = chunk[0];
+ size = LE_32(&chunk[2]);
+ if(!size)
+ continue;
+ switch(type) {
+ case 1: /* Audio Chunk */
+ /* first audio chunk contains several audio buffers */
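+ /* that first chunk starts with a 32-bit little-endian mask: each set
+ * bit marks a silent sub-buffer (size 0), each clear bit a sub-buffer
+ * of block_align bytes rounded up to an even size */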
+ if(current_audio_pts){
+ vmd->frame_table[total_frames].frame_offset = current_offset;
+ vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index;
+ vmd->frame_table[total_frames].frame_size = size;
+ memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
+ vmd->frame_table[total_frames].pts = current_audio_pts;
+ total_frames++;
+ current_audio_pts += pts_inc;
+ }else{
+ uint32_t flags;
+ int k;
+ int noff;
+ int64_t pos;
+
+ pos = url_ftell(pb);
+ url_fseek(pb, current_offset, SEEK_SET);
+ flags = get_le32(pb);
+ noff = 4;
+ url_fseek(pb, pos, SEEK_SET);
+ av_log(s, AV_LOG_DEBUG, "Sound mapping = %08X (%i bufs)\n", flags, sound_buffers);
+ for(k = 0; k < sound_buffers - 1; k++){
+ if(flags & 1) { /* silent block */
+ vmd->frame_table[total_frames].frame_size = 0;
+ }else{
+ vmd->frame_table[total_frames].frame_size = st->codec->block_align + (st->codec->block_align & 1);
+ }
+ noff += vmd->frame_table[total_frames].frame_size;
+ vmd->frame_table[total_frames].frame_offset = current_offset + noff;
+ vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index;
+ memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
+ vmd->frame_table[total_frames].pts = current_audio_pts;
+ total_frames++;
+ current_audio_pts += pts_inc;
+ flags >>= 1;
+ }
+ }
+ break;
+ case 2: /* Video Chunk */
+ vmd->frame_table[total_frames].frame_offset = current_offset;
+ vmd->frame_table[total_frames].stream_index = vmd->video_stream_index;
+ vmd->frame_table[total_frames].frame_size = size;
+ memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
+ vmd->frame_table[total_frames].pts = current_video_pts;
+ total_frames++;
+ break;
+ }
+ current_offset += size;
+ }
+ current_video_pts += pts_inc;
+ }
+
+ av_free(raw_frame_table);
+
+ vmd->current_frame = 0;
+ vmd->frame_count = total_frames;
+
+ return 0;
+}
+
+static int vmd_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ VmdDemuxContext *vmd = (VmdDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret = 0;
+ vmd_frame_t *frame;
+
+ if (vmd->current_frame >= vmd->frame_count)
+ return AVERROR_IO;
+
+ frame = &vmd->frame_table[vmd->current_frame];
+ /* position the stream (will probably be there already) */
+ url_fseek(pb, frame->frame_offset, SEEK_SET);
+
+ if (av_new_packet(pkt, frame->frame_size + BYTES_PER_FRAME_RECORD))
+ return AVERROR_NOMEM;
+ pkt->pos= url_ftell(pb);
+ memcpy(pkt->data, frame->frame_record, BYTES_PER_FRAME_RECORD);
+ ret = get_buffer(pb, pkt->data + BYTES_PER_FRAME_RECORD,
+ frame->frame_size);
+
+ if (ret != frame->frame_size) {
+ av_free_packet(pkt);
+ ret = AVERROR_IO;
+ }
+ pkt->stream_index = frame->stream_index;
+ pkt->pts = frame->pts;
+ av_log(NULL, AV_LOG_INFO, " dispatching %s frame with %d bytes and pts %"PRId64"\n",
+ (frame->frame_record[0] == 0x02) ? "video" : "audio",
+ frame->frame_size + BYTES_PER_FRAME_RECORD,
+ pkt->pts);
+
+ vmd->current_frame++;
+
+ return ret;
+}
+
+static int vmd_read_close(AVFormatContext *s)
+{
+ VmdDemuxContext *vmd = (VmdDemuxContext *)s->priv_data;
+
+ av_free(vmd->frame_table);
+
+ return 0;
+}
+
+AVInputFormat vmd_demuxer = {
+ "vmd",
+ "Sierra VMD format",
+ sizeof(VmdDemuxContext),
+ vmd_probe,
+ vmd_read_header,
+ vmd_read_packet,
+ vmd_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/smacker.c b/contrib/ffmpeg/libavformat/smacker.c
new file mode 100644
index 000000000..a08bd2d9f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/smacker.c
@@ -0,0 +1,345 @@
+/*
+ * Smacker demuxer
+ * Copyright (c) 2006 Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Based on http://wiki.multimedia.cx/index.php?title=Smacker
+ */
+
+#include "avformat.h"
+#include "riff.h"
+#include "bswap.h"
+
+#define SMACKER_PAL 0x01
+
+enum SAudFlags {
+ SMK_AUD_PACKED = 0x80000000,
+ SMK_AUD_16BITS = 0x20000000,
+ SMK_AUD_STEREO = 0x10000000,
+ SMK_AUD_BINKAUD = 0x08000000,
+ SMK_AUD_USEDCT = 0x04000000
+};
+
+typedef struct SmackerContext {
+ /* Smacker file header */
+ uint32_t magic;
+ uint32_t width, height;
+ uint32_t frames;
+ int pts_inc;
+ uint32_t flags;
+ uint32_t audio[7];
+ uint32_t treesize;
+ uint32_t mmap_size, mclr_size, full_size, type_size;
+ uint32_t rates[7];
+ uint32_t pad;
+ /* frame info */
+ uint32_t *frm_size;
+ uint8_t *frm_flags;
+ /* internal variables */
+ int cur_frame;
+ int is_ver4;
+ int64_t cur_pts;
+ /* current frame for demuxing */
+ uint8_t pal[768];
+ int indexes[7];
+ int videoindex;
+ uint8_t *bufs[7];
+ int buf_sizes[7];
+ int stream_id[7];
+ int curstream;
+ offset_t nextpos;
+ int64_t aud_pts[7];
+} SmackerContext;
+
+typedef struct SmackerFrame {
+ int64_t pts;
+ int stream;
+} SmackerFrame;
+
+/* palette used in Smacker */
+static const uint8_t smk_pal[64] = {
+ 0x00, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x18, 0x1C,
+ 0x20, 0x24, 0x28, 0x2C, 0x30, 0x34, 0x38, 0x3C,
+ 0x41, 0x45, 0x49, 0x4D, 0x51, 0x55, 0x59, 0x5D,
+ 0x61, 0x65, 0x69, 0x6D, 0x71, 0x75, 0x79, 0x7D,
+ 0x82, 0x86, 0x8A, 0x8E, 0x92, 0x96, 0x9A, 0x9E,
+ 0xA2, 0xA6, 0xAA, 0xAE, 0xB2, 0xB6, 0xBA, 0xBE,
+ 0xC3, 0xC7, 0xCB, 0xCF, 0xD3, 0xD7, 0xDB, 0xDF,
+ 0xE3, 0xE7, 0xEB, 0xEF, 0xF3, 0xF7, 0xFB, 0xFF
+};
+
+
+static int smacker_probe(AVProbeData *p)
+{
+ if (p->buf_size < 4)
+ return 0;
+ if(p->buf[0] == 'S' && p->buf[1] == 'M' && p->buf[2] == 'K'
+ && (p->buf[3] == '2' || p->buf[3] == '4'))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int smacker_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ SmackerContext *smk = (SmackerContext *)s->priv_data;
+ AVStream *st, *ast[7];
+ int i, ret;
+ int tbase;
+
+ /* read and check header */
+ smk->magic = get_le32(pb);
+ if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4'))
+ return -1;
+ smk->width = get_le32(pb);
+ smk->height = get_le32(pb);
+ smk->frames = get_le32(pb);
+ smk->pts_inc = (int32_t)get_le32(pb);
+ smk->flags = get_le32(pb);
+ for(i = 0; i < 7; i++)
+ smk->audio[i] = get_le32(pb);
+ smk->treesize = get_le32(pb);
+
+ if(smk->treesize >= UINT_MAX/4){ // smk->treesize + 16 must not overflow (this check is probably redundant)
+ av_log(s, AV_LOG_ERROR, "treesize too large\n");
+ return -1;
+ }
+
+//FIXME remove extradata "rebuilding"
+ smk->mmap_size = get_le32(pb);
+ smk->mclr_size = get_le32(pb);
+ smk->full_size = get_le32(pb);
+ smk->type_size = get_le32(pb);
+ for(i = 0; i < 7; i++)
+ smk->rates[i] = get_le32(pb);
+ smk->pad = get_le32(pb);
+ /* setup data */
+ if(smk->frames > 0xFFFFFF) {
+ av_log(s, AV_LOG_ERROR, "Too many frames: %i\n", smk->frames);
+ return -1;
+ }
+ smk->frm_size = av_malloc(smk->frames * 4);
+ smk->frm_flags = av_malloc(smk->frames);
+
+ smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2'));
+
+ /* read frame info */
+ for(i = 0; i < smk->frames; i++) {
+ smk->frm_size[i] = get_le32(pb);
+ }
+ for(i = 0; i < smk->frames; i++) {
+ smk->frm_flags[i] = get_byte(pb);
+ }
+
+ /* init video codec */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ smk->videoindex = st->index;
+ st->codec->width = smk->width;
+ st->codec->height = smk->height;
+ st->codec->pix_fmt = PIX_FMT_PAL8;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_SMACKVIDEO;
+ st->codec->codec_tag = smk->magic;
+ /* Smacker uses 100000 as internal timebase */
+ if(smk->pts_inc < 0)
+ smk->pts_inc = -smk->pts_inc;
+ else
+ smk->pts_inc *= 100;
+ tbase = 100000;
+ av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1);
+ av_set_pts_info(st, 33, smk->pts_inc, tbase);
+ /* handle possible audio streams */
+ for(i = 0; i < 7; i++) {
+ smk->indexes[i] = -1;
+ if((smk->rates[i] & 0xFFFFFF) && !(smk->rates[i] & SMK_AUD_BINKAUD)){
+ ast[i] = av_new_stream(s, 0);
+ smk->indexes[i] = ast[i]->index;
+ ast[i]->codec->codec_type = CODEC_TYPE_AUDIO;
+ ast[i]->codec->codec_id = (smk->rates[i] & SMK_AUD_PACKED) ? CODEC_ID_SMACKAUDIO : CODEC_ID_PCM_U8;
+ ast[i]->codec->codec_tag = MKTAG('S', 'M', 'K', 'A');
+ ast[i]->codec->channels = (smk->rates[i] & SMK_AUD_STEREO) ? 2 : 1;
+ ast[i]->codec->sample_rate = smk->rates[i] & 0xFFFFFF;
+ ast[i]->codec->bits_per_sample = (smk->rates[i] & SMK_AUD_16BITS) ? 16 : 8;
+ if(ast[i]->codec->bits_per_sample == 16 && ast[i]->codec->codec_id == CODEC_ID_PCM_U8)
+ ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE;
+ av_set_pts_info(ast[i], 64, 1, ast[i]->codec->sample_rate
+ * ast[i]->codec->channels * ast[i]->codec->bits_per_sample / 8);
+ }
+ }
+
+
+    /* load the trees into extradata; they will be unpacked by the decoder */
+ st->codec->extradata = av_malloc(smk->treesize + 16);
+ st->codec->extradata_size = smk->treesize + 16;
+ if(!st->codec->extradata){
+ av_log(s, AV_LOG_ERROR, "Cannot allocate %i bytes of extradata\n", smk->treesize + 16);
+ av_free(smk->frm_size);
+ av_free(smk->frm_flags);
+ return -1;
+ }
+ ret = get_buffer(pb, st->codec->extradata + 16, st->codec->extradata_size - 16);
+ if(ret != st->codec->extradata_size - 16){
+ av_free(smk->frm_size);
+ av_free(smk->frm_flags);
+ return AVERROR_IO;
+ }
+ ((int32_t*)st->codec->extradata)[0] = le2me_32(smk->mmap_size);
+ ((int32_t*)st->codec->extradata)[1] = le2me_32(smk->mclr_size);
+ ((int32_t*)st->codec->extradata)[2] = le2me_32(smk->full_size);
+ ((int32_t*)st->codec->extradata)[3] = le2me_32(smk->type_size);
+
+ smk->curstream = -1;
+ smk->nextpos = url_ftell(pb);
+
+ return 0;
+}
+
+
+static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ SmackerContext *smk = (SmackerContext *)s->priv_data;
+ int flags;
+ int ret;
+ int i;
+ int frame_size = 0;
+ int palchange = 0;
+ int pos;
+
+ if (url_feof(&s->pb) || smk->cur_frame >= smk->frames)
+ return -EIO;
+
+    /* once all buffered stream packets have been returned, parse the next frame */
+ if(smk->curstream < 0) {
+ url_fseek(&s->pb, smk->nextpos, 0);
+ frame_size = smk->frm_size[smk->cur_frame] & (~3);
+ flags = smk->frm_flags[smk->cur_frame];
+ /* handle palette change event */
+ pos = url_ftell(&s->pb);
+ if(flags & SMACKER_PAL){
+ int size, sz, t, off, j, pos;
+ uint8_t *pal = smk->pal;
+ uint8_t oldpal[768];
+
+ memcpy(oldpal, pal, 768);
+ size = get_byte(&s->pb);
+ size = size * 4 - 1;
+ frame_size -= size;
+ frame_size--;
+ sz = 0;
+ pos = url_ftell(&s->pb) + size;
+ while(sz < 256){
+ t = get_byte(&s->pb);
+ if(t & 0x80){ /* skip palette entries */
+ sz += (t & 0x7F) + 1;
+ pal += ((t & 0x7F) + 1) * 3;
+ } else if(t & 0x40){ /* copy with offset */
+ off = get_byte(&s->pb) * 3;
+ j = (t & 0x3F) + 1;
+ while(j-- && sz < 256) {
+ *pal++ = oldpal[off + 0];
+ *pal++ = oldpal[off + 1];
+ *pal++ = oldpal[off + 2];
+ sz++;
+ off += 3;
+ }
+ } else { /* new entries */
+ *pal++ = smk_pal[t];
+ *pal++ = smk_pal[get_byte(&s->pb) & 0x3F];
+ *pal++ = smk_pal[get_byte(&s->pb) & 0x3F];
+ sz++;
+ }
+ }
+ url_fseek(&s->pb, pos, 0);
+ palchange |= 1;
+ }
+ flags >>= 1;
+ smk->curstream = -1;
+        /* if audio chunks are present, push them onto a stack and retrieve them later */
+ for(i = 0; i < 7; i++) {
+ if(flags & 1) {
+ int size;
+ size = get_le32(&s->pb) - 4;
+ frame_size -= size;
+ frame_size -= 4;
+ smk->curstream++;
+ smk->bufs[smk->curstream] = av_realloc(smk->bufs[smk->curstream], size);
+ smk->buf_sizes[smk->curstream] = size;
+ ret = get_buffer(&s->pb, smk->bufs[smk->curstream], size);
+ if(ret != size)
+ return AVERROR_IO;
+ smk->stream_id[smk->curstream] = smk->indexes[i];
+ }
+ flags >>= 1;
+ }
+ if (av_new_packet(pkt, frame_size + 768))
+ return AVERROR_NOMEM;
+ if(smk->frm_size[smk->cur_frame] & 1)
+ palchange |= 2;
+ pkt->data[0] = palchange;
+ memcpy(pkt->data + 1, smk->pal, 768);
+ ret = get_buffer(&s->pb, pkt->data + 769, frame_size);
+ if(ret != frame_size)
+ return AVERROR_IO;
+ pkt->stream_index = smk->videoindex;
+ pkt->size = ret + 769;
+ smk->cur_frame++;
+ smk->nextpos = url_ftell(&s->pb);
+ } else {
+ if (av_new_packet(pkt, smk->buf_sizes[smk->curstream]))
+ return AVERROR_NOMEM;
+ memcpy(pkt->data, smk->bufs[smk->curstream], smk->buf_sizes[smk->curstream]);
+ pkt->size = smk->buf_sizes[smk->curstream];
+ pkt->stream_index = smk->stream_id[smk->curstream];
+ pkt->pts = smk->aud_pts[smk->curstream];
+ smk->aud_pts[smk->curstream] += LE_32(pkt->data);
+ smk->curstream--;
+ }
+
+ return 0;
+}
+
+static int smacker_read_close(AVFormatContext *s)
+{
+ SmackerContext *smk = (SmackerContext *)s->priv_data;
+ int i;
+
+ for(i = 0; i < 7; i++)
+ if(smk->bufs[i])
+ av_free(smk->bufs[i]);
+ if(smk->frm_size)
+ av_free(smk->frm_size);
+ if(smk->frm_flags)
+ av_free(smk->frm_flags);
+
+ return 0;
+}
+
+AVInputFormat smacker_demuxer = {
+ "smk",
+ "Smacker Video",
+ sizeof(SmackerContext),
+ smacker_probe,
+ smacker_read_header,
+ smacker_read_packet,
+ smacker_read_close,
+};
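
In smacker_read_packet() above, bit 0 of the per-frame flags byte signals a palette record and the next seven bits mark which of the up to seven audio tracks carry a chunk in this frame. A minimal standalone sketch of that interpretation (not part of the patch; describe_frame_flags is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

/* Sketch: interpret a Smacker per-frame flags byte the way
 * smacker_read_packet() does: bit 0 = palette record present,
 * bits 1..7 = audio chunk present for tracks 0..6. */
static void describe_frame_flags(uint8_t flags)
{
    int i;

    if (flags & 0x01)                     /* SMACKER_PAL */
        printf("frame carries a palette record\n");
    flags >>= 1;
    for (i = 0; i < 7; i++) {
        if (flags & 1)
            printf("frame carries audio data for track %d\n", i);
        flags >>= 1;
    }
}

int main(void)
{
    describe_frame_flags(0x05);           /* palette + track 1 audio */
    return 0;
}
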
diff --git a/contrib/ffmpeg/libavformat/sol.c b/contrib/ffmpeg/libavformat/sol.c
new file mode 100644
index 000000000..20e45f75d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/sol.c
@@ -0,0 +1,160 @@
+/*
+ * Sierra SOL demuxer
+ * Copyright Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Based on documents from Game Audio Player and the author's own research
+ */
+
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+#include "bswap.h"
+
+/* if we don't know the size in advance */
+#define AU_UNKOWN_SIZE ((uint32_t)(~0))
+
+static int sol_probe(AVProbeData *p)
+{
+ /* check file header */
+ uint16_t magic;
+ if (p->buf_size <= 14)
+ return 0;
+ magic=le2me_16(*((uint16_t*)p->buf));
+ if ((magic == 0x0B8D || magic == 0x0C0D || magic == 0x0C8D) &&
+ p->buf[2] == 'S' && p->buf[3] == 'O' &&
+ p->buf[4] == 'L' && p->buf[5] == 0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+#define SOL_DPCM 1
+#define SOL_16BIT 4
+#define SOL_STEREO 16
+
+static int sol_codec_id(int magic, int type)
+{
+ if (magic == 0x0B8D)
+ {
+ if (type & SOL_DPCM) return CODEC_ID_SOL_DPCM;
+ else return CODEC_ID_PCM_U8;
+ }
+ if (type & SOL_DPCM)
+ {
+ if (type & SOL_16BIT) return CODEC_ID_SOL_DPCM;
+ else if (magic == 0x0C8D) return CODEC_ID_SOL_DPCM;
+ else return CODEC_ID_SOL_DPCM;
+ }
+ if (type & SOL_16BIT) return CODEC_ID_PCM_S16LE;
+ return CODEC_ID_PCM_U8;
+}
+
+static int sol_codec_type(int magic, int type)
+{
+ if (magic == 0x0B8D) return 1;//SOL_DPCM_OLD;
+ if (type & SOL_DPCM)
+ {
+ if (type & SOL_16BIT) return 3;//SOL_DPCM_NEW16;
+ else if (magic == 0x0C8D) return 1;//SOL_DPCM_OLD;
+ else return 2;//SOL_DPCM_NEW8;
+ }
+ return -1;
+}
+
+static int sol_channels(int magic, int type)
+{
+ if (magic == 0x0B8D || !(type & SOL_STEREO)) return 1;
+ return 2;
+}
+
+static int sol_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int size;
+ unsigned int magic,tag;
+ ByteIOContext *pb = &s->pb;
+ unsigned int id, codec, channels, rate, type;
+ AVStream *st;
+
+    /* check the SOL header */
+ magic = get_le16(pb);
+ tag = get_le32(pb);
+ if (tag != MKTAG('S', 'O', 'L', 0))
+ return -1;
+ rate = get_le16(pb);
+ type = get_byte(pb);
+ size = get_le32(pb);
+ if (magic != 0x0B8D)
+ get_byte(pb); /* newer SOLs contain padding byte */
+
+ codec = sol_codec_id(magic, type);
+ channels = sol_channels(magic, type);
+
+ if (codec == CODEC_ID_SOL_DPCM)
+ id = sol_codec_type(magic, type);
+ else id = 0;
+
+ /* now we are ready: build format streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = id;
+ st->codec->codec_id = codec;
+ st->codec->channels = channels;
+ st->codec->sample_rate = rate;
+ av_set_pts_info(st, 64, 1, rate);
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int sol_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ int ret;
+
+ if (url_feof(&s->pb))
+ return -EIO;
+ ret= av_get_packet(&s->pb, pkt, MAX_SIZE);
+ pkt->stream_index = 0;
+
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return 0;
+}
+
+static int sol_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVInputFormat sol_demuxer = {
+ "sol",
+ "Sierra SOL Format",
+ 0,
+ sol_probe,
+ sol_read_header,
+ sol_read_packet,
+ sol_read_close,
+ pcm_read_seek,
+};
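
For reference, sol_read_header() above reads the SOL header fields back to back, all little-endian. A small standalone sketch of the same layout using plain stdio (not part of the patch; read_sol_header and the rd_le* helpers are hypothetical):

#include <stdio.h>

/* Sketch of the SOL header layout as consumed by sol_read_header():
 * u16 magic (0x0B8D / 0x0C0D / 0x0C8D), 4-byte tag "SOL\0",
 * u16 sample rate, u8 type flags, u32 data size, plus one padding
 * byte on the newer (non-0x0B8D) variants. All fields little-endian.
 * EOF handling is omitted for brevity. */
static unsigned rd_le16(FILE *f) { unsigned a = fgetc(f); return a | (fgetc(f) << 8); }
static unsigned rd_le32(FILE *f) { unsigned a = rd_le16(f); return a | (rd_le16(f) << 16); }

int read_sol_header(FILE *f)
{
    unsigned magic = rd_le16(f);
    unsigned tag   = rd_le32(f);
    unsigned rate  = rd_le16(f);
    int      type  = fgetc(f);
    unsigned size  = rd_le32(f);

    if (tag != 0x004C4F53u)               /* 'S','O','L',0 read little-endian */
        return -1;
    if (magic != 0x0B8D)
        fgetc(f);                         /* newer SOLs carry a padding byte */

    printf("magic=0x%04X rate=%u type=0x%02X size=%u\n", magic, rate, type, size);
    return 0;
}
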
diff --git a/contrib/ffmpeg/libavformat/swf.c b/contrib/ffmpeg/libavformat/swf.c
new file mode 100644
index 000000000..6029e3678
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/swf.c
@@ -0,0 +1,944 @@
+/*
+ * Flash Compatible Streaming Format
+ * Copyright (c) 2000 Fabrice Bellard.
+ * Copyright (c) 2003 Tinic Uro.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+#include "riff.h" /* for CodecTag */
+
+/* should have a generic way to indicate probable size */
+#define DUMMY_FILE_SIZE (100 * 1024 * 1024)
+#define DUMMY_DURATION 600 /* in seconds */
+
+#define TAG_END 0
+#define TAG_SHOWFRAME 1
+#define TAG_DEFINESHAPE 2
+#define TAG_FREECHARACTER 3
+#define TAG_PLACEOBJECT 4
+#define TAG_REMOVEOBJECT 5
+#define TAG_STREAMHEAD 18
+#define TAG_STREAMBLOCK 19
+#define TAG_JPEG2 21
+#define TAG_PLACEOBJECT2 26
+#define TAG_STREAMHEAD2 45
+#define TAG_VIDEOSTREAM 60
+#define TAG_VIDEOFRAME 61
+
+#define TAG_LONG 0x100
+
+/* flags for shape definition */
+#define FLAG_MOVETO 0x01
+#define FLAG_SETFILL0 0x02
+#define FLAG_SETFILL1 0x04
+
+#define AUDIO_FIFO_SIZE 65536
+
+/* character id used */
+#define BITMAP_ID 0
+#define VIDEO_ID 0
+#define SHAPE_ID 1
+
+#undef NDEBUG
+#include <assert.h>
+
+typedef struct {
+
+ offset_t duration_pos;
+ offset_t tag_pos;
+
+ int samples_per_frame;
+ int sound_samples;
+ int video_samples;
+ int swf_frame_number;
+ int video_frame_number;
+ int ms_per_frame;
+ int ch_id;
+ int tag;
+
+ uint8_t *audio_fifo;
+ int audio_in_pos;
+ int audio_out_pos;
+ int audio_size;
+
+ int video_type;
+ int audio_type;
+} SWFContext;
+
+static const CodecTag swf_codec_tags[] = {
+ {CODEC_ID_FLV1, 0x02},
+ {CODEC_ID_VP6F, 0x04},
+ {0, 0},
+};
+
+static const int sSampleRates[3][4] = {
+ {44100, 48000, 32000, 0},
+ {22050, 24000, 16000, 0},
+ {11025, 12000, 8000, 0},
+};
+
+static const int sBitRates[2][3][15] = {
+ { { 0, 32, 64, 96,128,160,192,224,256,288,320,352,384,416,448},
+ { 0, 32, 48, 56, 64, 80, 96,112,128,160,192,224,256,320,384},
+ { 0, 32, 40, 48, 56, 64, 80, 96,112,128,160,192,224,256,320}
+ },
+ { { 0, 32, 48, 56, 64, 80, 96,112,128,144,160,176,192,224,256},
+ { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160},
+ { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160}
+ },
+};
+
+static const int sSamplesPerFrame[3][3] =
+{
+ { 384, 1152, 1152 },
+ { 384, 1152, 576 },
+ { 384, 1152, 576 }
+};
+
+static const int sBitsPerSlot[3] = {
+ 32,
+ 8,
+ 8
+};
+
+static int swf_mp3_info(void *data, int *byteSize, int *samplesPerFrame, int *sampleRate, int *isMono )
+{
+ uint8_t *dataTmp = (uint8_t *)data;
+ uint32_t header = ( (uint32_t)dataTmp[0] << 24 ) | ( (uint32_t)dataTmp[1] << 16 ) | ( (uint32_t)dataTmp[2] << 8 ) | (uint32_t)dataTmp[3];
+ int layerID = 3 - ((header >> 17) & 0x03);
+ int bitRateID = ((header >> 12) & 0x0f);
+ int sampleRateID = ((header >> 10) & 0x03);
+ int bitRate = 0;
+ int bitsPerSlot = sBitsPerSlot[layerID];
+ int isPadded = ((header >> 9) & 0x01);
+
+ if ( (( header >> 21 ) & 0x7ff) != 0x7ff ) {
+ return 0;
+ }
+
+ *isMono = ((header >> 6) & 0x03) == 0x03;
+
+ if ( (header >> 19 ) & 0x01 ) {
+ *sampleRate = sSampleRates[0][sampleRateID];
+ bitRate = sBitRates[0][layerID][bitRateID] * 1000;
+ *samplesPerFrame = sSamplesPerFrame[0][layerID];
+ } else {
+ if ( (header >> 20) & 0x01 ) {
+ *sampleRate = sSampleRates[1][sampleRateID];
+ bitRate = sBitRates[1][layerID][bitRateID] * 1000;
+ *samplesPerFrame = sSamplesPerFrame[1][layerID];
+ } else {
+ *sampleRate = sSampleRates[2][sampleRateID];
+ bitRate = sBitRates[1][layerID][bitRateID] * 1000;
+ *samplesPerFrame = sSamplesPerFrame[2][layerID];
+ }
+ }
+
+ *byteSize = ( ( ( ( *samplesPerFrame * (bitRate / bitsPerSlot) ) / *sampleRate ) + isPadded ) );
+
+ return 1;
+}
+
+#ifdef CONFIG_MUXERS
+static void put_swf_tag(AVFormatContext *s, int tag)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+
+ swf->tag_pos = url_ftell(pb);
+ swf->tag = tag;
+ /* reserve some room for the tag */
+ if (tag & TAG_LONG) {
+ put_le16(pb, 0);
+ put_le32(pb, 0);
+ } else {
+ put_le16(pb, 0);
+ }
+}
+
+static void put_swf_end_tag(AVFormatContext *s)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t pos;
+ int tag_len, tag;
+
+ pos = url_ftell(pb);
+ tag_len = pos - swf->tag_pos - 2;
+ tag = swf->tag;
+ url_fseek(pb, swf->tag_pos, SEEK_SET);
+ if (tag & TAG_LONG) {
+ tag &= ~TAG_LONG;
+ put_le16(pb, (tag << 6) | 0x3f);
+ put_le32(pb, tag_len - 4);
+ } else {
+ assert(tag_len < 0x3f);
+ put_le16(pb, (tag << 6) | tag_len);
+ }
+ url_fseek(pb, pos, SEEK_SET);
+}
+
+static inline void max_nbits(int *nbits_ptr, int val)
+{
+ int n;
+
+ if (val == 0)
+ return;
+ val = abs(val);
+ n = 1;
+ while (val != 0) {
+ n++;
+ val >>= 1;
+ }
+ if (n > *nbits_ptr)
+ *nbits_ptr = n;
+}
+
+static void put_swf_rect(ByteIOContext *pb,
+ int xmin, int xmax, int ymin, int ymax)
+{
+ PutBitContext p;
+ uint8_t buf[256];
+ int nbits, mask;
+
+ init_put_bits(&p, buf, sizeof(buf));
+
+ nbits = 0;
+ max_nbits(&nbits, xmin);
+ max_nbits(&nbits, xmax);
+ max_nbits(&nbits, ymin);
+ max_nbits(&nbits, ymax);
+ mask = (1 << nbits) - 1;
+
+ /* rectangle info */
+ put_bits(&p, 5, nbits);
+ put_bits(&p, nbits, xmin & mask);
+ put_bits(&p, nbits, xmax & mask);
+ put_bits(&p, nbits, ymin & mask);
+ put_bits(&p, nbits, ymax & mask);
+
+ flush_put_bits(&p);
+ put_buffer(pb, buf, pbBufPtr(&p) - p.buf);
+}
+
+static void put_swf_line_edge(PutBitContext *pb, int dx, int dy)
+{
+ int nbits, mask;
+
+ put_bits(pb, 1, 1); /* edge */
+ put_bits(pb, 1, 1); /* line select */
+ nbits = 2;
+ max_nbits(&nbits, dx);
+ max_nbits(&nbits, dy);
+
+ mask = (1 << nbits) - 1;
+ put_bits(pb, 4, nbits - 2); /* 16 bits precision */
+ if (dx == 0) {
+ put_bits(pb, 1, 0);
+ put_bits(pb, 1, 1);
+ put_bits(pb, nbits, dy & mask);
+ } else if (dy == 0) {
+ put_bits(pb, 1, 0);
+ put_bits(pb, 1, 0);
+ put_bits(pb, nbits, dx & mask);
+ } else {
+ put_bits(pb, 1, 1);
+ put_bits(pb, nbits, dx & mask);
+ put_bits(pb, nbits, dy & mask);
+ }
+}
+
+#define FRAC_BITS 16
+
+/* put matrix */
+static void put_swf_matrix(ByteIOContext *pb,
+ int a, int b, int c, int d, int tx, int ty)
+{
+ PutBitContext p;
+ uint8_t buf[256];
+ int nbits;
+
+ init_put_bits(&p, buf, sizeof(buf));
+
+ put_bits(&p, 1, 1); /* a, d present */
+ nbits = 1;
+ max_nbits(&nbits, a);
+ max_nbits(&nbits, d);
+ put_bits(&p, 5, nbits); /* nb bits */
+ put_bits(&p, nbits, a);
+ put_bits(&p, nbits, d);
+
+ put_bits(&p, 1, 1); /* b, c present */
+ nbits = 1;
+ max_nbits(&nbits, c);
+ max_nbits(&nbits, b);
+ put_bits(&p, 5, nbits); /* nb bits */
+ put_bits(&p, nbits, c);
+ put_bits(&p, nbits, b);
+
+ nbits = 1;
+ max_nbits(&nbits, tx);
+ max_nbits(&nbits, ty);
+ put_bits(&p, 5, nbits); /* nb bits */
+ put_bits(&p, nbits, tx);
+ put_bits(&p, nbits, ty);
+
+ flush_put_bits(&p);
+ put_buffer(pb, buf, pbBufPtr(&p) - p.buf);
+}
+
+/* */
+static int swf_write_header(AVFormatContext *s)
+{
+ SWFContext *swf;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc, *audio_enc, *video_enc;
+ PutBitContext p;
+ uint8_t buf1[256];
+ int i, width, height, rate, rate_base;
+
+ swf = av_malloc(sizeof(SWFContext));
+ if (!swf)
+ return -1;
+ s->priv_data = swf;
+
+ swf->ch_id = -1;
+ swf->audio_in_pos = 0;
+ swf->audio_out_pos = 0;
+ swf->audio_size = 0;
+ swf->audio_fifo = av_malloc(AUDIO_FIFO_SIZE);
+ swf->sound_samples = 0;
+ swf->video_samples = 0;
+ swf->swf_frame_number = 0;
+ swf->video_frame_number = 0;
+
+ video_enc = NULL;
+ audio_enc = NULL;
+ for(i=0;i<s->nb_streams;i++) {
+ enc = s->streams[i]->codec;
+ if (enc->codec_type == CODEC_TYPE_AUDIO)
+ audio_enc = enc;
+ else {
+ if ( enc->codec_id == CODEC_ID_VP6F ||
+ enc->codec_id == CODEC_ID_FLV1 ||
+ enc->codec_id == CODEC_ID_MJPEG ) {
+ video_enc = enc;
+ } else {
+ av_log(enc, AV_LOG_ERROR, "SWF only supports VP6, FLV1 and MJPEG\n");
+ return -1;
+ }
+ }
+ }
+
+ if (!video_enc) {
+        /* currently, this cannot work correctly with audio only */
+ swf->video_type = 0;
+ width = 320;
+ height = 200;
+ rate = 10;
+ rate_base= 1;
+ } else {
+ swf->video_type = video_enc->codec_id;
+ width = video_enc->width;
+ height = video_enc->height;
+ rate = video_enc->time_base.den;
+ rate_base = video_enc->time_base.num;
+ }
+
+ if (!audio_enc ) {
+ swf->audio_type = 0;
+ swf->samples_per_frame = ( 44100. * rate_base ) / rate;
+ } else {
+ swf->audio_type = audio_enc->codec_id;
+ swf->samples_per_frame = ( ( audio_enc->sample_rate ) * rate_base ) / rate;
+ }
+
+ put_tag(pb, "FWS");
+ if ( video_enc && video_enc->codec_id == CODEC_ID_VP6F ) {
+ put_byte(pb, 8); /* version (version 8 and above support VP6 codec) */
+ } else if ( video_enc && video_enc->codec_id == CODEC_ID_FLV1 ) {
+ put_byte(pb, 6); /* version (version 6 and above support FLV1 codec) */
+ } else {
+ put_byte(pb, 4); /* version (should use 4 for mpeg audio support) */
+ }
+ put_le32(pb, DUMMY_FILE_SIZE); /* dummy size
+ (will be patched if not streamed) */
+
+ put_swf_rect(pb, 0, width * 20, 0, height * 20);
+ put_le16(pb, (rate * 256) / rate_base); /* frame rate */
+ swf->duration_pos = url_ftell(pb);
+ put_le16(pb, (uint16_t)(DUMMY_DURATION * (int64_t)rate / rate_base)); /* frame count */
+
+ /* define a shape with the jpeg inside */
+ if ( video_enc && (video_enc->codec_id == CODEC_ID_VP6F ||
+ video_enc->codec_id == CODEC_ID_FLV1 )) {
+ } else if ( video_enc && video_enc->codec_id == CODEC_ID_MJPEG ) {
+ put_swf_tag(s, TAG_DEFINESHAPE);
+
+ put_le16(pb, SHAPE_ID); /* ID of shape */
+ /* bounding rectangle */
+ put_swf_rect(pb, 0, width, 0, height);
+ /* style info */
+ put_byte(pb, 1); /* one fill style */
+ put_byte(pb, 0x41); /* clipped bitmap fill */
+ put_le16(pb, BITMAP_ID); /* bitmap ID */
+ /* position of the bitmap */
+ put_swf_matrix(pb, (int)(1.0 * (1 << FRAC_BITS)), 0,
+ 0, (int)(1.0 * (1 << FRAC_BITS)), 0, 0);
+ put_byte(pb, 0); /* no line style */
+
+ /* shape drawing */
+ init_put_bits(&p, buf1, sizeof(buf1));
+ put_bits(&p, 4, 1); /* one fill bit */
+ put_bits(&p, 4, 0); /* zero line bit */
+
+ put_bits(&p, 1, 0); /* not an edge */
+ put_bits(&p, 5, FLAG_MOVETO | FLAG_SETFILL0);
+ put_bits(&p, 5, 1); /* nbits */
+ put_bits(&p, 1, 0); /* X */
+ put_bits(&p, 1, 0); /* Y */
+ put_bits(&p, 1, 1); /* set fill style 1 */
+
+ /* draw the rectangle ! */
+ put_swf_line_edge(&p, width, 0);
+ put_swf_line_edge(&p, 0, height);
+ put_swf_line_edge(&p, -width, 0);
+ put_swf_line_edge(&p, 0, -height);
+
+ /* end of shape */
+ put_bits(&p, 1, 0); /* not an edge */
+ put_bits(&p, 5, 0);
+
+ flush_put_bits(&p);
+ put_buffer(pb, buf1, pbBufPtr(&p) - p.buf);
+
+ put_swf_end_tag(s);
+ }
+
+ if (audio_enc && audio_enc->codec_id == CODEC_ID_MP3 ) {
+ int v;
+
+ /* start sound */
+ put_swf_tag(s, TAG_STREAMHEAD2);
+
+ v = 0;
+ switch(audio_enc->sample_rate) {
+ case 11025:
+ v |= 1 << 2;
+ break;
+ case 22050:
+ v |= 2 << 2;
+ break;
+ case 44100:
+ v |= 3 << 2;
+ break;
+ default:
+ /* not supported */
+            av_log(s, AV_LOG_ERROR, "SWF does not support that sample rate, choose from (44100, 22050, 11025)\n");
+ av_free(swf->audio_fifo);
+ av_free(swf);
+ return -1;
+ }
+ v |= 0x02; /* 16 bit playback */
+ if (audio_enc->channels == 2)
+ v |= 0x01; /* stereo playback */
+ put_byte(&s->pb, v);
+ v |= 0x20; /* mp3 compressed */
+ put_byte(&s->pb, v);
+ put_le16(&s->pb, swf->samples_per_frame); /* avg samples per frame */
+ put_le16(&s->pb, 0);
+
+ put_swf_end_tag(s);
+ }
+
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int swf_write_video(AVFormatContext *s,
+ AVCodecContext *enc, const uint8_t *buf, int size)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int c = 0;
+ int outSize = 0;
+ int outSamples = 0;
+
+ /* Flash Player limit */
+ if ( swf->swf_frame_number == 16000 ) {
+ av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");
+ }
+
+ if ( swf->audio_type ) {
+ /* Prescan audio data for this swf frame */
+retry_swf_audio_packet:
+ if ( ( swf->audio_size-outSize ) >= 4 ) {
+ int mp3FrameSize = 0;
+ int mp3SampleRate = 0;
+ int mp3IsMono = 0;
+ int mp3SamplesPerFrame = 0;
+
+ /* copy out mp3 header from ring buffer */
+ uint8_t header[4];
+ for (c=0; c<4; c++) {
+ header[c] = swf->audio_fifo[(swf->audio_in_pos+outSize+c) % AUDIO_FIFO_SIZE];
+ }
+
+ if ( swf_mp3_info(header,&mp3FrameSize,&mp3SamplesPerFrame,&mp3SampleRate,&mp3IsMono) ) {
+ if ( ( swf->audio_size-outSize ) >= mp3FrameSize ) {
+ outSize += mp3FrameSize;
+ outSamples += mp3SamplesPerFrame;
+ if ( ( swf->sound_samples + outSamples + swf->samples_per_frame ) < swf->video_samples ) {
+ goto retry_swf_audio_packet;
+ }
+ }
+ } else {
+            /* invalid mp3 data, skip forward;
+               we need to do this since the Flash Player
+               does not like custom headers */
+ swf->audio_in_pos ++;
+ swf->audio_size --;
+ swf->audio_in_pos %= AUDIO_FIFO_SIZE;
+ goto retry_swf_audio_packet;
+ }
+ }
+
+ /* audio stream is behind video stream, bail */
+ if ( ( swf->sound_samples + outSamples + swf->samples_per_frame ) < swf->video_samples ) {
+ return 0;
+ }
+ }
+
+ if ( swf->video_type == CODEC_ID_VP6F ||
+ swf->video_type == CODEC_ID_FLV1 ) {
+ if ( swf->video_frame_number == 0 ) {
+ /* create a new video object */
+ put_swf_tag(s, TAG_VIDEOSTREAM);
+ put_le16(pb, VIDEO_ID);
+ put_le16(pb, 15000 ); /* hard flash player limit */
+ put_le16(pb, enc->width);
+ put_le16(pb, enc->height);
+ put_byte(pb, 0);
+ put_byte(pb,codec_get_tag(swf_codec_tags,swf->video_type));
+ put_swf_end_tag(s);
+
+ /* place the video object for the first time */
+ put_swf_tag(s, TAG_PLACEOBJECT2);
+ put_byte(pb, 0x36);
+ put_le16(pb, 1);
+ put_le16(pb, VIDEO_ID);
+ put_swf_matrix(pb, 1 << FRAC_BITS, 0, 0, 1 << FRAC_BITS, 0, 0);
+ put_le16(pb, swf->video_frame_number );
+ put_byte(pb, 'v');
+ put_byte(pb, 'i');
+ put_byte(pb, 'd');
+ put_byte(pb, 'e');
+ put_byte(pb, 'o');
+ put_byte(pb, 0x00);
+ put_swf_end_tag(s);
+ } else {
+ /* mark the character for update */
+ put_swf_tag(s, TAG_PLACEOBJECT2);
+ put_byte(pb, 0x11);
+ put_le16(pb, 1);
+ put_le16(pb, swf->video_frame_number );
+ put_swf_end_tag(s);
+ }
+
+ /* set video frame data */
+ put_swf_tag(s, TAG_VIDEOFRAME | TAG_LONG);
+ put_le16(pb, VIDEO_ID);
+ put_le16(pb, swf->video_frame_number++ );
+ put_buffer(pb, buf, size);
+ put_swf_end_tag(s);
+ } else if ( swf->video_type == CODEC_ID_MJPEG ) {
+ if (swf->swf_frame_number > 0) {
+ /* remove the shape */
+ put_swf_tag(s, TAG_REMOVEOBJECT);
+ put_le16(pb, SHAPE_ID); /* shape ID */
+ put_le16(pb, 1); /* depth */
+ put_swf_end_tag(s);
+
+ /* free the bitmap */
+ put_swf_tag(s, TAG_FREECHARACTER);
+ put_le16(pb, BITMAP_ID);
+ put_swf_end_tag(s);
+ }
+
+ put_swf_tag(s, TAG_JPEG2 | TAG_LONG);
+
+ put_le16(pb, BITMAP_ID); /* ID of the image */
+
+ /* a dummy jpeg header seems to be required */
+ put_byte(pb, 0xff);
+ put_byte(pb, 0xd8);
+ put_byte(pb, 0xff);
+ put_byte(pb, 0xd9);
+ /* write the jpeg image */
+ put_buffer(pb, buf, size);
+
+ put_swf_end_tag(s);
+
+ /* draw the shape */
+
+ put_swf_tag(s, TAG_PLACEOBJECT);
+ put_le16(pb, SHAPE_ID); /* shape ID */
+ put_le16(pb, 1); /* depth */
+ put_swf_matrix(pb, 20 << FRAC_BITS, 0, 0, 20 << FRAC_BITS, 0, 0);
+ put_swf_end_tag(s);
+ } else {
+ /* invalid codec */
+ }
+
+ swf->swf_frame_number ++;
+
+ swf->video_samples += swf->samples_per_frame;
+
+    /* streaming sound should always be placed just before ShowFrame tags */
+ if ( outSize > 0 ) {
+ put_swf_tag(s, TAG_STREAMBLOCK | TAG_LONG);
+ put_le16(pb, outSamples);
+ put_le16(pb, 0);
+ for (c=0; c<outSize; c++) {
+ put_byte(pb,swf->audio_fifo[(swf->audio_in_pos+c) % AUDIO_FIFO_SIZE]);
+ }
+ put_swf_end_tag(s);
+
+ /* update FIFO */
+ swf->sound_samples += outSamples;
+ swf->audio_in_pos += outSize;
+ swf->audio_size -= outSize;
+ swf->audio_in_pos %= AUDIO_FIFO_SIZE;
+ }
+
+ /* output the frame */
+ put_swf_tag(s, TAG_SHOWFRAME);
+ put_swf_end_tag(s);
+
+ put_flush_packet(&s->pb);
+
+ return 0;
+}
+
+static int swf_write_audio(AVFormatContext *s,
+ AVCodecContext *enc, const uint8_t *buf, int size)
+{
+ SWFContext *swf = s->priv_data;
+ int c = 0;
+
+ /* Flash Player limit */
+ if ( swf->swf_frame_number == 16000 ) {
+ av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");
+ }
+
+ if (enc->codec_id == CODEC_ID_MP3 ) {
+ for (c=0; c<size; c++) {
+ swf->audio_fifo[(swf->audio_out_pos+c)%AUDIO_FIFO_SIZE] = buf[c];
+ }
+ swf->audio_size += size;
+ swf->audio_out_pos += size;
+ swf->audio_out_pos %= AUDIO_FIFO_SIZE;
+ }
+
+    /* if this is an audio-only stream, make sure we still add SWF frames */
+ if ( swf->video_type == 0 ) {
+ swf_write_video(s, enc, 0, 0);
+ }
+
+ return 0;
+}
+
+static int swf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
+ if (codec->codec_type == CODEC_TYPE_AUDIO)
+ return swf_write_audio(s, codec, pkt->data, pkt->size);
+ else
+ return swf_write_video(s, codec, pkt->data, pkt->size);
+}
+
+static int swf_write_trailer(AVFormatContext *s)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc, *video_enc;
+ int file_size, i;
+
+ video_enc = NULL;
+ for(i=0;i<s->nb_streams;i++) {
+ enc = s->streams[i]->codec;
+ if (enc->codec_type == CODEC_TYPE_VIDEO)
+ video_enc = enc;
+ }
+
+ put_swf_tag(s, TAG_END);
+ put_swf_end_tag(s);
+
+ put_flush_packet(&s->pb);
+
+ /* patch file size and number of frames if not streamed */
+ if (!url_is_streamed(&s->pb) && video_enc) {
+ file_size = url_ftell(pb);
+ url_fseek(pb, 4, SEEK_SET);
+ put_le32(pb, file_size);
+ url_fseek(pb, swf->duration_pos, SEEK_SET);
+ put_le16(pb, video_enc->frame_number);
+ }
+
+ av_free(swf->audio_fifo);
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/*********************************************/
+/* Extract FLV-encoded frames and MP3 audio from SWF.
+   Note that detection of the real frame is inaccurate
+   at this point, as it can be quite tricky to determine;
+   you will almost certainly get bad audio/video sync. */
+
+static int get_swf_tag(ByteIOContext *pb, int *len_ptr)
+{
+ int tag, len;
+
+ if (url_feof(pb))
+ return -1;
+
+ tag = get_le16(pb);
+ len = tag & 0x3f;
+ tag = tag >> 6;
+ if (len == 0x3f) {
+ len = get_le32(pb);
+ }
+// av_log(NULL, AV_LOG_DEBUG, "Tag: %d - Len: %d\n", tag, len);
+ *len_ptr = len;
+ return tag;
+}
+
+
+static int swf_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 16)
+ return 0;
+ if ((p->buf[0] == 'F' || p->buf[0] == 'C') && p->buf[1] == 'W' &&
+ p->buf[2] == 'S')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ SWFContext *swf = 0;
+ ByteIOContext *pb = &s->pb;
+ int nbits, len, frame_rate, tag, v;
+ offset_t firstTagOff;
+ AVStream *ast = 0;
+ AVStream *vst = 0;
+
+ swf = av_malloc(sizeof(SWFContext));
+ if (!swf)
+ return -1;
+ s->priv_data = swf;
+
+ tag = get_be32(pb) & 0xffffff00;
+
+ if (tag == MKBETAG('C', 'W', 'S', 0))
+ {
+ av_log(s, AV_LOG_ERROR, "Compressed SWF format not supported\n");
+ return AVERROR_IO;
+ }
+ if (tag != MKBETAG('F', 'W', 'S', 0))
+ return AVERROR_IO;
+ get_le32(pb);
+ /* skip rectangle size */
+ nbits = get_byte(pb) >> 3;
+ len = (4 * nbits - 3 + 7) / 8;
+ url_fskip(pb, len);
+ frame_rate = get_le16(pb);
+ get_le16(pb); /* frame count */
+
+ /* The Flash Player converts 8.8 frame rates
+ to milliseconds internally. Do the same to get
+ a correct framerate */
+ swf->ms_per_frame = ( 1000 * 256 ) / frame_rate;
+ swf->samples_per_frame = 0;
+ swf->ch_id = -1;
+
+ firstTagOff = url_ftell(pb);
+ for(;;) {
+ tag = get_swf_tag(pb, &len);
+ if (tag < 0) {
+ if ( ast || vst ) {
+ if ( vst && ast ) {
+ vst->codec->time_base.den = ast->codec->sample_rate / swf->samples_per_frame;
+ vst->codec->time_base.num = 1;
+ }
+ break;
+ }
+ av_log(s, AV_LOG_ERROR, "No media found in SWF\n");
+ return AVERROR_IO;
+ }
+ if ( tag == TAG_VIDEOSTREAM && !vst) {
+ int codec_id;
+ swf->ch_id = get_le16(pb);
+ get_le16(pb);
+ get_le16(pb);
+ get_le16(pb);
+ get_byte(pb);
+ /* Check for FLV1 */
+ codec_id = codec_get_id(swf_codec_tags, get_byte(pb));
+ if ( codec_id ) {
+ vst = av_new_stream(s, 0);
+ av_set_pts_info(vst, 24, 1, 1000); /* 24 bit pts in ms */
+
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = codec_id;
+ if ( swf->samples_per_frame ) {
+ vst->codec->time_base.den = 1000. / swf->ms_per_frame;
+ vst->codec->time_base.num = 1;
+ }
+ }
+ } else if ( ( tag == TAG_STREAMHEAD || tag == TAG_STREAMHEAD2 ) && !ast) {
+ /* streaming found */
+ get_byte(pb);
+ v = get_byte(pb);
+ swf->samples_per_frame = get_le16(pb);
+ if (len!=4)
+ url_fskip(pb,len-4);
+ /* if mp3 streaming found, OK */
+ if ((v & 0x20) != 0) {
+ if ( tag == TAG_STREAMHEAD2 ) {
+ get_le16(pb);
+ }
+ ast = av_new_stream(s, 1);
+ if (!ast)
+ return -ENOMEM;
+ av_set_pts_info(ast, 24, 1, 1000); /* 24 bit pts in ms */
+
+ if (v & 0x01)
+ ast->codec->channels = 2;
+ else
+ ast->codec->channels = 1;
+
+ switch((v>> 2) & 0x03) {
+ case 1:
+ ast->codec->sample_rate = 11025;
+ break;
+ case 2:
+ ast->codec->sample_rate = 22050;
+ break;
+ case 3:
+ ast->codec->sample_rate = 44100;
+ break;
+ default:
+ av_free(ast);
+ return AVERROR_IO;
+ }
+ ast->codec->codec_type = CODEC_TYPE_AUDIO;
+ ast->codec->codec_id = CODEC_ID_MP3;
+ ast->need_parsing = 1;
+ }
+ } else {
+ url_fskip(pb, len);
+ }
+ }
+ url_fseek(pb, firstTagOff, SEEK_SET);
+
+ return 0;
+}
+
+static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st = 0;
+ int tag, len, i, frame;
+
+ for(;;) {
+ tag = get_swf_tag(pb, &len);
+ if (tag < 0)
+ return AVERROR_IO;
+ if (tag == TAG_VIDEOFRAME) {
+ for( i=0; i<s->nb_streams; i++ ) {
+ st = s->streams[i];
+ if (st->id == 0) {
+ if ( get_le16(pb) == swf->ch_id ) {
+ frame = get_le16(pb);
+ av_get_packet(pb, pkt, len-4);
+ pkt->pts = frame * swf->ms_per_frame;
+ pkt->stream_index = st->index;
+ return pkt->size;
+ } else {
+ url_fskip(pb, len-2);
+ continue;
+ }
+ }
+ }
+ url_fskip(pb, len);
+ } else if (tag == TAG_STREAMBLOCK) {
+ for( i=0; i<s->nb_streams; i++ ) {
+ st = s->streams[i];
+ if (st->id == 1) {
+ url_fskip(pb, 4);
+ av_get_packet(pb, pkt, len-4);
+ pkt->stream_index = st->index;
+ return pkt->size;
+ }
+ }
+ url_fskip(pb, len);
+ } else {
+ url_fskip(pb, len);
+ }
+ }
+ return 0;
+}
+
+static int swf_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+#ifdef CONFIG_SWF_DEMUXER
+AVInputFormat swf_demuxer = {
+ "swf",
+ "Flash format",
+ sizeof(SWFContext),
+ swf_probe,
+ swf_read_header,
+ swf_read_packet,
+ swf_read_close,
+};
+#endif
+#ifdef CONFIG_SWF_MUXER
+AVOutputFormat swf_muxer = {
+ "swf",
+ "Flash format",
+ "application/x-shockwave-flash",
+ "swf",
+ sizeof(SWFContext),
+ CODEC_ID_MP3,
+ CODEC_ID_FLV1,
+ swf_write_header,
+ swf_write_packet,
+ swf_write_trailer,
+};
+#endif
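
put_swf_end_tag() and get_swf_tag() above both rely on the SWF record-header convention: a 16-bit word holding the tag code in its upper 10 bits and the body length in its lower 6 bits, with a length of 0x3f escaping to an explicit 32-bit length word. A minimal standalone sketch of that rule (not part of the patch; encode_swf_header is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the SWF record-header rule shared by put_swf_end_tag() and
 * get_swf_tag(): tag code in the upper 10 bits, body length in the lower
 * 6 bits; a length field of 0x3f means a 32-bit length word follows. */
static void encode_swf_header(int code, uint32_t len)
{
    if (len < 0x3f)
        printf("short form: 0x%04x\n", (code << 6) | (unsigned)len);
    else
        printf("long form:  0x%04x followed by 32-bit length %u\n",
               (code << 6) | 0x3f, (unsigned)len);
}

int main(void)
{
    encode_swf_header(1, 0);              /* TAG_SHOWFRAME, empty body  */
    encode_swf_header(19, 576);           /* TAG_STREAMBLOCK, long form */
    return 0;
}
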
diff --git a/contrib/ffmpeg/libavformat/tcp.c b/contrib/ffmpeg/libavformat/tcp.c
new file mode 100644
index 000000000..93755c497
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/tcp.c
@@ -0,0 +1,232 @@
+/*
+ * TCP protocol
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#if defined(__BEOS__) || defined(__INNOTEK_LIBC__)
+typedef int socklen_t;
+#endif
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+#include <sys/time.h>
+#include <fcntl.h>
+
+typedef struct TCPContext {
+ int fd;
+} TCPContext;
+
+/* resolve a host name, also accepting a numeric IP address */
+int resolve_host(struct in_addr *sin_addr, const char *hostname)
+{
+ struct hostent *hp;
+
+ if ((inet_aton(hostname, sin_addr)) == 0) {
+ hp = gethostbyname(hostname);
+ if (!hp)
+ return -1;
+ memcpy (sin_addr, hp->h_addr, sizeof(struct in_addr));
+ }
+ return 0;
+}
+
+/* return non-zero on error */
+static int tcp_open(URLContext *h, const char *uri, int flags)
+{
+ struct sockaddr_in dest_addr;
+ char hostname[1024], *q;
+ int port, fd = -1;
+ TCPContext *s = NULL;
+ fd_set wfds;
+ int fd_max, ret;
+ struct timeval tv;
+ socklen_t optlen;
+ char proto[1024],path[1024],tmp[1024]; // PETR: protocol and path strings
+
+ url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname),
+ &port, path, sizeof(path), uri); // PETR: use url_split
+ if (strcmp(proto,"tcp")) goto fail; // PETR: check protocol
+ if ((q = strchr(hostname,'@'))) { strcpy(tmp,q+1); strcpy(hostname,tmp); } // PETR: take only the part after '@' for tcp protocol
+
+ s = av_malloc(sizeof(TCPContext));
+ if (!s)
+ return -ENOMEM;
+ h->priv_data = s;
+
+ if (port <= 0 || port >= 65536)
+ goto fail;
+
+ dest_addr.sin_family = AF_INET;
+ dest_addr.sin_port = htons(port);
+ if (resolve_host(&dest_addr.sin_addr, hostname) < 0)
+ goto fail;
+
+ fd = socket(PF_INET, SOCK_STREAM, 0);
+ if (fd < 0)
+ goto fail;
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+
+ redo:
+ ret = connect(fd, (struct sockaddr *)&dest_addr,
+ sizeof(dest_addr));
+ if (ret < 0) {
+ if (errno == EINTR)
+ goto redo;
+ if (errno != EINPROGRESS)
+ goto fail;
+
+ /* wait until we are connected or until abort */
+ for(;;) {
+ if (url_interrupt_cb()) {
+ ret = -EINTR;
+ goto fail1;
+ }
+ fd_max = fd;
+ FD_ZERO(&wfds);
+ FD_SET(fd, &wfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+ ret = select(fd_max + 1, NULL, &wfds, NULL, &tv);
+ if (ret > 0 && FD_ISSET(fd, &wfds))
+ break;
+ }
+
+ /* test error */
+ optlen = sizeof(ret);
+ getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen);
+ if (ret != 0)
+ goto fail;
+ }
+ s->fd = fd;
+ return 0;
+
+ fail:
+ ret = AVERROR_IO;
+ fail1:
+ if (fd >= 0)
+ close(fd);
+ av_free(s);
+ return ret;
+}
+
+static int tcp_read(URLContext *h, uint8_t *buf, int size)
+{
+ TCPContext *s = h->priv_data;
+ int len, fd_max, ret;
+ fd_set rfds;
+ struct timeval tv;
+
+ for (;;) {
+ if (url_interrupt_cb())
+ return -EINTR;
+ fd_max = s->fd;
+ FD_ZERO(&rfds);
+ FD_SET(s->fd, &rfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+ ret = select(fd_max + 1, &rfds, NULL, NULL, &tv);
+ if (ret > 0 && FD_ISSET(s->fd, &rfds)) {
+#ifdef __BEOS__
+ len = recv(s->fd, buf, size, 0);
+#else
+ len = read(s->fd, buf, size);
+#endif
+ if (len < 0) {
+ if (errno != EINTR && errno != EAGAIN)
+#ifdef __BEOS__
+ return errno;
+#else
+ return -errno;
+#endif
+ } else return len;
+ } else if (ret < 0) {
+ return -1;
+ }
+ }
+}
+
+static int tcp_write(URLContext *h, uint8_t *buf, int size)
+{
+ TCPContext *s = h->priv_data;
+ int ret, size1, fd_max, len;
+ fd_set wfds;
+ struct timeval tv;
+
+ size1 = size;
+ while (size > 0) {
+ if (url_interrupt_cb())
+ return -EINTR;
+ fd_max = s->fd;
+ FD_ZERO(&wfds);
+ FD_SET(s->fd, &wfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+ ret = select(fd_max + 1, NULL, &wfds, NULL, &tv);
+ if (ret > 0 && FD_ISSET(s->fd, &wfds)) {
+#ifdef __BEOS__
+ len = send(s->fd, buf, size, 0);
+#else
+ len = write(s->fd, buf, size);
+#endif
+ if (len < 0) {
+ if (errno != EINTR && errno != EAGAIN) {
+#ifdef __BEOS__
+ return errno;
+#else
+ return -errno;
+#endif
+ }
+ continue;
+ }
+ size -= len;
+ buf += len;
+ } else if (ret < 0) {
+ return -1;
+ }
+ }
+ return size1 - size;
+}
+
+static int tcp_close(URLContext *h)
+{
+ TCPContext *s = h->priv_data;
+#ifdef CONFIG_BEOS_NETSERVER
+ closesocket(s->fd);
+#else
+ close(s->fd);
+#endif
+ av_free(s);
+ return 0;
+}
+
+URLProtocol tcp_protocol = {
+ "tcp",
+ tcp_open,
+ tcp_read,
+ tcp_write,
+ NULL, /* seek */
+ tcp_close,
+};
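
tcp_open() above follows the usual non-blocking connect pattern: put the socket in O_NONBLOCK mode, start connect(), treat EINPROGRESS as "still connecting", poll for writability with select() in short slices so the abort callback stays responsive, then read SO_ERROR for the deferred result. A condensed standalone sketch of just that pattern, with URL parsing and the interrupt callback left out (not part of the patch; connect_nonblocking is a hypothetical helper):

#include <errno.h>
#include <fcntl.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/time.h>

/* Sketch of the non-blocking connect pattern used by tcp_open():
 * returns 0 on success, -1 on failure; 'sa' must already be filled in. */
int connect_nonblocking(int fd, const struct sockaddr *sa, socklen_t salen)
{
    int err = 0;
    socklen_t errlen = sizeof(err);
    fd_set wfds;
    struct timeval tv;

    fcntl(fd, F_SETFL, O_NONBLOCK);
    if (connect(fd, sa, salen) == 0)
        return 0;                           /* connected immediately */
    if (errno != EINPROGRESS && errno != EINTR)
        return -1;

    for (;;) {                              /* wait until the socket is writable */
        FD_ZERO(&wfds);
        FD_SET(fd, &wfds);
        tv.tv_sec  = 0;
        tv.tv_usec = 100 * 1000;            /* 100 ms slices, as in tcp_open() */
        if (select(fd + 1, NULL, &wfds, NULL, &tv) > 0 && FD_ISSET(fd, &wfds))
            break;
    }

    /* the deferred connect() result is reported through SO_ERROR */
    if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &errlen) < 0 || err != 0)
        return -1;
    return 0;
}
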
diff --git a/contrib/ffmpeg/libavformat/tiertexseq.c b/contrib/ffmpeg/libavformat/tiertexseq.c
new file mode 100644
index 000000000..b1a39bf76
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/tiertexseq.c
@@ -0,0 +1,310 @@
+/*
+ * Tiertex Limited SEQ File Demuxer
+ * Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file tiertexseq.c
+ * Tiertex Limited SEQ file demuxer
+ */
+
+#include "avformat.h"
+
+#define SEQ_FRAME_SIZE 6144
+#define SEQ_FRAME_W 256
+#define SEQ_FRAME_H 128
+#define SEQ_NUM_FRAME_BUFFERS 30
+#define SEQ_AUDIO_BUFFER_SIZE 882
+#define SEQ_SAMPLE_RATE 22050
+#define SEQ_FRAME_RATE 25
+
+
+typedef struct TiertexSeqFrameBuffer {
+ int fill_size;
+ int data_size;
+ unsigned char *data;
+} TiertexSeqFrameBuffer;
+
+typedef struct SeqDemuxContext {
+ int audio_stream_index;
+ int video_stream_index;
+ int current_frame_pts;
+ int current_frame_offs;
+ TiertexSeqFrameBuffer frame_buffers[SEQ_NUM_FRAME_BUFFERS];
+ int frame_buffers_count;
+ unsigned int current_audio_data_size;
+ unsigned int current_audio_data_offs;
+ unsigned int current_pal_data_size;
+ unsigned int current_pal_data_offs;
+ unsigned int current_video_data_size;
+ unsigned char *current_video_data_ptr;
+ int audio_buffer_full;
+} SeqDemuxContext;
+
+
+static int seq_probe(AVProbeData *p)
+{
+ int i;
+
+ if (p->buf_size < 256)
+ return 0;
+
+    /* there's no real header in a .seq file; the only thing they have in common */
+    /* is the first 256 bytes of the file, which are always filled with 0 */
+ for (i = 0; i < 256; i++)
+ if (p->buf[i] != 0)
+ return 0;
+
+ /* only one fourth of the score since the previous check is too naive */
+ return AVPROBE_SCORE_MAX / 4;
+}
+
+static int seq_init_frame_buffers(SeqDemuxContext *seq, ByteIOContext *pb)
+{
+ int i, sz;
+ TiertexSeqFrameBuffer *seq_buffer;
+
+ url_fseek(pb, 256, SEEK_SET);
+
+ for (i = 0; i < SEQ_NUM_FRAME_BUFFERS; i++) {
+ sz = get_le16(pb);
+ if (sz == 0)
+ break;
+ else {
+ seq_buffer = &seq->frame_buffers[i];
+ seq_buffer->fill_size = 0;
+ seq_buffer->data_size = sz;
+ seq_buffer->data = av_malloc(sz);
+ if (!seq_buffer->data)
+ return AVERROR_NOMEM;
+ }
+ }
+ seq->frame_buffers_count = i;
+ return 0;
+}
+
+static int seq_fill_buffer(SeqDemuxContext *seq, ByteIOContext *pb, int buffer_num, unsigned int data_offs, int data_size)
+{
+ TiertexSeqFrameBuffer *seq_buffer;
+
+ if (buffer_num >= SEQ_NUM_FRAME_BUFFERS)
+ return AVERROR_INVALIDDATA;
+
+ seq_buffer = &seq->frame_buffers[buffer_num];
+ if (seq_buffer->fill_size + data_size > seq_buffer->data_size)
+ return AVERROR_INVALIDDATA;
+
+ url_fseek(pb, seq->current_frame_offs + data_offs, SEEK_SET);
+ if (get_buffer(pb, seq_buffer->data + seq_buffer->fill_size, data_size) != data_size)
+ return AVERROR_IO;
+
+ seq_buffer->fill_size += data_size;
+ return 0;
+}
+
+static int seq_parse_frame_data(SeqDemuxContext *seq, ByteIOContext *pb)
+{
+ unsigned int offset_table[4], buffer_num[4];
+ TiertexSeqFrameBuffer *seq_buffer;
+ int i, e, err;
+
+ seq->current_frame_offs += SEQ_FRAME_SIZE;
+ url_fseek(pb, seq->current_frame_offs, SEEK_SET);
+
+ /* sound data */
+ seq->current_audio_data_offs = get_le16(pb);
+ if (seq->current_audio_data_offs != 0) {
+ seq->current_audio_data_size = SEQ_AUDIO_BUFFER_SIZE * 2;
+ } else {
+ seq->current_audio_data_size = 0;
+ }
+
+ /* palette data */
+ seq->current_pal_data_offs = get_le16(pb);
+ if (seq->current_pal_data_offs != 0) {
+ seq->current_pal_data_size = 768;
+ } else {
+ seq->current_pal_data_size = 0;
+ }
+
+ /* video data */
+ for (i = 0; i < 4; i++)
+ buffer_num[i] = get_byte(pb);
+
+ for (i = 0; i < 4; i++)
+ offset_table[i] = get_le16(pb);
+
+ for (i = 0; i < 3; i++) {
+ if (offset_table[i] != 0) {
+ for (e = i + 1; e < 4 && offset_table[e] == 0; e++);
+ err = seq_fill_buffer(seq, pb, buffer_num[1 + i],
+ offset_table[i],
+ offset_table[e] - offset_table[i]);
+ if (err != 0)
+ return err;
+ }
+ }
+
+ if (buffer_num[0] != 255) {
+ if (buffer_num[0] >= SEQ_NUM_FRAME_BUFFERS)
+ return AVERROR_INVALIDDATA;
+
+ seq_buffer = &seq->frame_buffers[buffer_num[0]];
+ seq->current_video_data_size = seq_buffer->fill_size;
+ seq->current_video_data_ptr = seq_buffer->data;
+ seq_buffer->fill_size = 0;
+ } else {
+ seq->current_video_data_size = 0;
+ seq->current_video_data_ptr = 0;
+ }
+
+ return 0;
+}
+
+static int seq_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ int i, rc;
+ SeqDemuxContext *seq = (SeqDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+
+ /* init internal buffers */
+ rc = seq_init_frame_buffers(seq, pb);
+ if (rc)
+ return rc;
+
+ seq->current_frame_offs = 0;
+
+ /* preload (no audio data, just buffer operations related data) */
+ for (i = 1; i <= 100; i++) {
+ rc = seq_parse_frame_data(seq, pb);
+ if (rc)
+ return rc;
+ }
+
+ seq->current_frame_pts = 0;
+
+ seq->audio_buffer_full = 0;
+
+ /* initialize the video decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 32, 1, SEQ_FRAME_RATE);
+ seq->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_TIERTEXSEQVIDEO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = SEQ_FRAME_W;
+ st->codec->height = SEQ_FRAME_H;
+
+ /* initialize the audio decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 32, 1, SEQ_SAMPLE_RATE);
+ seq->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S16BE;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = 1;
+ st->codec->sample_rate = SEQ_SAMPLE_RATE;
+ st->codec->bits_per_sample = 16;
+ st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_sample * st->codec->channels;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ return 0;
+}
+
+static int seq_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int rc;
+ SeqDemuxContext *seq = (SeqDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+
+ if (!seq->audio_buffer_full) {
+ rc = seq_parse_frame_data(seq, pb);
+ if (rc)
+ return rc;
+
+ /* video packet */
+ if (seq->current_pal_data_size + seq->current_video_data_size != 0) {
+ if (av_new_packet(pkt, 1 + seq->current_pal_data_size + seq->current_video_data_size))
+ return AVERROR_NOMEM;
+
+ pkt->data[0] = 0;
+ if (seq->current_pal_data_size != 0) {
+ pkt->data[0] |= 1;
+ url_fseek(pb, seq->current_frame_offs + seq->current_pal_data_offs, SEEK_SET);
+ if (get_buffer(pb, &pkt->data[1], seq->current_pal_data_size) != seq->current_pal_data_size)
+ return AVERROR_IO;
+ }
+ if (seq->current_video_data_size != 0) {
+ pkt->data[0] |= 2;
+ memcpy(&pkt->data[1 + seq->current_pal_data_size],
+ seq->current_video_data_ptr,
+ seq->current_video_data_size);
+ }
+ pkt->stream_index = seq->video_stream_index;
+ pkt->pts = seq->current_frame_pts;
+
+ /* sound buffer will be processed on next read_packet() call */
+ seq->audio_buffer_full = 1;
+ return 0;
+ }
+ }
+
+ /* audio packet */
+ if (seq->current_audio_data_offs == 0) /* end of data reached */
+ return AVERROR_IO;
+
+ url_fseek(pb, seq->current_frame_offs + seq->current_audio_data_offs, SEEK_SET);
+ rc = av_get_packet(pb, pkt, seq->current_audio_data_size);
+ if (rc < 0)
+ return rc;
+
+ pkt->stream_index = seq->audio_stream_index;
+ pkt->pts = seq->current_frame_pts++;
+
+ seq->audio_buffer_full = 0;
+ return 0;
+}
+
+static int seq_read_close(AVFormatContext *s)
+{
+ int i;
+ SeqDemuxContext *seq = (SeqDemuxContext *)s->priv_data;
+
+ for (i = 0; i < SEQ_NUM_FRAME_BUFFERS; i++)
+ av_free(seq->frame_buffers[i].data);
+
+ return 0;
+}
+
+AVInputFormat tiertexseq_demuxer = {
+ "tiertexseq",
+ "Tiertex Limited SEQ format",
+ sizeof(SeqDemuxContext),
+ seq_probe,
+ seq_read_header,
+ seq_read_packet,
+ seq_read_close,
+};
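
Each 6144-byte frame block parsed by seq_parse_frame_data() above begins with a small directory: a 16-bit audio offset, a 16-bit palette offset, four destination buffer numbers and four 16-bit video-slice offsets, all relative to the block. A hypothetical sketch of just that directory read (not part of the patch):

#include <stdio.h>

/* Sketch of the per-frame directory read at the start of each 6144-byte
 * block by seq_parse_frame_data(); all offsets are little-endian and
 * relative to the frame block. EOF handling is omitted for brevity. */
struct seq_frame_dir {
    unsigned audio_offs;      /* 0 = no audio data in this frame   */
    unsigned pal_offs;        /* 0 = no palette data in this frame */
    unsigned buffer_num[4];   /* destination frame buffers; the demuxer
                                 treats buffer_num[0] == 255 as "no
                                 finished picture to emit"          */
    unsigned video_offs[4];   /* bounds of up to three video slices */
};

static unsigned rd_le16(FILE *f) { unsigned a = fgetc(f); return a | (fgetc(f) << 8); }

void read_seq_frame_dir(FILE *f, struct seq_frame_dir *d)
{
    int i;

    d->audio_offs = rd_le16(f);
    d->pal_offs   = rd_le16(f);
    for (i = 0; i < 4; i++)
        d->buffer_num[i] = fgetc(f);
    for (i = 0; i < 4; i++)
        d->video_offs[i] = rd_le16(f);
}
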
diff --git a/contrib/ffmpeg/libavformat/tta.c b/contrib/ffmpeg/libavformat/tta.c
new file mode 100644
index 000000000..a513d9d38
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/tta.c
@@ -0,0 +1,152 @@
+/*
+ * TTA demuxer
+ * Copyright (c) 2006 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+
+typedef struct {
+ int totalframes, currentframe;
+ uint32_t *seektable;
+} TTAContext;
+
+static int tta_probe(AVProbeData *p)
+{
+ const uint8_t *d = p->buf;
+ if (p->buf_size < 4)
+ return 0;
+ if (d[0] == 'T' && d[1] == 'T' && d[2] == 'A' && d[3] == '1')
+ return 80;
+ return 0;
+}
+
+static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ TTAContext *c = s->priv_data;
+ AVStream *st;
+ int i, channels, bps, samplerate, datalen, framelen, start;
+
+ start = url_ftell(&s->pb);
+
+ if (get_le32(&s->pb) != ff_get_fourcc("TTA1"))
+ return -1; // not tta file
+
+ url_fskip(&s->pb, 2); // FIXME: flags
+ channels = get_le16(&s->pb);
+ bps = get_le16(&s->pb);
+ samplerate = get_le32(&s->pb);
+ if(samplerate <= 0 || samplerate > 1000000){
+ av_log(s, AV_LOG_ERROR, "nonsense samplerate\n");
+ return -1;
+ }
+
+ datalen = get_le32(&s->pb);
+ if(datalen < 0){
+ av_log(s, AV_LOG_ERROR, "nonsense datalen\n");
+ return -1;
+ }
+
+ url_fskip(&s->pb, 4); // header crc
+
+ framelen = 1.04489795918367346939 * samplerate;
+ c->totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);
+ c->currentframe = 0;
+
+ if(c->totalframes >= UINT_MAX/sizeof(uint32_t)){
+ av_log(s, AV_LOG_ERROR, "totalframes too large\n");
+ return -1;
+ }
+ c->seektable = av_mallocz(sizeof(uint32_t)*c->totalframes);
+ if (!c->seektable)
+ return AVERROR_NOMEM;
+
+ for (i = 0; i < c->totalframes; i++)
+ c->seektable[i] = get_le32(&s->pb);
+ url_fskip(&s->pb, 4); // seektable crc
+
+ st = av_new_stream(s, 0);
+// av_set_pts_info(st, 32, 1, 1000);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_TTA;
+ st->codec->channels = channels;
+ st->codec->sample_rate = samplerate;
+ st->codec->bits_per_sample = bps;
+
+ st->codec->extradata_size = url_ftell(&s->pb) - start;
+ if(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
+ //this check is redundant as get_buffer should fail
+ av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
+ return -1;
+ }
+ st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
+ url_fseek(&s->pb, start, SEEK_SET); // or SEEK_CUR and -size ? :)
+ get_buffer(&s->pb, st->codec->extradata, st->codec->extradata_size);
+
+ return 0;
+}
+
+static int tta_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ TTAContext *c = s->priv_data;
+ int ret, size;
+
+ // FIXME!
+ if (c->currentframe > c->totalframes)
+ size = 0;
+ else
+ size = c->seektable[c->currentframe];
+
+ c->currentframe++;
+
+ if (av_new_packet(pkt, size) < 0)
+ return AVERROR_IO;
+
+ pkt->pos = url_ftell(&s->pb);
+ pkt->stream_index = 0;
+ ret = get_buffer(&s->pb, pkt->data, size);
+ if (ret <= 0) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->size = ret;
+// av_log(s, AV_LOG_INFO, "TTA packet #%d desired size: %d read size: %d at pos %d\n",
+// c->currentframe, size, ret, pkt->pos);
+ return 0; //ret;
+}
+
+static int tta_read_close(AVFormatContext *s)
+{
+ TTAContext *c = s->priv_data;
+ if (c->seektable)
+ av_free(c->seektable);
+ return 0;
+}
+
+AVInputFormat tta_demuxer = {
+ "tta",
+ "true-audio",
+ sizeof(TTAContext),
+ tta_probe,
+ tta_read_header,
+ tta_read_packet,
+ tta_read_close,
+ .extensions = "tta",
+};
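
tta_read_header() above derives the frame count from the header: one TTA frame spans framelen = 1.04489795918367346939 * samplerate samples (roughly 1.045 seconds of audio), and totalframes is the data length divided by that, rounded up. A small worked sketch of the arithmetic (not part of the patch; the input values are made up):

#include <stdio.h>

/* Sketch of the frame-count computation in tta_read_header():
 * framelen is the number of samples per TTA frame and totalframes
 * is the data length divided by it, rounded up. */
int main(void)
{
    int samplerate  = 44100;
    int datalen     = 10 * samplerate;     /* ten seconds worth of samples */
    int framelen    = 1.04489795918367346939 * samplerate;
    int totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);

    /* expect framelen around 46080 and totalframes == 10 for this input */
    printf("framelen=%d totalframes=%d\n", framelen, totalframes);
    return 0;
}
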
diff --git a/contrib/ffmpeg/libavformat/udp.c b/contrib/ffmpeg/libavformat/udp.c
new file mode 100644
index 000000000..96fa4e152
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/udp.c
@@ -0,0 +1,512 @@
+/*
+ * UDP prototype streaming system
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+
+#ifndef IPV6_ADD_MEMBERSHIP
+#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
+#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
+#endif
+
+typedef struct {
+ int udp_fd;
+ int ttl;
+ int is_multicast;
+ int local_port;
+ int reuse_socket;
+#ifndef CONFIG_IPV6
+ struct ip_mreq mreq;
+ struct sockaddr_in dest_addr;
+#else
+ struct sockaddr_storage dest_addr;
+ size_t dest_addr_len;
+#endif
+} UDPContext;
+
+#define UDP_TX_BUF_SIZE 32768
+
+#ifdef CONFIG_IPV6
+
+static int udp_ipv6_is_multicast_address(const struct sockaddr *addr) {
+ if (addr->sa_family == AF_INET)
+ return IN_MULTICAST(ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
+ if (addr->sa_family == AF_INET6)
+ return IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6 *)addr)->sin6_addr);
+ return -1;
+}
+
+static int udp_ipv6_set_multicast_ttl(int sockfd, int mcastTTL, struct sockaddr *addr) {
+ if (addr->sa_family == AF_INET) {
+ if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &mcastTTL, sizeof(mcastTTL)) < 0) {
+ perror("setsockopt(IP_MULTICAST_TTL)");
+ return -1;
+ }
+ }
+ if (addr->sa_family == AF_INET6) {
+ if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &mcastTTL, sizeof(mcastTTL)) < 0) {
+ perror("setsockopt(IPV6_MULTICAST_HOPS)");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int udp_ipv6_join_multicast_group(int sockfd, struct sockaddr *addr) {
+ struct ip_mreq mreq;
+ struct ipv6_mreq mreq6;
+ if (addr->sa_family == AF_INET) {
+ mreq.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
+ mreq.imr_interface.s_addr= INADDR_ANY;
+ if (setsockopt(sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, (const void *)&mreq, sizeof(mreq)) < 0) {
+ perror("setsockopt(IP_ADD_MEMBERSHIP)");
+ return -1;
+ }
+ }
+ if (addr->sa_family == AF_INET6) {
+ memcpy(&mreq6.ipv6mr_multiaddr, &(((struct sockaddr_in6 *)addr)->sin6_addr), sizeof(struct in6_addr));
+ mreq6.ipv6mr_interface= 0;
+ if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, &mreq6, sizeof(mreq6)) < 0) {
+ perror("setsockopt(IPV6_ADD_MEMBERSHIP)");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int udp_ipv6_leave_multicast_group(int sockfd, struct sockaddr *addr) {
+ struct ip_mreq mreq;
+ struct ipv6_mreq mreq6;
+ if (addr->sa_family == AF_INET) {
+ mreq.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
+ mreq.imr_interface.s_addr= INADDR_ANY;
+ if (setsockopt(sockfd, IPPROTO_IP, IP_DROP_MEMBERSHIP, (const void *)&mreq, sizeof(mreq)) < 0) {
+ perror("setsockopt(IP_DROP_MEMBERSHIP)");
+ return -1;
+ }
+ }
+ if (addr->sa_family == AF_INET6) {
+ memcpy(&mreq6.ipv6mr_multiaddr, &(((struct sockaddr_in6 *)addr)->sin6_addr), sizeof(struct in6_addr));
+ mreq6.ipv6mr_interface= 0;
+ if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_DROP_MEMBERSHIP, &mreq6, sizeof(mreq6)) < 0) {
+ perror("setsockopt(IPV6_DROP_MEMBERSHIP)");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static struct addrinfo* udp_ipv6_resolve_host(const char *hostname, int port, int type, int family, int flags) {
+ struct addrinfo hints, *res = 0;
+ int error;
+ char sport[16];
+ const char *node = 0, *service = 0;
+
+ if (port > 0) {
+ snprintf(sport, sizeof(sport), "%d", port);
+ service = sport;
+ }
+ if ((hostname) && (hostname[0] != '\0') && (hostname[0] != '?')) {
+ node = hostname;
+ }
+ if ((node) || (service)) {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_socktype = type;
+ hints.ai_family = family;
+ hints.ai_flags = flags;
+ if ((error = getaddrinfo(node, service, &hints, &res))) {
+ av_log(NULL, AV_LOG_ERROR, "udp_ipv6_resolve_host: %s\n", gai_strerror(error));
+ }
+ }
+ return res;
+}
+
+static int udp_ipv6_set_remote_url(URLContext *h, const char *uri) {
+ UDPContext *s = h->priv_data;
+ char hostname[256];
+ int port;
+ struct addrinfo *res0;
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);
+ res0 = udp_ipv6_resolve_host(hostname, port, SOCK_DGRAM, AF_UNSPEC, 0);
+ if (res0 == 0) return AVERROR_IO;
+ memcpy(&s->dest_addr, res0->ai_addr, res0->ai_addrlen);
+ s->dest_addr_len = res0->ai_addrlen;
+ freeaddrinfo(res0);
+ return 0;
+}
+
+static int udp_ipv6_set_local(URLContext *h) {
+ UDPContext *s = h->priv_data;
+ int udp_fd = -1;
+ struct sockaddr_storage clientaddr;
+ socklen_t addrlen;
+ char sbuf[NI_MAXSERV];
+ char hbuf[NI_MAXHOST];
+ struct addrinfo *res0 = NULL, *res = NULL;
+
+ if (s->local_port != 0) {
+ res0 = udp_ipv6_resolve_host(0, s->local_port, SOCK_DGRAM, AF_UNSPEC, AI_PASSIVE);
+ if (res0 == 0)
+ goto fail;
+ for (res = res0; res; res=res->ai_next) {
+ udp_fd = socket(res->ai_family, SOCK_DGRAM, 0);
+ if (udp_fd > 0) break;
+ perror("socket");
+ }
+ } else {
+ udp_fd = socket(s->dest_addr.ss_family, SOCK_DGRAM, 0);
+ if (udp_fd < 0)
+ perror("socket");
+ }
+
+ if (udp_fd < 0)
+ goto fail;
+
+ if (s->local_port != 0) {
+ if (bind(udp_fd, res0->ai_addr, res0->ai_addrlen) < 0) {
+ perror("bind");
+ goto fail;
+ }
+ freeaddrinfo(res0);
+ res0 = NULL;
+ }
+
+ addrlen = sizeof(clientaddr);
+ if (getsockname(udp_fd, (struct sockaddr *)&clientaddr, &addrlen) < 0) {
+ perror("getsockname");
+ goto fail;
+ }
+
+ if (getnameinfo((struct sockaddr *)&clientaddr, addrlen, hbuf, sizeof(hbuf), sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
+ perror("getnameinfo");
+ goto fail;
+ }
+
+ s->local_port = strtol(sbuf, NULL, 10);
+
+ return udp_fd;
+
+ fail:
+ if (udp_fd >= 0)
+#ifdef CONFIG_BEOS_NETSERVER
+ closesocket(udp_fd);
+#else
+ close(udp_fd);
+#endif
+ if(res0)
+ freeaddrinfo(res0);
+ return -1;
+}
+
+#endif
+
+
+/**
+ * If no filename is given to av_open_input_file because you want to
+ * get the local port first, then you must call this function to set
+ * the remote server address.
+ *
+ * url syntax: udp://host:port[?option=val...]
+ * option: 'multicast=1' : enable multicast
+ * 'ttl=n' : set the ttl value (for multicast only)
+ * 'localport=n' : set the local port
+ * 'pkt_size=n' : set max packet size
+ * 'reuse=1' : enable reusing the socket
+ *
+ * @param h media file context
+ * @param uri URI of the remote server
+ * @return zero if no error.
+ */
+int udp_set_remote_url(URLContext *h, const char *uri)
+{
+#ifdef CONFIG_IPV6
+ return udp_ipv6_set_remote_url(h, uri);
+#else
+ UDPContext *s = h->priv_data;
+ char hostname[256];
+ int port;
+
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);
+
+ /* set the destination address */
+ if (resolve_host(&s->dest_addr.sin_addr, hostname) < 0)
+ return AVERROR_IO;
+ s->dest_addr.sin_family = AF_INET;
+ s->dest_addr.sin_port = htons(port);
+ return 0;
+#endif
+}
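As a concrete illustration of the option syntax documented above, a minimal sender sketch; it assumes the generic url_open()/url_write()/url_close() entry points from avio.h (not part of this hunk), and the multicast address and option values are placeholders.

    /* Sketch: send one datagram over multicast UDP using the options above.
       Assumes #include "avio.h"; address and options are made up for the example. */
    static int send_probe(void)
    {
        URLContext *uc;
        unsigned char probe[4] = "ping";

        if (url_open(&uc, "udp://239.0.0.1:1234?multicast=1&ttl=4&pkt_size=1316",
                     URL_WRONLY) < 0)
            return -1;
        url_write(uc, probe, sizeof(probe));  /* goes out as a single datagram */
        url_close(uc);
        return 0;
    }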
+
+/**
+ * Return the local port used by the UDP connection
+ * @param h media file context
+ * @return the local port number
+ */
+int udp_get_local_port(URLContext *h)
+{
+ UDPContext *s = h->priv_data;
+ return s->local_port;
+}
+
+/**
+ * Return the UDP file handle for select() usage, to wait for several RTP
+ * streams at the same time.
+ * @param h media file context
+ */
+int udp_get_file_handle(URLContext *h)
+{
+ UDPContext *s = h->priv_data;
+ return s->udp_fd;
+}
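A sketch of the select() usage the comment above alludes to; the RTP/RTCP pairing is only an assumed scenario and error handling is omitted.

    /* Wait up to 100 ms until either of two UDP handles becomes readable
       (e.g. an assumed RTP/RTCP pair), then the caller can url_read() it. */
    #include <sys/select.h>

    static int wait_for_data(URLContext *rtp, URLContext *rtcp)
    {
        fd_set rfds;
        struct timeval tv = { 0, 100000 };
        int fd1 = udp_get_file_handle(rtp);
        int fd2 = udp_get_file_handle(rtcp);
        int maxfd = fd1 > fd2 ? fd1 : fd2;

        FD_ZERO(&rfds);
        FD_SET(fd1, &rfds);
        FD_SET(fd2, &rfds);
        return select(maxfd + 1, &rfds, NULL, NULL, &tv); /* > 0: data pending */
    }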
+
+/* put it in UDP context */
+/* return non-zero on error */
+static int udp_open(URLContext *h, const char *uri, int flags)
+{
+ char hostname[1024];
+ int port, udp_fd = -1, tmp;
+ UDPContext *s = NULL;
+ int is_output;
+ const char *p;
+ char buf[256];
+#ifndef CONFIG_IPV6
+ struct sockaddr_in my_addr, my_addr1;
+ int len;
+#endif
+
+ h->is_streamed = 1;
+ h->max_packet_size = 1472;
+
+ is_output = (flags & URL_WRONLY);
+
+ s = av_malloc(sizeof(UDPContext));
+ if (!s)
+ return -ENOMEM;
+
+ h->priv_data = s;
+ s->ttl = 16;
+ s->is_multicast = 0;
+ s->local_port = 0;
+ s->reuse_socket = 0;
+ p = strchr(uri, '?');
+ if (p) {
+ s->is_multicast = find_info_tag(buf, sizeof(buf), "multicast", p);
+ s->reuse_socket = find_info_tag(buf, sizeof(buf), "reuse", p);
+ if (find_info_tag(buf, sizeof(buf), "ttl", p)) {
+ s->ttl = strtol(buf, NULL, 10);
+ }
+ if (find_info_tag(buf, sizeof(buf), "localport", p)) {
+ s->local_port = strtol(buf, NULL, 10);
+ }
+ if (find_info_tag(buf, sizeof(buf), "pkt_size", p)) {
+ h->max_packet_size = strtol(buf, NULL, 10);
+ }
+ }
+
+ /* fill the dest addr */
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);
+
+ /* XXX: fix url_split */
+ if (hostname[0] == '\0' || hostname[0] == '?') {
+ /* a null hostname is only accepted for input */
+ if (s->is_multicast || (flags & URL_WRONLY))
+ goto fail;
+ } else {
+ udp_set_remote_url(h, uri);
+ }
+
+#ifndef CONFIG_IPV6
+ udp_fd = socket(PF_INET, SOCK_DGRAM, 0);
+ if (udp_fd < 0)
+ goto fail;
+
+ my_addr.sin_family = AF_INET;
+ my_addr.sin_addr.s_addr = htonl (INADDR_ANY);
+ if (s->is_multicast && !(h->flags & URL_WRONLY)) {
+ /* special case: the bind must be done on the multicast address port */
+ my_addr.sin_port = s->dest_addr.sin_port;
+ } else {
+ my_addr.sin_port = htons(s->local_port);
+ }
+
+ if (s->reuse_socket)
+ if (setsockopt (udp_fd, SOL_SOCKET, SO_REUSEADDR, &(s->reuse_socket), sizeof(s->reuse_socket)) != 0)
+ goto fail;
+
+ /* the bind is needed to give a port to the socket now */
+ if (bind(udp_fd,(struct sockaddr *)&my_addr, sizeof(my_addr)) < 0)
+ goto fail;
+
+ len = sizeof(my_addr1);
+ getsockname(udp_fd, (struct sockaddr *)&my_addr1, &len);
+ s->local_port = ntohs(my_addr1.sin_port);
+
+#ifndef CONFIG_BEOS_NETSERVER
+ if (s->is_multicast) {
+ if (h->flags & URL_WRONLY) {
+ /* output */
+ if (setsockopt(udp_fd, IPPROTO_IP, IP_MULTICAST_TTL,
+ &s->ttl, sizeof(s->ttl)) < 0) {
+ perror("IP_MULTICAST_TTL");
+ goto fail;
+ }
+ } else {
+ /* input */
+ memset(&s->mreq, 0, sizeof(s->mreq));
+ s->mreq.imr_multiaddr = s->dest_addr.sin_addr;
+ s->mreq.imr_interface.s_addr = htonl (INADDR_ANY);
+ if (setsockopt(udp_fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
+ &s->mreq, sizeof(s->mreq)) < 0) {
+ perror("rtp: IP_ADD_MEMBERSHIP");
+ goto fail;
+ }
+ }
+ }
+#endif
+#else
+ if (s->is_multicast && !(h->flags & URL_WRONLY))
+ s->local_port = port;
+ udp_fd = udp_ipv6_set_local(h);
+ if (udp_fd < 0)
+ goto fail;
+#ifndef CONFIG_BEOS_NETSERVER
+ if (s->is_multicast) {
+ if (h->flags & URL_WRONLY) {
+ if (udp_ipv6_set_multicast_ttl(udp_fd, s->ttl, (struct sockaddr *)&s->dest_addr) < 0)
+ goto fail;
+ } else {
+ if (udp_ipv6_join_multicast_group(udp_fd, (struct sockaddr *)&s->dest_addr) < 0)
+ goto fail;
+ }
+ }
+#endif
+#endif
+
+ if (is_output) {
+ /* limit the tx buf size to limit latency */
+ tmp = UDP_TX_BUF_SIZE;
+ if (setsockopt(udp_fd, SOL_SOCKET, SO_SNDBUF, &tmp, sizeof(tmp)) < 0) {
+ perror("setsockopt sndbuf");
+ goto fail;
+ }
+ }
+
+ s->udp_fd = udp_fd;
+ return 0;
+ fail:
+ if (udp_fd >= 0)
+#ifdef CONFIG_BEOS_NETSERVER
+ closesocket(udp_fd);
+#else
+ close(udp_fd);
+#endif
+ av_free(s);
+ return AVERROR_IO;
+}
+
+static int udp_read(URLContext *h, uint8_t *buf, int size)
+{
+ UDPContext *s = h->priv_data;
+#ifndef CONFIG_IPV6
+ struct sockaddr_in from;
+#else
+ struct sockaddr_storage from;
+#endif
+ socklen_t from_len;
+ int len;
+
+ for(;;) {
+ from_len = sizeof(from);
+ len = recvfrom (s->udp_fd, buf, size, 0,
+ (struct sockaddr *)&from, &from_len);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR)
+ return AVERROR_IO;
+ } else {
+ break;
+ }
+ }
+ return len;
+}
+
+static int udp_write(URLContext *h, uint8_t *buf, int size)
+{
+ UDPContext *s = h->priv_data;
+ int ret;
+
+ for(;;) {
+ ret = sendto (s->udp_fd, buf, size, 0,
+ (struct sockaddr *) &s->dest_addr,
+#ifndef CONFIG_IPV6
+ sizeof (s->dest_addr));
+#else
+ s->dest_addr_len);
+#endif
+ if (ret < 0) {
+ if (errno != EINTR && errno != EAGAIN)
+ return AVERROR_IO;
+ } else {
+ break;
+ }
+ }
+ return size;
+}
+
+static int udp_close(URLContext *h)
+{
+ UDPContext *s = h->priv_data;
+
+#ifndef CONFIG_BEOS_NETSERVER
+#ifndef CONFIG_IPV6
+ if (s->is_multicast && !(h->flags & URL_WRONLY)) {
+ if (setsockopt(s->udp_fd, IPPROTO_IP, IP_DROP_MEMBERSHIP,
+ &s->mreq, sizeof(s->mreq)) < 0) {
+ perror("IP_DROP_MEMBERSHIP");
+ }
+ }
+#else
+ if (s->is_multicast && !(h->flags & URL_WRONLY))
+ udp_ipv6_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);
+#endif
+ close(s->udp_fd);
+#else
+ closesocket(s->udp_fd);
+#endif
+ av_free(s);
+ return 0;
+}
+
+URLProtocol udp_protocol = {
+ "udp",
+ udp_open,
+ udp_read,
+ udp_write,
+ NULL, /* seek */
+ udp_close,
+};
diff --git a/contrib/ffmpeg/libavformat/utils.c b/contrib/ffmpeg/libavformat/utils.c
new file mode 100644
index 000000000..eaeeb7c16
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/utils.c
@@ -0,0 +1,3108 @@
+/*
+ * Various utilities for ffmpeg system
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+#include "opt.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+/**
+ * @file libavformat/utils.c
+ * Various utility functions for using the FFmpeg library.
+ */
+
+static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
+static void av_frac_add(AVFrac *f, int64_t incr);
+static void av_frac_set(AVFrac *f, int64_t val);
+
+/** head of registered input format linked list. */
+AVInputFormat *first_iformat = NULL;
+/** head of registered output format linked list. */
+AVOutputFormat *first_oformat = NULL;
+
+void av_register_input_format(AVInputFormat *format)
+{
+ AVInputFormat **p;
+ p = &first_iformat;
+ while (*p != NULL) p = &(*p)->next;
+ *p = format;
+ format->next = NULL;
+}
+
+void av_register_output_format(AVOutputFormat *format)
+{
+ AVOutputFormat **p;
+ p = &first_oformat;
+ while (*p != NULL) p = &(*p)->next;
+ *p = format;
+ format->next = NULL;
+}
+
+int match_ext(const char *filename, const char *extensions)
+{
+ const char *ext, *p;
+ char ext1[32], *q;
+
+ if(!filename)
+ return 0;
+
+ ext = strrchr(filename, '.');
+ if (ext) {
+ ext++;
+ p = extensions;
+ for(;;) {
+ q = ext1;
+ while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
+ *q++ = *p++;
+ *q = '\0';
+ if (!strcasecmp(ext1, ext))
+ return 1;
+ if (*p == '\0')
+ break;
+ p++;
+ }
+ }
+ return 0;
+}
+
+AVOutputFormat *guess_format(const char *short_name, const char *filename,
+ const char *mime_type)
+{
+ AVOutputFormat *fmt, *fmt_found;
+ int score_max, score;
+
+ /* specific test for image sequences */
+#ifdef CONFIG_IMAGE2_MUXER
+ if (!short_name && filename &&
+ av_filename_number_test(filename) &&
+ av_guess_image2_codec(filename) != CODEC_ID_NONE) {
+ return guess_format("image2", NULL, NULL);
+ }
+#endif
+ /* find the proper file type */
+ fmt_found = NULL;
+ score_max = 0;
+ fmt = first_oformat;
+ while (fmt != NULL) {
+ score = 0;
+ if (fmt->name && short_name && !strcmp(fmt->name, short_name))
+ score += 100;
+ if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
+ score += 10;
+ if (filename && fmt->extensions &&
+ match_ext(filename, fmt->extensions)) {
+ score += 5;
+ }
+ if (score > score_max) {
+ score_max = score;
+ fmt_found = fmt;
+ }
+ fmt = fmt->next;
+ }
+ return fmt_found;
+}
+
+AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
+ const char *mime_type)
+{
+ AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
+
+ if (fmt) {
+ AVOutputFormat *stream_fmt;
+ char stream_format_name[64];
+
+ snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
+ stream_fmt = guess_format(stream_format_name, NULL, NULL);
+
+ if (stream_fmt)
+ fmt = stream_fmt;
+ }
+
+ return fmt;
+}
+
+/**
+ * Guesses the codec id based upon muxer and filename.
+ */
+enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+ const char *filename, const char *mime_type, enum CodecType type){
+ if(type == CODEC_TYPE_VIDEO){
+ enum CodecID codec_id= CODEC_ID_NONE;
+
+#ifdef CONFIG_IMAGE2_MUXER
+ if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
+ codec_id= av_guess_image2_codec(filename);
+ }
+#endif
+ if(codec_id == CODEC_ID_NONE)
+ codec_id= fmt->video_codec;
+ return codec_id;
+ }else if(type == CODEC_TYPE_AUDIO)
+ return fmt->audio_codec;
+ else
+ return CODEC_ID_NONE;
+}
+
+/**
+ * finds AVInputFormat based on input format's short name.
+ */
+AVInputFormat *av_find_input_format(const char *short_name)
+{
+ AVInputFormat *fmt;
+ for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
+ if (!strcmp(fmt->name, short_name))
+ return fmt;
+ }
+ return NULL;
+}
+
+/* memory handling */
+
+/**
+ * Default packet destructor.
+ */
+void av_destruct_packet(AVPacket *pkt)
+{
+ av_free(pkt->data);
+ pkt->data = NULL; pkt->size = 0;
+}
+
+/**
+ * Allocate the payload of a packet and initialize its fields to default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK. AVERROR_xxx otherwise.
+ */
+int av_new_packet(AVPacket *pkt, int size)
+{
+ uint8_t *data;
+ if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR_NOMEM;
+ data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!data)
+ return AVERROR_NOMEM;
+ memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+
+ av_init_packet(pkt);
+ pkt->data = data;
+ pkt->size = size;
+ pkt->destruct = av_destruct_packet;
+ return 0;
+}
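For context, the intended allocate/use/free round trip, as a hedged sketch (av_free_packet() is declared in avformat.h and ends up invoking av_destruct_packet() above).

    AVPacket pkt;
    if (av_new_packet(&pkt, 1024) == 0) {
        memset(pkt.data, 0, pkt.size);   /* payload is writable and padded by
                                            FF_INPUT_BUFFER_PADDING_SIZE */
        av_free_packet(&pkt);            /* calls pkt.destruct */
    }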
+
+/**
+ * Allocate and read the payload of a packet and initialize its fields to default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return >0 (read size) if OK. AVERROR_xxx otherwise.
+ */
+int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
+{
+ int ret= av_new_packet(pkt, size);
+
+ if(ret<0)
+ return ret;
+
+ pkt->pos= url_ftell(s);
+
+ ret= get_buffer(s, pkt->data, size);
+ if(ret<=0)
+ av_free_packet(pkt);
+ else
+ pkt->size= ret;
+
+ return ret;
+}
+
+/* This is a hack - the packet memory allocation stuff is broken. The
+   packet data is duplicated if it was not allocated by av_new_packet() */
+int av_dup_packet(AVPacket *pkt)
+{
+ if (pkt->destruct != av_destruct_packet) {
+ uint8_t *data;
+ /* we duplicate the packet and don't forget to put the padding
+ again */
+ if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR_NOMEM;
+ data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!data) {
+ return AVERROR_NOMEM;
+ }
+ memcpy(data, pkt->data, pkt->size);
+ memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+ pkt->data = data;
+ pkt->destruct = av_destruct_packet;
+ }
+ return 0;
+}
+
+/**
+ * Check whether filename actually is a valid numbered sequence string.
+ *
+ * @param filename possible numbered sequence string
+ * @return 1 if a valid numbered sequence string, 0 otherwise.
+ */
+int av_filename_number_test(const char *filename)
+{
+ char buf[1024];
+ return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
+}
+
+/**
+ * Guess file format.
+ */
+AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
+{
+ AVInputFormat *fmt1, *fmt;
+ int score, score_max;
+
+ fmt = NULL;
+ score_max = 0;
+ for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
+ if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
+ continue;
+ score = 0;
+ if (fmt1->read_probe) {
+ score = fmt1->read_probe(pd);
+ } else if (fmt1->extensions) {
+ if (match_ext(pd->filename, fmt1->extensions)) {
+ score = 50;
+ }
+ }
+ if (score > score_max) {
+ score_max = score;
+ fmt = fmt1;
+ }
+ }
+ return fmt;
+}
+
+/************************************************************/
+/* input media file */
+
+/**
+ * Open a media file from an IO stream. 'fmt' must be specified.
+ */
+static const char* format_to_name(void* ptr)
+{
+ AVFormatContext* fc = (AVFormatContext*) ptr;
+ if(fc->iformat) return fc->iformat->name;
+ else if(fc->oformat) return fc->oformat->name;
+ else return "NULL";
+}
+
+#define OFFSET(x) offsetof(AVFormatContext,x)
+#define DEFAULT 0 //should be NAN but it doesn't work as it's not a constant in glibc as required by ANSI/ISO C
+//these names are too long to be readable
+#define E AV_OPT_FLAG_ENCODING_PARAM
+#define D AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[]={
+{"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
+{"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
+{"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
+{"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
+{"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
+{"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
+{"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
+{"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
+{NULL},
+};
+
+#undef E
+#undef D
+#undef DEFAULT
+
+static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
+
+#if LIBAVFORMAT_VERSION_INT >= ((51<<16)+(0<<8)+0)
+static
+#endif
+void avformat_get_context_defaults(AVFormatContext *s){
+ memset(s, 0, sizeof(AVFormatContext));
+
+ s->av_class = &av_format_context_class;
+
+ av_opt_set_defaults(s);
+}
+
+AVFormatContext *av_alloc_format_context(void)
+{
+ AVFormatContext *ic;
+ ic = av_malloc(sizeof(AVFormatContext));
+ if (!ic) return ic;
+ avformat_get_context_defaults(ic);
+ ic->av_class = &av_format_context_class;
+ return ic;
+}
+
+/**
+ * Allocates all the structures needed to read an input stream.
+ * This does not open the needed codecs for decoding the stream[s].
+ */
+int av_open_input_stream(AVFormatContext **ic_ptr,
+ ByteIOContext *pb, const char *filename,
+ AVInputFormat *fmt, AVFormatParameters *ap)
+{
+ int err;
+ AVFormatContext *ic;
+ AVFormatParameters default_ap;
+
+ if(!ap){
+ ap=&default_ap;
+ memset(ap, 0, sizeof(default_ap));
+ }
+
+ if(!ap->prealloced_context)
+ ic = av_alloc_format_context();
+ else
+ ic = *ic_ptr;
+ if (!ic) {
+ err = AVERROR_NOMEM;
+ goto fail;
+ }
+ ic->iformat = fmt;
+ if (pb)
+ ic->pb = *pb;
+ ic->duration = AV_NOPTS_VALUE;
+ ic->start_time = AV_NOPTS_VALUE;
+ pstrcpy(ic->filename, sizeof(ic->filename), filename);
+
+ /* allocate private data */
+ if (fmt->priv_data_size > 0) {
+ ic->priv_data = av_mallocz(fmt->priv_data_size);
+ if (!ic->priv_data) {
+ err = AVERROR_NOMEM;
+ goto fail;
+ }
+ } else {
+ ic->priv_data = NULL;
+ }
+
+ err = ic->iformat->read_header(ic, ap);
+ if (err < 0)
+ goto fail;
+
+ if (pb && !ic->data_offset)
+ ic->data_offset = url_ftell(&ic->pb);
+
+ *ic_ptr = ic;
+ return 0;
+ fail:
+ if (ic) {
+ av_freep(&ic->priv_data);
+ }
+ av_free(ic);
+ *ic_ptr = NULL;
+ return err;
+}
+
+/** Size of probe buffer, for guessing file type from file contents. */
+#define PROBE_BUF_MIN 2048
+#define PROBE_BUF_MAX (1<<20)
+
+/**
+ * Open a media file as input. The codecs are not opened. Only the file
+ * header (if present) is read.
+ *
+ * @param ic_ptr the opened media file handle is put here
+ * @param filename filename to open.
+ * @param fmt if non-NULL, force the file format to use
+ * @param buf_size optional buffer size (zero if default is OK)
+ * @param ap additional parameters needed when opening the file (NULL if default)
+ * @return 0 if OK. AVERROR_xxx otherwise.
+ */
+int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
+ AVInputFormat *fmt,
+ int buf_size,
+ AVFormatParameters *ap)
+{
+ int err, must_open_file, file_opened, probe_size;
+ AVProbeData probe_data, *pd = &probe_data;
+ ByteIOContext pb1, *pb = &pb1;
+
+ file_opened = 0;
+ pd->filename = "";
+ if (filename)
+ pd->filename = filename;
+ pd->buf = NULL;
+ pd->buf_size = 0;
+
+ if (!fmt) {
+ /* guess format if no file can be opened */
+ fmt = av_probe_input_format(pd, 0);
+ }
+
+ /* do not open file if the format does not need it. XXX: specific
+ hack needed to handle RTSP/TCP */
+ must_open_file = 1;
+ if (fmt && (fmt->flags & AVFMT_NOFILE)) {
+ must_open_file = 0;
+ pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
+ }
+
+ if (!fmt || must_open_file) {
+ /* open and probe the file if the format needs one or is still unknown */
+ if (url_fopen(pb, filename, URL_RDONLY) < 0) {
+ err = AVERROR_IO;
+ goto fail;
+ }
+ file_opened = 1;
+ if (buf_size > 0) {
+ url_setbufsize(pb, buf_size);
+ }
+
+ for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
+ /* read probe data */
+ pd->buf= av_realloc(pd->buf, probe_size);
+ pd->buf_size = get_buffer(pb, pd->buf, probe_size);
+ if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
+ url_fclose(pb);
+ if (url_fopen(pb, filename, URL_RDONLY) < 0) {
+ file_opened = 0;
+ err = AVERROR_IO;
+ goto fail;
+ }
+ }
+ /* guess file format */
+ fmt = av_probe_input_format(pd, 1);
+ }
+ av_freep(&pd->buf);
+ }
+
+ /* if still no format found, error */
+ if (!fmt) {
+ err = AVERROR_NOFMT;
+ goto fail;
+ }
+
+ /* XXX: suppress this hack for redirectors */
+#ifdef CONFIG_NETWORK
+ if (fmt == &redir_demuxer) {
+ err = redir_open(ic_ptr, pb);
+ url_fclose(pb);
+ return err;
+ }
+#endif
+
+ /* check filename in case an image number is expected */
+ if (fmt->flags & AVFMT_NEEDNUMBER) {
+ if (!av_filename_number_test(filename)) {
+ err = AVERROR_NUMEXPECTED;
+ goto fail;
+ }
+ }
+ err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
+ if (err)
+ goto fail;
+ return 0;
+ fail:
+ av_freep(&pd->buf);
+ if (file_opened)
+ url_fclose(pb);
+ *ic_ptr = NULL;
+ return err;
+
+}
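A typical caller sequence for the probing path above, sketched under the assumption that dump_format() and av_close_input_file() (declared elsewhere in avformat.h) are available; "input.mpg" is a placeholder name and error handling is trimmed.

    AVFormatContext *ic;
    if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) == 0) {
        if (av_find_stream_info(ic) >= 0)          /* defined further below */
            dump_format(ic, 0, "input.mpg", 0);    /* print detected streams */
        av_close_input_file(ic);
    }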
+
+/*******************************************************/
+
+/**
+ * Read a transport packet from a media file.
+ *
+ * This function is obsolete and should never be used.
+ * Use av_read_frame() instead.
+ *
+ * @param s media file handle
+ * @param pkt is filled
+ * @return 0 if OK. AVERROR_xxx if error.
+ */
+int av_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ return s->iformat->read_packet(s, pkt);
+}
+
+/**********************************************************/
+
+/**
+ * Get the number of samples of an audio frame. Return -1 on error.
+ */
+static int get_audio_frame_size(AVCodecContext *enc, int size)
+{
+ int frame_size;
+
+ if (enc->frame_size <= 1) {
+ int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
+
+ if (bits_per_sample) {
+ if (enc->channels == 0)
+ return -1;
+ frame_size = (size << 3) / (bits_per_sample * enc->channels);
+ } else {
+ /* used for example by ADPCM codecs */
+ if (enc->bit_rate == 0)
+ return -1;
+ frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
+ }
+ } else {
+ frame_size = enc->frame_size;
+ }
+ return frame_size;
+}
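A worked instance of the fixed-bits-per-sample branch above, to make the arithmetic concrete.

    /* 16-bit stereo PCM, 4096-byte packet:
       frame_size = (4096 << 3) / (16 * 2) = 1024 samples per channel. */
    int size = 4096, bits_per_sample = 16, channels = 2;
    int frame_size = (size << 3) / (bits_per_sample * channels);  /* == 1024 */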
+
+
+/**
+ * Compute the frame duration as a fraction (*pnum / *pden, in seconds); both are set to 0 if not available.
+ */
+static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
+ AVCodecParserContext *pc, AVPacket *pkt)
+{
+ int frame_size;
+
+ *pnum = 0;
+ *pden = 0;
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ if(st->time_base.num*1000LL > st->time_base.den){
+ *pnum = st->time_base.num;
+ *pden = st->time_base.den;
+ }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
+ *pnum = st->codec->time_base.num;
+ *pden = st->codec->time_base.den;
+ if (pc && pc->repeat_pict) {
+ *pden *= 2;
+ *pnum = (*pnum) * (2 + pc->repeat_pict);
+ }
+ }
+ break;
+ case CODEC_TYPE_AUDIO:
+ frame_size = get_audio_frame_size(st->codec, pkt->size);
+ if (frame_size < 0)
+ break;
+ *pnum = frame_size;
+ *pden = st->codec->sample_rate;
+ break;
+ default:
+ break;
+ }
+}
+
+static int is_intra_only(AVCodecContext *enc){
+ if(enc->codec_type == CODEC_TYPE_AUDIO){
+ return 1;
+ }else if(enc->codec_type == CODEC_TYPE_VIDEO){
+ switch(enc->codec_id){
+ case CODEC_ID_MJPEG:
+ case CODEC_ID_MJPEGB:
+ case CODEC_ID_LJPEG:
+ case CODEC_ID_RAWVIDEO:
+ case CODEC_ID_DVVIDEO:
+ case CODEC_ID_HUFFYUV:
+ case CODEC_ID_FFVHUFF:
+ case CODEC_ID_ASV1:
+ case CODEC_ID_ASV2:
+ case CODEC_ID_VCR1:
+ return 1;
+ default: break;
+ }
+ }
+ return 0;
+}
+
+static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
+ int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
+ int64_t delta= last_ts - mask/2;
+ return ((lsb - delta)&mask) + delta;
+}
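To see what lsb2full() does with a wrapped 33-bit (MPEG-TS style) timestamp, here is a self-contained copy of the helper with a worked value; the duplication is only for illustration, since the function above is static.

    static int64_t lsb2full_demo(int64_t lsb, int64_t last_ts, int lsb_bits)
    {
        int64_t mask  = lsb_bits < 64 ? (1LL << lsb_bits) - 1 : -1LL;
        int64_t delta = last_ts - mask / 2;
        return ((lsb - delta) & mask) + delta;
    }
    /* last_ts just below the 33-bit wrap point, new lsb just after it:
       lsb2full_demo(5, (1LL << 33) - 1, 33) == (1LL << 33) + 5,
       i.e. the small value is read as a continuation, not a jump back. */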
+
+static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
+ AVCodecParserContext *pc, AVPacket *pkt)
+{
+ int num, den, presentation_delayed;
+ /* handle wrapping */
+ if(st->cur_dts != AV_NOPTS_VALUE){
+ if(pkt->pts != AV_NOPTS_VALUE)
+ pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
+ if(pkt->dts != AV_NOPTS_VALUE)
+ pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
+ }
+
+ if (pkt->duration == 0) {
+ compute_frame_duration(&num, &den, st, pc, pkt);
+ if (den && num) {
+ pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
+ }
+ }
+
+ if(is_intra_only(st->codec))
+ pkt->flags |= PKT_FLAG_KEY;
+
+ /* do we have a video B frame ? */
+ presentation_delayed = 0;
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ /* XXX: need has_b_frame, but cannot get it if the codec is
+ not initialized */
+ if (( st->codec->codec_id == CODEC_ID_H264
+ || st->codec->has_b_frames) &&
+ pc && pc->pict_type != FF_B_TYPE)
+ presentation_delayed = 1;
+ /* this may be redundant, but it shouldn't hurt */
+ if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
+ presentation_delayed = 1;
+ }
+
+ if(st->cur_dts == AV_NOPTS_VALUE){
+ if(presentation_delayed) st->cur_dts = -pkt->duration;
+ else st->cur_dts = 0;
+ }
+
+// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
+ /* interpolate PTS and DTS if they are not present */
+ if (presentation_delayed) {
+ /* DTS = decompression time stamp */
+ /* PTS = presentation time stamp */
+ if (pkt->dts == AV_NOPTS_VALUE) {
+ /* if we know the last pts, use it */
+ if(st->last_IP_pts != AV_NOPTS_VALUE)
+ st->cur_dts = pkt->dts = st->last_IP_pts;
+ else
+ pkt->dts = st->cur_dts;
+ } else {
+ st->cur_dts = pkt->dts;
+ }
+ /* this is tricky: the dts must be incremented by the duration
+ of the frame we are displaying, i.e. the last I or P frame */
+ if (st->last_IP_duration == 0)
+ st->cur_dts += pkt->duration;
+ else
+ st->cur_dts += st->last_IP_duration;
+ st->last_IP_duration = pkt->duration;
+ st->last_IP_pts= pkt->pts;
+ /* cannot compute PTS if not present (we can compute it only
+ by knowing the future) */
+ } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
+ if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
+ int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
+ int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
+ if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
+ pkt->pts += pkt->duration;
+// av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
+ }
+ }
+
+ /* presentation is not delayed : PTS and DTS are the same */
+ if (pkt->pts == AV_NOPTS_VALUE) {
+ if (pkt->dts == AV_NOPTS_VALUE) {
+ pkt->pts = st->cur_dts;
+ pkt->dts = st->cur_dts;
+ }
+ else {
+ st->cur_dts = pkt->dts;
+ pkt->pts = pkt->dts;
+ }
+ } else {
+ st->cur_dts = pkt->pts;
+ pkt->dts = pkt->pts;
+ }
+ st->cur_dts += pkt->duration;
+ }
+// av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
+
+ /* update flags */
+ if (pc) {
+ pkt->flags = 0;
+ /* key frame computation */
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ if (pc->pict_type == FF_I_TYPE)
+ pkt->flags |= PKT_FLAG_KEY;
+ break;
+ case CODEC_TYPE_AUDIO:
+ pkt->flags |= PKT_FLAG_KEY;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void av_destruct_packet_nofree(AVPacket *pkt)
+{
+ pkt->data = NULL; pkt->size = 0;
+}
+
+static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
+{
+ AVStream *st;
+ int len, ret, i;
+
+ for(;;) {
+ /* select current input stream component */
+ st = s->cur_st;
+ if (st) {
+ if (!st->need_parsing || !st->parser) {
+ /* no parsing needed: we just output the packet as is */
+ /* raw data support */
+ *pkt = s->cur_pkt;
+ compute_pkt_fields(s, st, NULL, pkt);
+ s->cur_st = NULL;
+ break;
+ } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
+ len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
+ s->cur_ptr, s->cur_len,
+ s->cur_pkt.pts, s->cur_pkt.dts);
+ s->cur_pkt.pts = AV_NOPTS_VALUE;
+ s->cur_pkt.dts = AV_NOPTS_VALUE;
+ /* increment read pointer */
+ s->cur_ptr += len;
+ s->cur_len -= len;
+
+ /* return packet if any */
+ if (pkt->size) {
+ got_packet:
+ pkt->duration = 0;
+ pkt->stream_index = st->index;
+ pkt->pts = st->parser->pts;
+ pkt->dts = st->parser->dts;
+ pkt->destruct = av_destruct_packet_nofree;
+ compute_pkt_fields(s, st, st->parser, pkt);
+ break;
+ }
+ } else {
+ /* free packet */
+ av_free_packet(&s->cur_pkt);
+ s->cur_st = NULL;
+ }
+ } else {
+ /* read next packet */
+ ret = av_read_packet(s, &s->cur_pkt);
+ if (ret < 0) {
+ if (ret == -EAGAIN)
+ return ret;
+ /* return the last frames, if any */
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ if (st->parser && st->need_parsing) {
+ av_parser_parse(st->parser, st->codec,
+ &pkt->data, &pkt->size,
+ NULL, 0,
+ AV_NOPTS_VALUE, AV_NOPTS_VALUE);
+ if (pkt->size)
+ goto got_packet;
+ }
+ }
+ /* no more packets: really terminates parsing */
+ return ret;
+ }
+
+ st = s->streams[s->cur_pkt.stream_index];
+ if(st->codec->debug & FF_DEBUG_PTS)
+ av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
+ s->cur_pkt.stream_index,
+ s->cur_pkt.pts,
+ s->cur_pkt.dts,
+ s->cur_pkt.size);
+
+ s->cur_st = st;
+ s->cur_ptr = s->cur_pkt.data;
+ s->cur_len = s->cur_pkt.size;
+ if (st->need_parsing && !st->parser) {
+ st->parser = av_parser_init(st->codec->codec_id);
+ if (!st->parser) {
+ /* no parser available : just output the raw packets */
+ st->need_parsing = 0;
+ }else if(st->need_parsing == 2){
+ st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+ }
+ }
+ }
+ }
+ if(st->codec->debug & FF_DEBUG_PTS)
+ av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
+ pkt->stream_index,
+ pkt->pts,
+ pkt->dts,
+ pkt->size);
+
+ return 0;
+}
+
+/**
+ * Return the next frame of a stream.
+ *
+ * The returned packet is valid
+ * until the next av_read_frame() or until av_close_input_file() and
+ * must be freed with av_free_packet. For video, the packet contains
+ * exactly one frame. For audio, it contains an integer number of
+ * frames if each frame has a known fixed size (e.g. PCM or ADPCM
+ * data). If the audio frames have a variable size (e.g. MPEG audio),
+ * then it contains one frame.
+ *
+ * pkt->pts, pkt->dts and pkt->duration are always set to correct
+ * values in AVStream.time_base units (and guessed if the format cannot
+ * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
+ * has B frames, so it is better to rely on pkt->dts if you do not
+ * decompress the payload.
+ *
+ * @return 0 if OK, < 0 if error or end of file.
+ */
+int av_read_frame(AVFormatContext *s, AVPacket *pkt)
+{
+ AVPacketList *pktl;
+ int eof=0;
+ const int genpts= s->flags & AVFMT_FLAG_GENPTS;
+
+ for(;;){
+ pktl = s->packet_buffer;
+ if (pktl) {
+ AVPacket *next_pkt= &pktl->pkt;
+
+ if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
+ while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
+ if( pktl->pkt.stream_index == next_pkt->stream_index
+ && next_pkt->dts < pktl->pkt.dts
+ && pktl->pkt.pts != pktl->pkt.dts //not b frame
+ /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
+ next_pkt->pts= pktl->pkt.dts;
+ }
+ pktl= pktl->next;
+ }
+ pktl = s->packet_buffer;
+ }
+
+ if( next_pkt->pts != AV_NOPTS_VALUE
+ || next_pkt->dts == AV_NOPTS_VALUE
+ || !genpts || eof){
+ /* read packet from packet buffer, if there is data */
+ *pkt = *next_pkt;
+ s->packet_buffer = pktl->next;
+ av_free(pktl);
+ return 0;
+ }
+ }
+ if(genpts){
+ AVPacketList **plast_pktl= &s->packet_buffer;
+ int ret= av_read_frame_internal(s, pkt);
+ if(ret<0){
+ if(pktl && ret != -EAGAIN){
+ eof=1;
+ continue;
+ }else
+ return ret;
+ }
+
+ /* duplicate the packet */
+ if (av_dup_packet(pkt) < 0)
+ return AVERROR_NOMEM;
+
+ while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
+
+ pktl = av_mallocz(sizeof(AVPacketList));
+ if (!pktl)
+ return AVERROR_NOMEM;
+
+ /* add the packet in the buffered packet list */
+ *plast_pktl = pktl;
+ pktl->pkt= *pkt;
+ }else{
+ assert(!s->packet_buffer);
+ return av_read_frame_internal(s, pkt);
+ }
+ }
+}
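The canonical consumption loop for the function documented above, as a sketch; what is done with each packet is left to the application.

    AVPacket pkt;
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index selects the matching ic->streams[] entry;
           pkt.dts is the safer timestamp to rely on, per the note above */
        av_free_packet(&pkt);
    }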
+
+/* XXX: suppress the packet queue */
+static void flush_packet_queue(AVFormatContext *s)
+{
+ AVPacketList *pktl;
+
+ for(;;) {
+ pktl = s->packet_buffer;
+ if (!pktl)
+ break;
+ s->packet_buffer = pktl->next;
+ av_free_packet(&pktl->pkt);
+ av_free(pktl);
+ }
+}
+
+/*******************************************************/
+/* seek support */
+
+int av_find_default_stream_index(AVFormatContext *s)
+{
+ int i;
+ AVStream *st;
+
+ if (s->nb_streams <= 0)
+ return -1;
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ return i;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Flush the frame reader.
+ */
+static void av_read_frame_flush(AVFormatContext *s)
+{
+ AVStream *st;
+ int i;
+
+ flush_packet_queue(s);
+
+ /* free previous packet */
+ if (s->cur_st) {
+ if (s->cur_st->parser)
+ av_free_packet(&s->cur_pkt);
+ s->cur_st = NULL;
+ }
+ /* fail safe */
+ s->cur_ptr = NULL;
+ s->cur_len = 0;
+
+ /* for each stream, reset read state */
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+
+ if (st->parser) {
+ av_parser_close(st->parser);
+ st->parser = NULL;
+ }
+ st->last_IP_pts = AV_NOPTS_VALUE;
+ st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
+ }
+}
+
+/**
+ * Updates cur_dts of all streams based on given timestamp and AVStream.
+ *
+ * Stream ref_st is unchanged; the others have cur_dts set in their native time base.
+ * Only needed for timestamp wrapping or if (dts not set and pts != dts).
+ * @param timestamp new dts expressed in time_base of param ref_st
+ * @param ref_st reference stream giving time_base of param timestamp
+ */
+void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
+ int i;
+
+ for(i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+
+ st->cur_dts = av_rescale(timestamp,
+ st->time_base.den * (int64_t)ref_st->time_base.num,
+ st->time_base.num * (int64_t)ref_st->time_base.den);
+ }
+}
+
+/**
+ * Add an index entry into a sorted list, updating it if it is already there.
+ *
+ * @param timestamp timestamp in the timebase of the given stream
+ */
+int av_add_index_entry(AVStream *st,
+ int64_t pos, int64_t timestamp, int size, int distance, int flags)
+{
+ AVIndexEntry *entries, *ie;
+ int index;
+
+ if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
+ return -1;
+
+ entries = av_fast_realloc(st->index_entries,
+ &st->index_entries_allocated_size,
+ (st->nb_index_entries + 1) *
+ sizeof(AVIndexEntry));
+ if(!entries)
+ return -1;
+
+ st->index_entries= entries;
+
+ index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
+
+ if(index<0){
+ index= st->nb_index_entries++;
+ ie= &entries[index];
+ assert(index==0 || ie[-1].timestamp < timestamp);
+ }else{
+ ie= &entries[index];
+ if(ie->timestamp != timestamp){
+ if(ie->timestamp <= timestamp)
+ return -1;
+ memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
+ st->nb_index_entries++;
+ }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
+ distance= ie->min_distance;
+ }
+
+ ie->pos = pos;
+ ie->timestamp = timestamp;
+ ie->min_distance= distance;
+ ie->size= size;
+ ie->flags = flags;
+
+ return index;
+}
+
+/**
+ * build an index for raw streams using a parser.
+ */
+static void av_build_index_raw(AVFormatContext *s)
+{
+ AVPacket pkt1, *pkt = &pkt1;
+ int ret;
+ AVStream *st;
+
+ st = s->streams[0];
+ av_read_frame_flush(s);
+ url_fseek(&s->pb, s->data_offset, SEEK_SET);
+
+ for(;;) {
+ ret = av_read_frame(s, pkt);
+ if (ret < 0)
+ break;
+ if (pkt->stream_index == 0 && st->parser &&
+ (pkt->flags & PKT_FLAG_KEY)) {
+ av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
+ 0, 0, AVINDEX_KEYFRAME);
+ }
+ av_free_packet(pkt);
+ }
+}
+
+/**
+ * Returns TRUE if we deal with a raw stream.
+ *
+ * Raw codec data and parsing needed.
+ */
+static int is_raw_stream(AVFormatContext *s)
+{
+ AVStream *st;
+
+ if (s->nb_streams != 1)
+ return 0;
+ st = s->streams[0];
+ if (!st->need_parsing)
+ return 0;
+ return 1;
+}
+
+/**
+ * Gets the index for a specific timestamp.
+ * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
+ * the timestamp which is <= the requested one, if backward is 0
+ * then it will be >=
+ * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
+ * @return < 0 if no such timestamp could be found
+ */
+int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
+ int flags)
+{
+ AVIndexEntry *entries= st->index_entries;
+ int nb_entries= st->nb_index_entries;
+ int a, b, m;
+ int64_t timestamp;
+
+ a = - 1;
+ b = nb_entries;
+
+ while (b - a > 1) {
+ m = (a + b) >> 1;
+ timestamp = entries[m].timestamp;
+ if(timestamp >= wanted_timestamp)
+ b = m;
+ if(timestamp <= wanted_timestamp)
+ a = m;
+ }
+ m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
+
+ if(!(flags & AVSEEK_FLAG_ANY)){
+ while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
+ m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
+ }
+ }
+
+ if(m == nb_entries)
+ return -1;
+ return m;
+}
+
+#define DEBUG_SEEK
+
+/**
+ * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
+ * This isn't supposed to be called directly by a user application, but by demuxers.
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
+int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
+ AVInputFormat *avif= s->iformat;
+ int64_t pos_min, pos_max, pos, pos_limit;
+ int64_t ts_min, ts_max, ts;
+ int index;
+ AVStream *st;
+
+ if (stream_index < 0)
+ return -1;
+
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
+#endif
+
+ ts_max=
+ ts_min= AV_NOPTS_VALUE;
+ pos_limit= -1; //gcc falsely says it may be uninitialized
+
+ st= s->streams[stream_index];
+ if(st->index_entries){
+ AVIndexEntry *e;
+
+ index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
+ index= FFMAX(index, 0);
+ e= &st->index_entries[index];
+
+ if(e->timestamp <= target_ts || e->pos == e->min_distance){
+ pos_min= e->pos;
+ ts_min= e->timestamp;
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
+ pos_min,ts_min);
+#endif
+ }else{
+ assert(index==0);
+ }
+
+ index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
+ assert(index < st->nb_index_entries);
+ if(index >= 0){
+ e= &st->index_entries[index];
+ assert(e->timestamp >= target_ts);
+ pos_max= e->pos;
+ ts_max= e->timestamp;
+ pos_limit= pos_max - e->min_distance;
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
+ pos_max,pos_limit, ts_max);
+#endif
+ }
+ }
+
+ pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
+ if(pos<0)
+ return -1;
+
+ /* do the seek */
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+ av_update_cur_dts(s, st, ts);
+
+ return 0;
+}
+
+/**
+ * Does a binary search using read_timestamp().
+ * This isn't supposed to be called directly by a user application, but by demuxers.
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
+int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
+ int64_t pos, ts;
+ int64_t start_pos, filesize;
+ int no_change;
+
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
+#endif
+
+ if(ts_min == AV_NOPTS_VALUE){
+ pos_min = s->data_offset;
+ ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+ if (ts_min == AV_NOPTS_VALUE)
+ return -1;
+ }
+
+ if(ts_max == AV_NOPTS_VALUE){
+ int step= 1024;
+ filesize = url_fsize(&s->pb);
+ pos_max = filesize - 1;
+ do{
+ pos_max -= step;
+ ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
+ step += step;
+ }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
+ if (ts_max == AV_NOPTS_VALUE)
+ return -1;
+
+ for(;;){
+ int64_t tmp_pos= pos_max + 1;
+ int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
+ if(tmp_ts == AV_NOPTS_VALUE)
+ break;
+ ts_max= tmp_ts;
+ pos_max= tmp_pos;
+ if(tmp_pos >= filesize)
+ break;
+ }
+ pos_limit= pos_max;
+ }
+
+ if(ts_min > ts_max){
+ return -1;
+ }else if(ts_min == ts_max){
+ pos_limit= pos_min;
+ }
+
+ no_change=0;
+ while (pos_min < pos_limit) {
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
+ pos_min, pos_max,
+ ts_min, ts_max);
+#endif
+ assert(pos_limit <= pos_max);
+
+ if(no_change==0){
+ int64_t approximate_keyframe_distance= pos_max - pos_limit;
+ // interpolate position (better than dichotomy)
+ pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
+ + pos_min - approximate_keyframe_distance;
+ }else if(no_change==1){
+ // bisection, if interpolation failed to change min or max pos last time
+ pos = (pos_min + pos_limit)>>1;
+ }else{
+ // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
+ pos=pos_min;
+ }
+ if(pos <= pos_min)
+ pos= pos_min + 1;
+ else if(pos > pos_limit)
+ pos= pos_limit;
+ start_pos= pos;
+
+ ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
+ if(pos == pos_max)
+ no_change++;
+ else
+ no_change=0;
+#ifdef DEBUG_SEEK
+av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
+#endif
+ assert(ts != AV_NOPTS_VALUE);
+ if (target_ts <= ts) {
+ pos_limit = start_pos - 1;
+ pos_max = pos;
+ ts_max = ts;
+ }
+ if (target_ts >= ts) {
+ pos_min = pos;
+ ts_min = ts;
+ }
+ }
+
+ pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
+ ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
+#ifdef DEBUG_SEEK
+ pos_min = pos;
+ ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+ pos_min++;
+ ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+ av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
+ pos, ts_min, target_ts, ts_max);
+#endif
+ *ts_ret= ts;
+ return pos;
+}
+
+static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
+ int64_t pos_min, pos_max;
+#if 0
+ AVStream *st;
+
+ if (stream_index < 0)
+ return -1;
+
+ st= s->streams[stream_index];
+#endif
+
+ pos_min = s->data_offset;
+ pos_max = url_fsize(&s->pb) - 1;
+
+ if (pos < pos_min) pos= pos_min;
+ else if(pos > pos_max) pos= pos_max;
+
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+#if 0
+ av_update_cur_dts(s, st, ts);
+#endif
+ return 0;
+}
+
+static int av_seek_frame_generic(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ int index;
+ AVStream *st;
+ AVIndexEntry *ie;
+
+ if (!s->index_built) {
+ if (is_raw_stream(s)) {
+ av_build_index_raw(s);
+ } else {
+ return -1;
+ }
+ s->index_built = 1;
+ }
+
+ st = s->streams[stream_index];
+ index = av_index_search_timestamp(st, timestamp, flags);
+ if (index < 0)
+ return -1;
+
+ /* now we have found the index, we can seek */
+ ie = &st->index_entries[index];
+ av_read_frame_flush(s);
+ url_fseek(&s->pb, ie->pos, SEEK_SET);
+
+ av_update_cur_dts(s, st, ie->timestamp);
+
+ return 0;
+}
+
+/**
+ * Seek to the keyframe at 'timestamp' in 'stream_index'.
+ * @param stream_index If stream_index is (-1), a default
+ * stream is selected, and timestamp is automatically converted
+ * from AV_TIME_BASE units to the stream specific time_base.
+ * @param timestamp timestamp in AVStream.time_base units
+ * or if there is no stream specified then in AV_TIME_BASE units
+ * @param flags flags which select direction and seeking mode
+ * @return >= 0 on success
+ */
+int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+ int ret;
+ AVStream *st;
+
+ av_read_frame_flush(s);
+
+ if(flags & AVSEEK_FLAG_BYTE)
+ return av_seek_frame_byte(s, stream_index, timestamp, flags);
+
+ if(stream_index < 0){
+ stream_index= av_find_default_stream_index(s);
+ if(stream_index < 0)
+ return -1;
+
+ st= s->streams[stream_index];
+ /* timestamp for default must be expressed in AV_TIME_BASE units */
+ timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
+ }
+ st= s->streams[stream_index];
+
+ /* first, we try the format specific seek */
+ if (s->iformat->read_seek)
+ ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
+ else
+ ret = -1;
+ if (ret >= 0) {
+ return 0;
+ }
+
+ if(s->iformat->read_timestamp)
+ return av_seek_frame_binary(s, stream_index, timestamp, flags);
+ else
+ return av_seek_frame_generic(s, stream_index, timestamp, flags);
+}
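A hedged usage sketch of the stream_index == -1 convention documented above, where the timestamp must be expressed in AV_TIME_BASE units.

    /* Seek backwards to roughly the 10-second mark on the default stream. */
    int64_t ts = 10 * (int64_t)AV_TIME_BASE;
    if (av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD) < 0)
        av_log(ic, AV_LOG_ERROR, "seek to %0.1f s failed\n", ts / (double)AV_TIME_BASE);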
+
+/*******************************************************/
+
+/**
+ * Returns TRUE if the file has accurate timings in at least one stream.
+ *
+ * @return TRUE if the stream has accurate timings for at least one component.
+ */
+static int av_has_timings(AVFormatContext *ic)
+{
+ int i;
+ AVStream *st;
+
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->start_time != AV_NOPTS_VALUE &&
+ st->duration != AV_NOPTS_VALUE)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Estimate the stream timings from the ones of each component.
+ *
+ * Also computes the global bitrate if possible.
+ */
+static void av_update_stream_timings(AVFormatContext *ic)
+{
+ int64_t start_time, start_time1, end_time, end_time1;
+ int i;
+ AVStream *st;
+
+ start_time = MAXINT64;
+ end_time = MININT64;
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->start_time != AV_NOPTS_VALUE) {
+ start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
+ if (start_time1 < start_time)
+ start_time = start_time1;
+ if (st->duration != AV_NOPTS_VALUE) {
+ end_time1 = start_time1
+ + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
+ if (end_time1 > end_time)
+ end_time = end_time1;
+ }
+ }
+ }
+ if (start_time != MAXINT64) {
+ ic->start_time = start_time;
+ if (end_time != MININT64) {
+ ic->duration = end_time - start_time;
+ if (ic->file_size > 0) {
+ /* compute the bit rate */
+ ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
+ (double)ic->duration;
+ }
+ }
+ }
+
+}
+
+static void fill_all_stream_timings(AVFormatContext *ic)
+{
+ int i;
+ AVStream *st;
+
+ av_update_stream_timings(ic);
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->start_time == AV_NOPTS_VALUE) {
+ if(ic->start_time != AV_NOPTS_VALUE)
+ st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
+ if(ic->duration != AV_NOPTS_VALUE)
+ st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
+ }
+ }
+}
+
+static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
+{
+ int64_t filesize, duration;
+ int bit_rate, i;
+ AVStream *st;
+
+ /* if bit_rate is already set, we believe it */
+ if (ic->bit_rate == 0) {
+ bit_rate = 0;
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ bit_rate += st->codec->bit_rate;
+ }
+ ic->bit_rate = bit_rate;
+ }
+
+ /* if duration is already set, we believe it */
+ if (ic->duration == AV_NOPTS_VALUE &&
+ ic->bit_rate != 0 &&
+ ic->file_size != 0) {
+ filesize = ic->file_size;
+ if (filesize > 0) {
+ for(i = 0; i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
+ if (st->start_time == AV_NOPTS_VALUE ||
+ st->duration == AV_NOPTS_VALUE) {
+ st->start_time = 0;
+ st->duration = duration;
+ }
+ }
+ }
+ }
+}
+
+#define DURATION_MAX_READ_SIZE 250000
+
+/* only usable for MPEG-PS streams */
+static void av_estimate_timings_from_pts(AVFormatContext *ic)
+{
+ AVPacket pkt1, *pkt = &pkt1;
+ AVStream *st;
+ int read_size, i, ret;
+ int64_t end_time;
+ int64_t filesize, offset, duration;
+
+ /* free previous packet */
+ if (ic->cur_st && ic->cur_st->parser)
+ av_free_packet(&ic->cur_pkt);
+ ic->cur_st = NULL;
+
+ /* flush packet queue */
+ flush_packet_queue(ic);
+
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (st->parser) {
+ av_parser_close(st->parser);
+ st->parser= NULL;
+ }
+ }
+
+ /* we read the first packets to get the first PTS (not fully
+ accurate, but it is enough now) */
+ url_fseek(&ic->pb, 0, SEEK_SET);
+ read_size = 0;
+ for(;;) {
+ if (read_size >= DURATION_MAX_READ_SIZE)
+ break;
+ /* if all info is available, we can stop */
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->start_time == AV_NOPTS_VALUE)
+ break;
+ }
+ if (i == ic->nb_streams)
+ break;
+
+ ret = av_read_packet(ic, pkt);
+ if (ret != 0)
+ break;
+ read_size += pkt->size;
+ st = ic->streams[pkt->stream_index];
+ if (pkt->pts != AV_NOPTS_VALUE) {
+ if (st->start_time == AV_NOPTS_VALUE)
+ st->start_time = pkt->pts;
+ }
+ av_free_packet(pkt);
+ }
+
+ /* estimate the end time (duration) */
+ /* XXX: may need to support wrapping */
+ filesize = ic->file_size;
+ offset = filesize - DURATION_MAX_READ_SIZE;
+ if (offset < 0)
+ offset = 0;
+
+ url_fseek(&ic->pb, offset, SEEK_SET);
+ read_size = 0;
+ for(;;) {
+ if (read_size >= DURATION_MAX_READ_SIZE)
+ break;
+ /* if all info is available, we can stop */
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->duration == AV_NOPTS_VALUE)
+ break;
+ }
+ if (i == ic->nb_streams)
+ break;
+
+ ret = av_read_packet(ic, pkt);
+ if (ret != 0)
+ break;
+ read_size += pkt->size;
+ st = ic->streams[pkt->stream_index];
+ if (pkt->pts != AV_NOPTS_VALUE) {
+ end_time = pkt->pts;
+ duration = end_time - st->start_time;
+ if (duration > 0) {
+ if (st->duration == AV_NOPTS_VALUE ||
+ st->duration < duration)
+ st->duration = duration;
+ }
+ }
+ av_free_packet(pkt);
+ }
+
+ fill_all_stream_timings(ic);
+
+ url_fseek(&ic->pb, 0, SEEK_SET);
+}
+
+static void av_estimate_timings(AVFormatContext *ic)
+{
+ int64_t file_size;
+
+ /* get the file size, if possible */
+ if (ic->iformat->flags & AVFMT_NOFILE) {
+ file_size = 0;
+ } else {
+ file_size = url_fsize(&ic->pb);
+ if (file_size < 0)
+ file_size = 0;
+ }
+ ic->file_size = file_size;
+
+ if ((!strcmp(ic->iformat->name, "mpeg") ||
+ !strcmp(ic->iformat->name, "mpegts")) &&
+ file_size && !ic->pb.is_streamed) {
+ /* get accurate estimate from the PTSes */
+ av_estimate_timings_from_pts(ic);
+ } else if (av_has_timings(ic)) {
+ /* at least one component has timings - we use them for all
+ the components */
+ fill_all_stream_timings(ic);
+ } else {
+ /* less precise: use bit rate info */
+ av_estimate_timings_from_bit_rate(ic);
+ }
+ av_update_stream_timings(ic);
+
+#if 0
+ {
+ int i;
+ AVStream *st;
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ printf("%d: start_time: %0.3f duration: %0.3f\n",
+ i, (double)st->start_time / AV_TIME_BASE,
+ (double)st->duration / AV_TIME_BASE);
+ }
+ printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
+ (double)ic->start_time / AV_TIME_BASE,
+ (double)ic->duration / AV_TIME_BASE,
+ ic->bit_rate / 1000);
+ }
+#endif
+}
+
+static int has_codec_parameters(AVCodecContext *enc)
+{
+ int val;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ val = enc->sample_rate;
+ break;
+ case CODEC_TYPE_VIDEO:
+ val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
+ break;
+ default:
+ val = 1;
+ break;
+ }
+ return (val != 0);
+}
+
+static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
+{
+ int16_t *samples;
+ AVCodec *codec;
+ int got_picture, ret=0;
+ AVFrame picture;
+
+ if(!st->codec->codec){
+ codec = avcodec_find_decoder(st->codec->codec_id);
+ if (!codec)
+ return -1;
+ ret = avcodec_open(st->codec, codec);
+ if (ret < 0)
+ return ret;
+ }
+
+ if(!has_codec_parameters(st->codec)){
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ ret = avcodec_decode_video(st->codec, &picture,
+ &got_picture, (uint8_t *)data, size);
+ break;
+ case CODEC_TYPE_AUDIO:
+ samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ if (!samples)
+ goto fail;
+ ret = avcodec_decode_audio(st->codec, samples,
+ &got_picture, (uint8_t *)data, size);
+ av_free(samples);
+ break;
+ default:
+ break;
+ }
+ }
+ fail:
+ return ret;
+}
+
+/* absolute maximum size we read until we abort */
+#define MAX_READ_SIZE 5000000
+
+/* maximum duration until we stop analysing the stream */
+#define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 3.0))
+
+/**
+ * Read the beginning of a media file to get stream information. This
+ * is useful for file formats with no headers such as MPEG. This
+ * function also compute the real frame rate in case of mpeg2 repeat
+ * frame mode.
+ *
+ * @param ic media file handle
+ * @return >=0 if OK. AVERROR_xxx if error.
+ * @todo let user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
+ */
+int av_find_stream_info(AVFormatContext *ic)
+{
+ int i, count, ret, read_size, j;
+ AVStream *st;
+ AVPacket pkt1, *pkt;
+ AVPacketList *pktl=NULL, **ppktl;
+ int64_t last_dts[MAX_STREAMS];
+ int64_t duration_sum[MAX_STREAMS];
+ int duration_count[MAX_STREAMS]={0};
+
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO){
+/* if(!st->time_base.num)
+ st->time_base= */
+ if(!st->codec->time_base.num)
+ st->codec->time_base= st->time_base;
+ }
+ //only for the split stuff
+ if (!st->parser) {
+ st->parser = av_parser_init(st->codec->codec_id);
+ if(st->need_parsing == 2 && st->parser){
+ st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+ }
+ }
+ }
+
+ for(i=0;i<MAX_STREAMS;i++){
+ last_dts[i]= AV_NOPTS_VALUE;
+ duration_sum[i]= INT64_MAX;
+ }
+
+ count = 0;
+ read_size = 0;
+ ppktl = &ic->packet_buffer;
+ for(;;) {
+ /* check if one codec still needs to be handled */
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (!has_codec_parameters(st->codec))
+ break;
+ /* variable fps and no guess at the real fps */
+ if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
+ && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
+ break;
+ if(st->parser && st->parser->parser->split && !st->codec->extradata)
+ break;
+ }
+ if (i == ic->nb_streams) {
+ /* NOTE: if the format has no header, then we need to read
+ some packets to get most of the streams, so we cannot
+ stop here */
+ if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
+ /* if we found the info for all the codecs, we can stop */
+ ret = count;
+ break;
+ }
+ }
+ /* we did not get all the codec info, but we read too much data */
+ if (read_size >= MAX_READ_SIZE) {
+ ret = count;
+ break;
+ }
+
+ /* NOTE: a new stream can be added here if there is no header in the
+ file (AVFMTCTX_NOHEADER) */
+ ret = av_read_frame_internal(ic, &pkt1);
+ if (ret < 0) {
+ /* EOF or error */
+ ret = -1; /* we could not have all the codec parameters before EOF */
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (!has_codec_parameters(st->codec)){
+ char buf[256];
+ avcodec_string(buf, sizeof(buf), st->codec, 0);
+ av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
+ } else {
+ ret = 0;
+ }
+ }
+ break;
+ }
+
+ pktl = av_mallocz(sizeof(AVPacketList));
+ if (!pktl) {
+ ret = AVERROR_NOMEM;
+ break;
+ }
+
+ /* add the packet in the buffered packet list */
+ *ppktl = pktl;
+ ppktl = &pktl->next;
+
+ pkt = &pktl->pkt;
+ *pkt = pkt1;
+
+ /* duplicate the packet */
+ if (av_dup_packet(pkt) < 0) {
+ ret = AVERROR_NOMEM;
+ break;
+ }
+
+ read_size += pkt->size;
+
+ st = ic->streams[pkt->stream_index];
+ st->codec_info_duration += pkt->duration;
+ if (pkt->duration != 0)
+ st->codec_info_nb_frames++;
+
+ {
+ int index= pkt->stream_index;
+ int64_t last= last_dts[index];
+ int64_t duration= pkt->dts - last;
+
+ if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
+ if(duration*duration_count[index]*10/9 < duration_sum[index]){
+ duration_sum[index]= duration;
+ duration_count[index]=1;
+ }else{
+ int factor= av_rescale(2*duration, duration_count[index], duration_sum[index]);
+ if(factor==3)
+ duration_count[index] *= 2;
+ factor= av_rescale(duration, duration_count[index], duration_sum[index]);
+ duration_sum[index] += duration;
+ duration_count[index]+= factor;
+ }
+ if(st->codec_info_nb_frames == 0 && 0)
+ st->codec_info_duration += duration;
+ }
+ last_dts[pkt->stream_index]= pkt->dts;
+ }
+ if(st->parser && st->parser->parser->split && !st->codec->extradata){
+ int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
+ if(i){
+ st->codec->extradata_size= i;
+ st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
+ memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+ }
+ }
+
+ /* if still no information, we try to open the codec and to
+ decompress the frame. We try to avoid that in most cases as
+ it takes longer and uses more memory. For MPEG4, we need to
+ decompress for Quicktime. */
+ if (!has_codec_parameters(st->codec) /*&&
+ (st->codec->codec_id == CODEC_ID_FLV1 ||
+ st->codec->codec_id == CODEC_ID_H264 ||
+ st->codec->codec_id == CODEC_ID_H263 ||
+ st->codec->codec_id == CODEC_ID_H261 ||
+ st->codec->codec_id == CODEC_ID_VORBIS ||
+ st->codec->codec_id == CODEC_ID_MJPEG ||
+ st->codec->codec_id == CODEC_ID_PNG ||
+ st->codec->codec_id == CODEC_ID_PAM ||
+ st->codec->codec_id == CODEC_ID_PGM ||
+ st->codec->codec_id == CODEC_ID_PGMYUV ||
+ st->codec->codec_id == CODEC_ID_PBM ||
+ st->codec->codec_id == CODEC_ID_PPM ||
+ st->codec->codec_id == CODEC_ID_SHORTEN ||
+ (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
+ try_decode_frame(st, pkt->data, pkt->size);
+
+ if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
+ break;
+ }
+ count++;
+ }
+
+    // close codecs which were opened in try_decode_frame()
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if(st->codec->codec)
+ avcodec_close(st->codec);
+ }
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
+ st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
+
+ if(duration_count[i]
+ && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
+ //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
+ st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den){
+ int64_t num, den, error, best_error;
+
+ num= st->time_base.den*duration_count[i];
+ den= st->time_base.num*duration_sum[i];
+
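+            /* look for a "nice" frame rate close to num/den: multiples of
+               1/12 first, then the common 24000/30000/60000 over 1001 rates */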
+ best_error= INT64_MAX;
+ for(j=1; j<60*12; j++){
+ error= FFABS(1001*12*num - 1001*j*den);
+ if(error < best_error){
+ best_error= error;
+ av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j, 12, INT_MAX);
+ }
+ }
+ for(j=0; j<3; j++){
+ static const int ticks[]= {24,30,60};
+ error= FFABS(1001*12*num - 1000*12*den * ticks[j]);
+ if(error < best_error){
+ best_error= error;
+ av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, ticks[j]*1000, 1001, INT_MAX);
+ }
+ }
+ }
+
+ if (!st->r_frame_rate.num){
+ if( st->codec->time_base.den * (int64_t)st->time_base.num
+ <= st->codec->time_base.num * (int64_t)st->time_base.den){
+ st->r_frame_rate.num = st->codec->time_base.den;
+ st->r_frame_rate.den = st->codec->time_base.num;
+ }else{
+ st->r_frame_rate.num = st->time_base.den;
+ st->r_frame_rate.den = st->time_base.num;
+ }
+ }
+ }
+ }
+
+ av_estimate_timings(ic);
+#if 0
+ /* correct DTS for b frame streams with no timestamps */
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if(b-frames){
+ ppktl = &ic->packet_buffer;
+ while(ppkt1){
+ if(ppkt1->stream_index != i)
+ continue;
+ if(ppkt1->pkt->dts < 0)
+ break;
+ if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
+ break;
+ ppkt1->pkt->dts -= delta;
+ ppkt1= ppkt1->next;
+ }
+ if(ppkt1)
+ continue;
+ st->cur_dts -= delta;
+ }
+ }
+ }
+#endif
+ return ret;
+}
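+
+/* A minimal demuxing sketch, assuming the usual libavformat entry points
+ * av_open_input_file() and av_read_frame(), which are defined elsewhere in
+ * this library; error handling is reduced to early returns:
+ * @code
+ * AVFormatContext *ic;
+ * AVPacket pkt;
+ * if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
+ *     return -1;
+ * if (av_find_stream_info(ic) < 0)      // fill in the codec parameters
+ *     return -1;
+ * dump_format(ic, 0, filename, 0);      // print what was detected
+ * while (av_read_frame(ic, &pkt) >= 0) {
+ *     // ... process pkt.stream_index / pkt.data / pkt.size ...
+ *     av_free_packet(&pkt);
+ * }
+ * av_close_input_file(ic);
+ * @endcode
+ */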
+
+/*******************************************************/
+
+/**
+ * Start playing a network-based stream (e.g. RTSP stream) at the
+ * current position
+ */
+int av_read_play(AVFormatContext *s)
+{
+ if (!s->iformat->read_play)
+ return AVERROR_NOTSUPP;
+ return s->iformat->read_play(s);
+}
+
+/**
+ * Pause a network-based stream (e.g. RTSP stream).
+ *
+ * Use av_read_play() to resume it.
+ */
+int av_read_pause(AVFormatContext *s)
+{
+ if (!s->iformat->read_pause)
+ return AVERROR_NOTSUPP;
+ return s->iformat->read_pause(s);
+}
+
+/**
+ * Close a media file (but not its codecs).
+ *
+ * @param s media file handle
+ */
+void av_close_input_file(AVFormatContext *s)
+{
+ int i, must_open_file;
+ AVStream *st;
+
+ /* free previous packet */
+ if (s->cur_st && s->cur_st->parser)
+ av_free_packet(&s->cur_pkt);
+
+ if (s->iformat->read_close)
+ s->iformat->read_close(s);
+ for(i=0;i<s->nb_streams;i++) {
+ /* free all data in a stream component */
+ st = s->streams[i];
+ if (st->parser) {
+ av_parser_close(st->parser);
+ }
+ av_free(st->index_entries);
+ av_free(st->codec->extradata);
+ av_free(st->codec);
+ av_free(st);
+ }
+ flush_packet_queue(s);
+ must_open_file = 1;
+ if (s->iformat->flags & AVFMT_NOFILE) {
+ must_open_file = 0;
+ }
+ if (must_open_file) {
+ url_fclose(&s->pb);
+ }
+ av_freep(&s->priv_data);
+ av_free(s);
+}
+
+/**
+ * Add a new stream to a media file.
+ *
+ * Can only be called in the read_header() function. If the flag
+ * AVFMTCTX_NOHEADER is in the format context, then new streams
+ * can be added in read_packet too.
+ *
+ * @param s media file handle
+ * @param id file format dependent stream id
+ */
+AVStream *av_new_stream(AVFormatContext *s, int id)
+{
+ AVStream *st;
+ int i;
+
+ if (s->nb_streams >= MAX_STREAMS)
+ return NULL;
+
+ st = av_mallocz(sizeof(AVStream));
+ if (!st)
+ return NULL;
+
+ st->codec= avcodec_alloc_context();
+ if (s->iformat) {
+ /* no default bitrate if decoding */
+ st->codec->bit_rate = 0;
+ }
+ st->index = s->nb_streams;
+ st->id = id;
+ st->start_time = AV_NOPTS_VALUE;
+ st->duration = AV_NOPTS_VALUE;
+ st->cur_dts = AV_NOPTS_VALUE;
+
+    /* default pts setting is MPEG-like */
+ av_set_pts_info(st, 33, 1, 90000);
+ st->last_IP_pts = AV_NOPTS_VALUE;
+ for(i=0; i<MAX_REORDER_DELAY+1; i++)
+ st->pts_buffer[i]= AV_NOPTS_VALUE;
+
+ s->streams[s->nb_streams++] = st;
+ return st;
+}
+
+/************************************************************/
+/* output media file */
+
+int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
+{
+ int ret;
+
+ if (s->oformat->priv_data_size > 0) {
+ s->priv_data = av_mallocz(s->oformat->priv_data_size);
+ if (!s->priv_data)
+ return AVERROR_NOMEM;
+ } else
+ s->priv_data = NULL;
+
+ if (s->oformat->set_parameters) {
+ ret = s->oformat->set_parameters(s, ap);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * Allocate the stream private data and write the stream header to an
+ * output media file
+ *
+ * @param s media file handle
+ * @return 0 if OK. AVERROR_xxx if error.
+ */
+int av_write_header(AVFormatContext *s)
+{
+ int ret, i;
+ AVStream *st;
+
+ // some sanity checks
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+
+ switch (st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if(st->codec->sample_rate<=0){
+ av_log(s, AV_LOG_ERROR, "sample rate not set\n");
+ return -1;
+ }
+ break;
+ case CODEC_TYPE_VIDEO:
+ if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
+ av_log(s, AV_LOG_ERROR, "time base not set\n");
+ return -1;
+ }
+ if(st->codec->width<=0 || st->codec->height<=0){
+ av_log(s, AV_LOG_ERROR, "dimensions not set\n");
+ return -1;
+ }
+ break;
+ }
+ }
+
+ if(s->oformat->write_header){
+ ret = s->oformat->write_header(s);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* init PTS generation */
+ for(i=0;i<s->nb_streams;i++) {
+ int64_t den = AV_NOPTS_VALUE;
+ st = s->streams[i];
+
+ switch (st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ den = (int64_t)st->time_base.num * st->codec->sample_rate;
+ break;
+ case CODEC_TYPE_VIDEO:
+ den = (int64_t)st->time_base.num * st->codec->time_base.den;
+ break;
+ default:
+ break;
+ }
+ if (den != AV_NOPTS_VALUE) {
+ if (den <= 0)
+ return AVERROR_INVALIDDATA;
+ av_frac_init(&st->pts, 0, 0, den);
+ }
+ }
+ return 0;
+}
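+
+/* A muxing-side setup sketch, assuming `oc` is an AVFormatContext whose
+ * oformat has already been chosen by the caller; the codec_id is just an
+ * example value, and the fields set here are the ones the sanity checks
+ * above require:
+ * @code
+ * AVStream *st = av_new_stream(oc, 0);
+ * st->codec->codec_type    = CODEC_TYPE_VIDEO;
+ * st->codec->codec_id      = CODEC_ID_MPEG4;
+ * st->codec->width         = 640;   // dimensions must be set
+ * st->codec->height        = 480;
+ * st->codec->time_base.num = 1;     // time base must be set
+ * st->codec->time_base.den = 25;
+ * if (av_set_parameters(oc, NULL) < 0 || av_write_header(oc) < 0)
+ *     return -1;
+ * @endcode
+ */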
+
+//FIXME merge with compute_pkt_fields
+static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
+ int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
+ int num, den, frame_size, i;
+
+// av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
+
+/* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
+ return -1;*/
+
+ /* duration field */
+ if (pkt->duration == 0) {
+ compute_frame_duration(&num, &den, st, NULL, pkt);
+ if (den && num) {
+ pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
+ }
+ }
+
+ //XXX/FIXME this is a temporary hack until all encoders output pts
+ if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
+ pkt->dts=
+// pkt->pts= st->cur_dts;
+ pkt->pts= st->pts.val;
+ }
+
+ //calculate dts from pts
+ if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
+ st->pts_buffer[0]= pkt->pts;
+ for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
+ st->pts_buffer[i]= (i-delay-1) * pkt->duration;
+ for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
+ FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
+
+ pkt->dts= st->pts_buffer[0];
+ }
+
+ if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
+ av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
+ return -1;
+ }
+ if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
+ av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
+ return -1;
+ }
+
+// av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
+ st->cur_dts= pkt->dts;
+ st->pts.val= pkt->dts;
+
+ /* update pts */
+ switch (st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ frame_size = get_audio_frame_size(st->codec, pkt->size);
+
+ /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
+ but it would be better if we had the real timestamps from the encoder */
+ if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
+ av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
+ }
+ break;
+ case CODEC_TYPE_VIDEO:
+ av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void truncate_ts(AVStream *st, AVPacket *pkt){
+ int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
+
+// if(pkt->dts < 0)
+//        pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
+
+ pkt->pts &= pts_mask;
+ pkt->dts &= pts_mask;
+}
+
+/**
+ * Write a packet to an output media file.
+ *
+ * The packet shall contain one audio or video frame.
+ *
+ * @param s media file handle
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
+ * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
+ */
+int av_write_frame(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret;
+
+ ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
+ if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+ return ret;
+
+ truncate_ts(s->streams[pkt->stream_index], pkt);
+
+ ret= s->oformat->write_packet(s, pkt);
+ if(!ret)
+ ret= url_ferror(&s->pb);
+ return ret;
+}
+
+/**
+ * Interleave a packet per DTS in an output media file.
+ *
+ * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
+ * so they cannot be used after it; note that calling av_free_packet() on them is still safe.
+ *
+ * @param s media file handle
+ * @param out the interleaved packet will be output here
+ * @param in the input packet
+ * @param flush 1 if no further packets are available as input and all
+ * remaining packets should be output
+ * @return 1 if a packet was output, 0 if no packet could be output,
+ * < 0 if an error occurred
+ */
+int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
+ AVPacketList *pktl, **next_point, *this_pktl;
+ int stream_count=0;
+ int streams[MAX_STREAMS];
+
+ if(pkt){
+ AVStream *st= s->streams[ pkt->stream_index];
+
+// assert(pkt->destruct != av_destruct_packet); //FIXME
+
+ this_pktl = av_mallocz(sizeof(AVPacketList));
+ this_pktl->pkt= *pkt;
+ if(pkt->destruct == av_destruct_packet)
+ pkt->destruct= NULL; // non shared -> must keep original from being freed
+ else
+ av_dup_packet(&this_pktl->pkt); //shared -> must dup
+
+ next_point = &s->packet_buffer;
+ while(*next_point){
+ AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
+ int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
+ int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
+ if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
+ break;
+ next_point= &(*next_point)->next;
+ }
+ this_pktl->next= *next_point;
+ *next_point= this_pktl;
+ }
+
+ memset(streams, 0, sizeof(streams));
+ pktl= s->packet_buffer;
+ while(pktl){
+//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
+ if(streams[ pktl->pkt.stream_index ] == 0)
+ stream_count++;
+ streams[ pktl->pkt.stream_index ]++;
+ pktl= pktl->next;
+ }
+
+ if(s->nb_streams == stream_count || (flush && stream_count)){
+ pktl= s->packet_buffer;
+ *out= pktl->pkt;
+
+ s->packet_buffer= pktl->next;
+ av_freep(&pktl);
+ return 1;
+ }else{
+ av_init_packet(out);
+ return 0;
+ }
+}
+
+/**
+ * Interleaves an AVPacket correctly so it can be muxed.
+ * @param out the interleaved packet will be output here
+ * @param in the input packet
+ * @param flush 1 if no further packets are available as input and all
+ * remaining packets should be output
+ * @return 1 if a packet was output, 0 if no packet could be output,
+ * < 0 if an error occurred
+ */
+static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
+ if(s->oformat->interleave_packet)
+ return s->oformat->interleave_packet(s, out, in, flush);
+ else
+ return av_interleave_packet_per_dts(s, out, in, flush);
+}
+
+/**
+ * Writes a packet to an output media file ensuring correct interleaving.
+ *
+ * The packet must contain one audio or video frame.
+ * If the packets are already correctly interleaved, the application should
+ * call av_write_frame() instead as it is slightly faster. It is also important
+ * to keep in mind that completely non-interleaved input will need huge amounts
+ * of memory to interleave with this, so it is preferable to interleave at the
+ * demuxer level.
+ *
+ * @param s media file handle
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
+ * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
+ */
+int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
+ AVStream *st= s->streams[ pkt->stream_index];
+
+ //FIXME/XXX/HACK drop zero sized packets
+ if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
+ return 0;
+
+//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
+ if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+ return -1;
+
+ if(pkt->dts == AV_NOPTS_VALUE)
+ return -1;
+
+ for(;;){
+ AVPacket opkt;
+ int ret= av_interleave_packet(s, &opkt, pkt, 0);
+ if(ret<=0) //FIXME cleanup needed for ret<0 ?
+ return ret;
+
+ truncate_ts(s->streams[opkt.stream_index], &opkt);
+ ret= s->oformat->write_packet(s, &opkt);
+
+ av_free_packet(&opkt);
+ pkt= NULL;
+
+ if(ret<0)
+ return ret;
+ if(url_ferror(&s->pb))
+ return url_ferror(&s->pb);
+ }
+}
+
+/**
+ * @brief Write the stream trailer to an output media file and
+ * free the file private data.
+ *
+ * @param s media file handle
+ * @return 0 if OK. AVERROR_xxx if error.
+ */
+int av_write_trailer(AVFormatContext *s)
+{
+ int ret, i;
+
+ for(;;){
+ AVPacket pkt;
+ ret= av_interleave_packet(s, &pkt, NULL, 1);
+ if(ret<0) //FIXME cleanup needed for ret<0 ?
+ goto fail;
+ if(!ret)
+ break;
+
+ truncate_ts(s->streams[pkt.stream_index], &pkt);
+ ret= s->oformat->write_packet(s, &pkt);
+
+ av_free_packet(&pkt);
+
+ if(ret<0)
+ goto fail;
+ if(url_ferror(&s->pb))
+ goto fail;
+ }
+
+ if(s->oformat->write_trailer)
+ ret = s->oformat->write_trailer(s);
+fail:
+ if(ret == 0)
+ ret=url_ferror(&s->pb);
+ for(i=0;i<s->nb_streams;i++)
+ av_freep(&s->streams[i]->priv_data);
+ av_freep(&s->priv_data);
+ return ret;
+}
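+
+/* A write-loop sketch, assuming av_write_header() has already been called on
+ * `oc` and that get_encoded_packet() is a hypothetical source of encoded
+ * packets carrying valid pts/dts and stream_index values:
+ * @code
+ * AVPacket pkt;
+ * while (get_encoded_packet(&pkt) == 0) {
+ *     // packets are buffered internally until they can be correctly interleaved
+ *     if (av_interleaved_write_frame(oc, &pkt) < 0)
+ *         break;
+ * }
+ * av_write_trailer(oc);  // flushes buffered packets and writes the trailer
+ * @endcode
+ */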
+
+/* "user interface" functions */
+
+void dump_format(AVFormatContext *ic,
+ int index,
+ const char *url,
+ int is_output)
+{
+ int i, flags;
+ char buf[256];
+
+ av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
+ is_output ? "Output" : "Input",
+ index,
+ is_output ? ic->oformat->name : ic->iformat->name,
+ is_output ? "to" : "from", url);
+ if (!is_output) {
+ av_log(NULL, AV_LOG_INFO, " Duration: ");
+ if (ic->duration != AV_NOPTS_VALUE) {
+ int hours, mins, secs, us;
+ secs = ic->duration / AV_TIME_BASE;
+ us = ic->duration % AV_TIME_BASE;
+ mins = secs / 60;
+ secs %= 60;
+ hours = mins / 60;
+ mins %= 60;
+ av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
+ (10 * us) / AV_TIME_BASE);
+ } else {
+ av_log(NULL, AV_LOG_INFO, "N/A");
+ }
+ if (ic->start_time != AV_NOPTS_VALUE) {
+ int secs, us;
+ av_log(NULL, AV_LOG_INFO, ", start: ");
+ secs = ic->start_time / AV_TIME_BASE;
+ us = ic->start_time % AV_TIME_BASE;
+ av_log(NULL, AV_LOG_INFO, "%d.%06d",
+ secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
+ }
+ av_log(NULL, AV_LOG_INFO, ", bitrate: ");
+ if (ic->bit_rate) {
+ av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
+ } else {
+ av_log(NULL, AV_LOG_INFO, "N/A");
+ }
+ av_log(NULL, AV_LOG_INFO, "\n");
+ }
+ for(i=0;i<ic->nb_streams;i++) {
+ AVStream *st = ic->streams[i];
+ int g= ff_gcd(st->time_base.num, st->time_base.den);
+ avcodec_string(buf, sizeof(buf), st->codec, is_output);
+ av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
+        /* the pid is important information, so we display it */
+ /* XXX: add a generic system */
+ if (is_output)
+ flags = ic->oformat->flags;
+ else
+ flags = ic->iformat->flags;
+ if (flags & AVFMT_SHOW_IDS) {
+ av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
+ }
+ if (strlen(st->language) > 0) {
+ av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
+ }
+ av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
+ av_log(NULL, AV_LOG_INFO, ": %s", buf);
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO){
+ if(st->r_frame_rate.den && st->r_frame_rate.num)
+ av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
+/* else if(st->time_base.den && st->time_base.num)
+ av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
+ else
+ av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
+ }
+ av_log(NULL, AV_LOG_INFO, "\n");
+ }
+}
+
+typedef struct {
+ const char *abv;
+ int width, height;
+ int frame_rate, frame_rate_base;
+} AbvEntry;
+
+static AbvEntry frame_abvs[] = {
+ { "ntsc", 720, 480, 30000, 1001 },
+ { "pal", 720, 576, 25, 1 },
+ { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
+ { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
+ { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
+ { "spal", 768, 576, 25, 1 }, /* square pixel pal */
+ { "film", 352, 240, 24, 1 },
+ { "ntsc-film", 352, 240, 24000, 1001 },
+ { "sqcif", 128, 96, 0, 0 },
+ { "qcif", 176, 144, 0, 0 },
+ { "cif", 352, 288, 0, 0 },
+ { "4cif", 704, 576, 0, 0 },
+};
+
+/**
+ * Parses width and height out of the string str.
+ */
+int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
+{
+ int i;
+ int n = sizeof(frame_abvs) / sizeof(AbvEntry);
+ const char *p;
+ int frame_width = 0, frame_height = 0;
+
+ for(i=0;i<n;i++) {
+ if (!strcmp(frame_abvs[i].abv, str)) {
+ frame_width = frame_abvs[i].width;
+ frame_height = frame_abvs[i].height;
+ break;
+ }
+ }
+ if (i == n) {
+ p = str;
+ frame_width = strtol(p, (char **)&p, 10);
+ if (*p)
+ p++;
+ frame_height = strtol(p, (char **)&p, 10);
+ }
+ if (frame_width <= 0 || frame_height <= 0)
+ return -1;
+ *width_ptr = frame_width;
+ *height_ptr = frame_height;
+ return 0;
+}
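+
+/* Sketch of the two accepted spellings (abbreviations come from the table above):
+ * @code
+ * int w, h;
+ * parse_image_size(&w, &h, "cif");      // -> 352x288
+ * parse_image_size(&w, &h, "640x480");  // -> 640x480
+ * @endcode
+ */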
+
+/**
+ * Converts frame rate from string to a fraction.
+ *
+ * First we try to get an exact integer or fractional frame rate.
+ * If this fails we convert the frame rate to a double and return
+ * an approximate fraction using the DEFAULT_FRAME_RATE_BASE.
+ */
+int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
+{
+ int i;
+ char* cp;
+
+ /* First, we check our abbreviation table */
+ for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
+ if (!strcmp(frame_abvs[i].abv, arg)) {
+ *frame_rate = frame_abvs[i].frame_rate;
+ *frame_rate_base = frame_abvs[i].frame_rate_base;
+ return 0;
+ }
+
+ /* Then, we try to parse it as fraction */
+ cp = strchr(arg, '/');
+ if (!cp)
+ cp = strchr(arg, ':');
+ if (cp) {
+ char* cpp;
+ *frame_rate = strtol(arg, &cpp, 10);
+ if (cpp != arg || cpp == cp)
+ *frame_rate_base = strtol(cp+1, &cpp, 10);
+ else
+ *frame_rate = 0;
+ }
+ else {
+ /* Finally we give up and parse it as double */
+ AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
+ *frame_rate_base = time_base.den;
+ *frame_rate = time_base.num;
+ }
+ if (!*frame_rate || !*frame_rate_base)
+ return -1;
+ else
+ return 0;
+}
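+
+/* Sketch of the three accepted forms:
+ * @code
+ * int fr, frb;
+ * parse_frame_rate(&fr, &frb, "ntsc-film");   // 24000/1001, from the table
+ * parse_frame_rate(&fr, &frb, "30000/1001");  // parsed as a fraction
+ * parse_frame_rate(&fr, &frb, "23.976");      // approximated via av_d2q()
+ * @endcode
+ */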
+
+/**
+ * Converts a date string to the number of microseconds since Jan 1st, 1970.
+ *
+ * @code
+ * Syntax:
+ * - If not a duration:
+ * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
+ * Time is local time unless Z is suffixed to the end, in which case it is GMT.
+ * Returns the date in microseconds since 1970.
+ *
+ * - If a duration:
+ * HH[:MM[:SS[.m...]]]
+ * S+[.m...]
+ * @endcode
+ */
+#ifndef CONFIG_WINCE
+int64_t parse_date(const char *datestr, int duration)
+{
+ const char *p;
+ int64_t t;
+ struct tm dt;
+ int i;
+ static const char *date_fmt[] = {
+ "%Y-%m-%d",
+ "%Y%m%d",
+ };
+ static const char *time_fmt[] = {
+ "%H:%M:%S",
+ "%H%M%S",
+ };
+ const char *q;
+ int is_utc, len;
+ char lastch;
+ int negative = 0;
+
+#undef time
+ time_t now = time(0);
+
+ len = strlen(datestr);
+ if (len > 0)
+ lastch = datestr[len - 1];
+ else
+ lastch = '\0';
+ is_utc = (lastch == 'z' || lastch == 'Z');
+
+ memset(&dt, 0, sizeof(dt));
+
+ p = datestr;
+ q = NULL;
+ if (!duration) {
+ for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
+ q = small_strptime(p, date_fmt[i], &dt);
+ if (q) {
+ break;
+ }
+ }
+
+ if (!q) {
+ if (is_utc) {
+ dt = *gmtime(&now);
+ } else {
+ dt = *localtime(&now);
+ }
+ dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
+ } else {
+ p = q;
+ }
+
+ if (*p == 'T' || *p == 't' || *p == ' ')
+ p++;
+
+ for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
+ q = small_strptime(p, time_fmt[i], &dt);
+ if (q) {
+ break;
+ }
+ }
+ } else {
+ if (p[0] == '-') {
+ negative = 1;
+ ++p;
+ }
+ q = small_strptime(p, time_fmt[0], &dt);
+ if (!q) {
+ dt.tm_sec = strtol(p, (char **)&q, 10);
+ dt.tm_min = 0;
+ dt.tm_hour = 0;
+ }
+ }
+
+ /* Now we have all the fields that we can get */
+ if (!q) {
+ if (duration)
+ return 0;
+ else
+ return now * int64_t_C(1000000);
+ }
+
+ if (duration) {
+ t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
+ } else {
+ dt.tm_isdst = -1; /* unknown */
+ if (is_utc) {
+ t = mktimegm(&dt);
+ } else {
+ t = mktime(&dt);
+ }
+ }
+
+ t *= 1000000;
+
+ if (*q == '.') {
+ int val, n;
+ q++;
+ for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
+ if (!isdigit(*q))
+ break;
+ val += n * (*q - '0');
+ }
+ t += val;
+ }
+ return negative ? -t : t;
+}
+#endif /* CONFIG_WINCE */
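+
+/* Sketch of the two modes:
+ * @code
+ * int64_t t1 = parse_date("2006-12-31 23:59:59Z", 0); // absolute date, GMT
+ * int64_t t2 = parse_date("01:30:00", 1);             // duration: 5400 * 1000000 us
+ * @endcode
+ */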
+
+/**
+ * Attempts to find a specific tag in a URL.
+ *
+ * Syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done
+ * ('+' is converted to a space). Returns 1 if the tag was found, 0 otherwise.
+ */
+int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
+{
+ const char *p;
+ char tag[128], *q;
+
+ p = info;
+ if (*p == '?')
+ p++;
+ for(;;) {
+ q = tag;
+ while (*p != '\0' && *p != '=' && *p != '&') {
+ if ((q - tag) < sizeof(tag) - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ q = arg;
+ if (*p == '=') {
+ p++;
+ while (*p != '&' && *p != '\0') {
+ if ((q - arg) < arg_size - 1) {
+ if (*p == '+')
+ *q++ = ' ';
+ else
+ *q++ = *p;
+ }
+ p++;
+ }
+ *q = '\0';
+ }
+ if (!strcmp(tag, tag1))
+ return 1;
+ if (*p != '&')
+ break;
+ p++;
+ }
+ return 0;
+}
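+
+/* Sketch: extracting one option from an info string:
+ * @code
+ * char ttl[32];
+ * if (find_info_tag(ttl, sizeof(ttl), "ttl", "?multicast=1&ttl=16"))
+ *     av_log(NULL, AV_LOG_INFO, "ttl=%s\n", ttl);  // prints ttl=16
+ * @endcode
+ */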
+
+/**
+ * Returns in 'buf' the path with '%d' replaced by 'number'.
+ *
+ * Also handles the '%0nd' format, where 'n' is the total number
+ * of digits, and '%%'.
+ *
+ * @param buf destination buffer
+ * @param buf_size destination buffer size
+ * @param path numbered sequence string
+ * @param number frame number
+ * @return 0 if OK, -1 if format error.
+ */
+int av_get_frame_filename(char *buf, int buf_size,
+ const char *path, int number)
+{
+ const char *p;
+ char *q, buf1[20], c;
+ int nd, len, percentd_found;
+
+ q = buf;
+ p = path;
+ percentd_found = 0;
+ for(;;) {
+ c = *p++;
+ if (c == '\0')
+ break;
+ if (c == '%') {
+ do {
+ nd = 0;
+ while (isdigit(*p)) {
+ nd = nd * 10 + *p++ - '0';
+ }
+ c = *p++;
+ } while (isdigit(c));
+
+ switch(c) {
+ case '%':
+ goto addchar;
+ case 'd':
+ if (percentd_found)
+ goto fail;
+ percentd_found = 1;
+ snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
+ len = strlen(buf1);
+ if ((q - buf + len) > buf_size - 1)
+ goto fail;
+ memcpy(q, buf1, len);
+ q += len;
+ break;
+ default:
+ goto fail;
+ }
+ } else {
+ addchar:
+ if ((q - buf) < buf_size - 1)
+ *q++ = c;
+ }
+ }
+ if (!percentd_found)
+ goto fail;
+ *q = '\0';
+ return 0;
+ fail:
+ *q = '\0';
+ return -1;
+}
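+
+/* Sketch of the '%0nd' expansion:
+ * @code
+ * char name[1024];
+ * av_get_frame_filename(name, sizeof(name), "img-%03d.ppm", 7);
+ * // name is now "img-007.ppm"; a path without any '%d' makes it return -1
+ * @endcode
+ */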
+
+/**
+ * Print a nice hex dump of a buffer
+ * @param f stream for output
+ * @param buf buffer
+ * @param size buffer size
+ */
+void av_hex_dump(FILE *f, uint8_t *buf, int size)
+{
+ int len, i, j, c;
+
+ for(i=0;i<size;i+=16) {
+ len = size - i;
+ if (len > 16)
+ len = 16;
+ fprintf(f, "%08x ", i);
+ for(j=0;j<16;j++) {
+ if (j < len)
+ fprintf(f, " %02x", buf[i+j]);
+ else
+ fprintf(f, " ");
+ }
+ fprintf(f, " ");
+ for(j=0;j<len;j++) {
+ c = buf[i+j];
+ if (c < ' ' || c > '~')
+ c = '.';
+ fprintf(f, "%c", c);
+ }
+ fprintf(f, "\n");
+ }
+}
+
+/**
+ * Print on 'f' a nice dump of a packet
+ * @param f stream for output
+ * @param pkt packet to dump
+ * @param dump_payload true if the payload must be displayed too
+ */
+ //FIXME needs to know the time_base
+void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
+{
+ fprintf(f, "stream #%d:\n", pkt->stream_index);
+ fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
+ fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
+ /* DTS is _always_ valid after av_read_frame() */
+ fprintf(f, " dts=");
+ if (pkt->dts == AV_NOPTS_VALUE)
+ fprintf(f, "N/A");
+ else
+ fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
+    /* PTS may not be known if B-frames are present */
+ fprintf(f, " pts=");
+ if (pkt->pts == AV_NOPTS_VALUE)
+ fprintf(f, "N/A");
+ else
+ fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
+ fprintf(f, "\n");
+ fprintf(f, " size=%d\n", pkt->size);
+ if (dump_payload)
+ av_hex_dump(f, pkt->data, pkt->size);
+}
+
+void url_split(char *proto, int proto_size,
+ char *authorization, int authorization_size,
+ char *hostname, int hostname_size,
+ int *port_ptr,
+ char *path, int path_size,
+ const char *url)
+{
+ const char *p;
+ char *q;
+ int port;
+
+ port = -1;
+
+ p = url;
+ q = proto;
+ while (*p != ':' && *p != '\0') {
+ if ((q - proto) < proto_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ if (proto_size > 0)
+ *q = '\0';
+ if (authorization_size > 0)
+ authorization[0] = '\0';
+ if (*p == '\0') {
+ if (proto_size > 0)
+ proto[0] = '\0';
+ if (hostname_size > 0)
+ hostname[0] = '\0';
+ p = url;
+ } else {
+ char *at,*slash; // PETR: position of '@' character and '/' character
+
+ p++;
+ if (*p == '/')
+ p++;
+ if (*p == '/')
+ p++;
+ at = strchr(p,'@'); // PETR: get the position of '@'
+ slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
+ if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
+
+ q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
+
+ while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
+ if (*p == '@') { // PETR: passed '@'
+ if (authorization_size > 0)
+ *q = '\0';
+ q = hostname;
+ at = NULL;
+ } else if (!at) { // PETR: hostname
+ if ((q - hostname) < hostname_size - 1)
+ *q++ = *p;
+ } else {
+ if ((q - authorization) < authorization_size - 1)
+ *q++ = *p;
+ }
+ p++;
+ }
+ if (hostname_size > 0)
+ *q = '\0';
+ if (*p == ':') {
+ p++;
+ port = strtoul(p, (char **)&p, 10);
+ }
+ }
+ if (port_ptr)
+ *port_ptr = port;
+ pstrcpy(path, path_size, p);
+}
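+
+/* Sketch: splitting an RTSP URL; the port is returned as -1 when absent:
+ * @code
+ * char proto[16], auth[128], host[128], path[256];
+ * int port;
+ * url_split(proto, sizeof(proto), auth, sizeof(auth), host, sizeof(host),
+ *           &port, path, sizeof(path), "rtsp://user:pass@example.com:554/stream");
+ * // proto="rtsp" auth="user:pass" host="example.com" port=554 path="/stream"
+ * @endcode
+ */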
+
+/**
+ * Set the pts for a given stream.
+ *
+ * @param s stream
+ * @param pts_wrap_bits number of bits effectively used by the pts
+ * (used for wrap control, 33 is the value for MPEG)
+ * @param pts_num numerator to convert to seconds (MPEG: 1)
+ * @param pts_den denominator to convert to seconds (MPEG: 90000)
+ */
+void av_set_pts_info(AVStream *s, int pts_wrap_bits,
+ int pts_num, int pts_den)
+{
+ s->pts_wrap_bits = pts_wrap_bits;
+ s->time_base.num = pts_num;
+ s->time_base.den = pts_den;
+}
+
+/* fraction handling */
+
+/**
+ * f = val + (num / den) + 0.5.
+ *
+ * 'num' is normalized so that 0 <= num < den.
+ *
+ * @param f fractional number
+ * @param val integer value
+ * @param num must be >= 0
+ * @param den must be >= 1
+ */
+static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
+{
+ num += (den >> 1);
+ if (num >= den) {
+ val += num / den;
+ num = num % den;
+ }
+ f->val = val;
+ f->num = num;
+ f->den = den;
+}
+
+/**
+ * Set f to (val + 0.5).
+ */
+static void av_frac_set(AVFrac *f, int64_t val)
+{
+ f->val = val;
+ f->num = f->den >> 1;
+}
+
+/**
+ * Fractional addition to f: f = f + (incr / f->den).
+ *
+ * @param f fractional number
+ * @param incr increment, can be positive or negative
+ */
+static void av_frac_add(AVFrac *f, int64_t incr)
+{
+ int64_t num, den;
+
+ num = f->num + incr;
+ den = f->den;
+ if (num < 0) {
+ f->val += num / den;
+ num = num % den;
+ if (num < 0) {
+ num += den;
+ f->val--;
+ }
+ } else if (num >= den) {
+ f->val += num / den;
+ num = num % den;
+ }
+ f->num = num;
+}
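+
+/* Worked example: starting from f = {val=0, num=0, den=3}, three calls to
+ * av_frac_add(&f, 1) leave num at 1, then 2, then carry into the integer
+ * part: val=1, num=0. val thus advances exactly once per den units added. */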
diff --git a/contrib/ffmpeg/libavformat/v4l2.c b/contrib/ffmpeg/libavformat/v4l2.c
new file mode 100644
index 000000000..00adccaa8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/v4l2.c
@@ -0,0 +1,541 @@
+/*
+ * Video4Linux2 grab interface
+ * Copyright (c) 2000,2001 Fabrice Bellard.
+ * Copyright (c) 2006 Luca Abeni.
+ *
+ * Part of this file is based on the V4L2 video capture example
+ * (http://v4l2spec.bytesex.org/v4l2spec/capture.c)
+ *
+ * Thanks to Michael Niedermayer for providing the mapping between
+ * V4L2_PIX_FMT_* and PIX_FMT_*
+ *
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <asm/types.h>
+#include <linux/videodev2.h>
+#include <time.h>
+
+static const int desired_video_buffers = 256;
+
+enum io_method {
+ io_read,
+ io_mmap,
+ io_userptr
+};
+
+struct video_data {
+ int fd;
+ int frame_format; /* V4L2_PIX_FMT_* */
+ enum io_method io_method;
+ int width, height;
+ int frame_rate;
+ int frame_rate_base;
+ int frame_size;
+ int top_field_first;
+
+ int buffers;
+ void **buf_start;
+ unsigned int *buf_len;
+};
+
+struct fmt_map {
+ enum PixelFormat ff_fmt;
+ int32_t v4l2_fmt;
+};
+
+static struct fmt_map fmt_conversion_table[] = {
+ {
+ .ff_fmt = PIX_FMT_YUV420P,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .ff_fmt = PIX_FMT_YUV422P,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV422P,
+ },
+ {
+ .ff_fmt = PIX_FMT_YUV422,
+ .v4l2_fmt = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .ff_fmt = PIX_FMT_UYVY422,
+ .v4l2_fmt = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .ff_fmt = PIX_FMT_YUV411P,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV411P,
+ },
+ {
+ .ff_fmt = PIX_FMT_YUV410P,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV410,
+ },
+ {
+ .ff_fmt = PIX_FMT_BGR24,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR24,
+ },
+ {
+ .ff_fmt = PIX_FMT_RGB24,
+ .v4l2_fmt = V4L2_PIX_FMT_RGB24,
+ },
+ /*
+ {
+ .ff_fmt = PIX_FMT_RGBA32,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR32,
+ },
+ */
+ {
+ .ff_fmt = PIX_FMT_GRAY8,
+ .v4l2_fmt = V4L2_PIX_FMT_GREY,
+ },
+};
+
+static int device_open(const char *devname, uint32_t *capabilities)
+{
+ struct v4l2_capability cap;
+ int fd;
+ int res;
+
+ fd = open(devname, O_RDWR /*| O_NONBLOCK*/, 0);
+ if (fd < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open video device %s : %s\n",
+ devname, strerror(errno));
+
+ return -1;
+ }
+
+ res = ioctl(fd, VIDIOC_QUERYCAP, &cap);
+    // ENOIOCTLCMD definition only available on __KERNEL__
+ if (res < 0 && errno == 515)
+ {
+ av_log(NULL, AV_LOG_ERROR, "QUERYCAP not implemented, probably V4L device but not supporting V4L2\n");
+ close(fd);
+
+ return -1;
+ }
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
+ strerror(errno));
+ close(fd);
+
+ return -1;
+ }
+ if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
+ av_log(NULL, AV_LOG_ERROR, "Not a video capture device\n");
+ close(fd);
+
+ return -1;
+ }
+ *capabilities = cap.capabilities;
+
+ return fd;
+}
+
+static int device_init(int fd, int *width, int *height, int pix_fmt)
+{
+ struct v4l2_format fmt;
+ int res;
+
+ memset(&fmt, 0, sizeof(struct v4l2_format));
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fmt.fmt.pix.width = *width;
+ fmt.fmt.pix.height = *height;
+ fmt.fmt.pix.pixelformat = pix_fmt;
+ fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+ res = ioctl(fd, VIDIOC_S_FMT, &fmt);
+ if ((*width != fmt.fmt.pix.width) || (*height != fmt.fmt.pix.height)) {
+ av_log(NULL, AV_LOG_INFO, "The V4L2 driver changed the video from %dx%d to %dx%d\n", *width, *height, fmt.fmt.pix.width, fmt.fmt.pix.height);
+ *width = fmt.fmt.pix.width;
+ *height = fmt.fmt.pix.height;
+ }
+
+ return res;
+}
+
+static int first_field(int fd)
+{
+ int res;
+ v4l2_std_id std;
+
+ res = ioctl(fd, VIDIOC_G_STD, &std);
+ if (res < 0) {
+ return 0;
+ }
+ if (std & V4L2_STD_NTSC) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static uint32_t fmt_ff2v4l(enum PixelFormat pix_fmt)
+{
+ int i;
+
+ for (i = 0; i < sizeof(fmt_conversion_table) / sizeof(struct fmt_map); i++) {
+ if (fmt_conversion_table[i].ff_fmt == pix_fmt) {
+ return fmt_conversion_table[i].v4l2_fmt;
+ }
+ }
+
+ return 0;
+}
+
+static enum PixelFormat fmt_v4l2ff(uint32_t pix_fmt)
+{
+ int i;
+
+ for (i = 0; i < sizeof(fmt_conversion_table) / sizeof(struct fmt_map); i++) {
+ if (fmt_conversion_table[i].v4l2_fmt == pix_fmt) {
+ return fmt_conversion_table[i].ff_fmt;
+ }
+ }
+
+ return -1;
+}
+
+static int mmap_init(struct video_data *s)
+{
+ struct v4l2_requestbuffers req;
+ int i, res;
+
+ memset(&req, 0, sizeof(struct v4l2_requestbuffers));
+ req.count = desired_video_buffers;
+ req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ req.memory = V4L2_MEMORY_MMAP;
+ res = ioctl (s->fd, VIDIOC_REQBUFS, &req);
+ if (res < 0) {
+ if (errno == EINVAL) {
+ av_log(NULL, AV_LOG_ERROR, "Device does not support mmap\n");
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
+ }
+
+ return -1;
+ }
+
+ if (req.count < 2) {
+ av_log(NULL, AV_LOG_ERROR, "Insufficient buffer memory\n");
+
+ return -1;
+ }
+ s->buffers = req.count;
+ s->buf_start = av_malloc(sizeof(void *) * s->buffers);
+ if (s->buf_start == NULL) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
+
+ return -1;
+ }
+ s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
+ if (s->buf_len == NULL) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
+ av_free(s->buf_start);
+
+ return -1;
+ }
+
+ for (i = 0; i < req.count; i++) {
+ struct v4l2_buffer buf;
+
+ memset(&buf, 0, sizeof(struct v4l2_buffer));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+ res = ioctl (s->fd, VIDIOC_QUERYBUF, &buf);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
+
+ return -1;
+ }
+
+ s->buf_len[i] = buf.length;
+ if (s->buf_len[i] < s->frame_size) {
+ av_log(NULL, AV_LOG_ERROR, "Buffer len [%d] = %d != %d\n", i, s->buf_len[i], s->frame_size);
+
+ return -1;
+ }
+ s->buf_start[i] = mmap (NULL, buf.length,
+ PROT_READ | PROT_WRITE, MAP_SHARED, s->fd, buf.m.offset);
+ if (s->buf_start[i] == MAP_FAILED) {
+ av_log(NULL, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int read_init(struct video_data *s)
+{
+ return -1;
+}
+
+static int mmap_read_frame(struct video_data *s, void *frame, int64_t *ts)
+{
+ struct v4l2_buffer buf;
+ int res;
+
+ memset(&buf, 0, sizeof(struct v4l2_buffer));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+
+ /* FIXME: Some special treatment might be needed in case of loss of signal... */
+ while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 &&
+ ((errno == EAGAIN) || (errno == EINTR)));
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", strerror(errno));
+
+ return -1;
+ }
+ assert (buf.index < s->buffers);
+ if (buf.bytesused != s->frame_size) {
+ av_log(NULL, AV_LOG_ERROR, "The v4l2 frame is %d bytes, but %d bytes are expected\n", buf.bytesused, s->frame_size);
+
+ return -1;
+ }
+
+    /* Image is at s->buf_start[buf.index] */
+ memcpy(frame, s->buf_start[buf.index], buf.bytesused);
+ *ts = buf.timestamp.tv_sec * int64_t_C(1000000) + buf.timestamp.tv_usec;
+
+ res = ioctl (s->fd, VIDIOC_QBUF, &buf);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n");
+
+ return -1;
+ }
+
+ return s->buf_len[buf.index];
+}
+
+static int read_frame(struct video_data *s, void *frame, int64_t *ts)
+{
+ return -1;
+}
+
+static int mmap_start(struct video_data *s)
+{
+ enum v4l2_buf_type type;
+ int i, res;
+
+ for (i = 0; i < s->buffers; i++) {
+ struct v4l2_buffer buf;
+
+ memset(&buf, 0, sizeof(struct v4l2_buffer));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+
+ res = ioctl (s->fd, VIDIOC_QBUF, &buf);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", strerror(errno));
+
+ return -1;
+ }
+ }
+
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ res = ioctl (s->fd, VIDIOC_STREAMON, &type);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", strerror(errno));
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static void mmap_close(struct video_data *s)
+{
+ enum v4l2_buf_type type;
+ int i;
+
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ /* We do not check for the result, because we could
+ * not do anything about it anyway...
+ */
+ ioctl(s->fd, VIDIOC_STREAMOFF, &type);
+ for (i = 0; i < s->buffers; i++) {
+ munmap(s->buf_start[i], s->buf_len[i]);
+ }
+ av_free(s->buf_start);
+ av_free(s->buf_len);
+}
+
+static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ struct video_data *s = s1->priv_data;
+ AVStream *st;
+ int width, height;
+ int res, frame_rate, frame_rate_base;
+ uint32_t desired_format, capabilities;
+ const char *video_device;
+
+ if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
+ av_log(s1, AV_LOG_ERROR, "Missing/Wrong parameters\n");
+
+ return -1;
+ }
+
+ width = ap->width;
+ height = ap->height;
+ frame_rate = ap->time_base.den;
+ frame_rate_base = ap->time_base.num;
+
+ if((unsigned)width > 32767 || (unsigned)height > 32767) {
+ av_log(s1, AV_LOG_ERROR, "Wrong size %dx%d\n", width, height);
+
+ return -1;
+ }
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ s->width = width;
+ s->height = height;
+ s->frame_rate = frame_rate;
+ s->frame_rate_base = frame_rate_base;
+
+ video_device = ap->device;
+ if (!video_device) {
+ video_device = "/dev/video";
+ }
+ capabilities = 0;
+ s->fd = device_open(video_device, &capabilities);
+ if (s->fd < 0) {
+ av_free(st);
+
+ return AVERROR_IO;
+ }
+ av_log(s1, AV_LOG_INFO, "[%d]Capabilities: %x\n", s->fd, capabilities);
+
+ desired_format = fmt_ff2v4l(ap->pix_fmt);
+ if (desired_format == 0 || (device_init(s->fd, &width, &height, desired_format) < 0)) {
+ int i, done;
+
+ done = 0; i = 0;
+ while (!done) {
+ desired_format = fmt_conversion_table[i].v4l2_fmt;
+ if (device_init(s->fd, &width, &height, desired_format) < 0) {
+ desired_format = 0;
+ i++;
+ } else {
+ done = 1;
+ }
+ if (i == sizeof(fmt_conversion_table) / sizeof(struct fmt_map)) {
+ done = 1;
+ }
+ }
+ }
+ if (desired_format == 0) {
+ av_log(s1, AV_LOG_ERROR, "Cannot find a proper format.\n");
+ close(s->fd);
+ av_free(st);
+
+ return AVERROR_IO;
+ }
+ s->frame_format = desired_format;
+
+ st->codec->pix_fmt = fmt_v4l2ff(desired_format);
+ s->frame_size = avpicture_get_size(st->codec->pix_fmt, width, height);
+ if (capabilities & V4L2_CAP_STREAMING) {
+ s->io_method = io_mmap;
+ res = mmap_init(s);
+ if (res == 0) {
+ res = mmap_start(s);
+ }
+ } else {
+ s->io_method = io_read;
+ res = read_init(s);
+ }
+ if (res < 0) {
+ close(s->fd);
+ av_free(st);
+
+ return AVERROR_IO;
+ }
+ s->top_field_first = first_field(s->fd);
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = width;
+ st->codec->height = height;
+ st->codec->time_base.den = frame_rate;
+ st->codec->time_base.num = frame_rate_base;
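+    /* raw video bit rate: bytes per frame * frames per second * 8 */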
+ st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;
+
+ return 0;
+}
+
+static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ struct video_data *s = s1->priv_data;
+ int res;
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR_IO;
+
+ if (s->io_method == io_mmap) {
+ res = mmap_read_frame(s, pkt->data, &pkt->pts);
+ } else if (s->io_method == io_read) {
+ res = read_frame(s, pkt->data, &pkt->pts);
+ } else {
+ return AVERROR_IO;
+ }
+ if (res < 0) {
+ return AVERROR_IO;
+ }
+
+ if (s1->streams[0]->codec->coded_frame) {
+ s1->streams[0]->codec->coded_frame->interlaced_frame = 1;
+ s1->streams[0]->codec->coded_frame->top_field_first = s->top_field_first;
+ }
+
+ return s->frame_size;
+}
+
+static int v4l2_read_close(AVFormatContext *s1)
+{
+ struct video_data *s = s1->priv_data;
+
+ if (s->io_method == io_mmap) {
+ mmap_close(s);
+ }
+
+ close(s->fd);
+ return 0;
+}
+
+AVInputFormat v4l2_demuxer = {
+ "video4linux2",
+ "video grab",
+ sizeof(struct video_data),
+ NULL,
+ v4l2_read_header,
+ v4l2_read_packet,
+ v4l2_read_close,
+ .flags = AVFMT_NOFILE,
+};
diff --git a/contrib/ffmpeg/libavformat/voc.c b/contrib/ffmpeg/libavformat/voc.c
new file mode 100644
index 000000000..329f07739
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/voc.c
@@ -0,0 +1,36 @@
+/*
+ * Creative Voice File common data.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "voc.h"
+
+const unsigned char voc_magic[21] = "Creative Voice File\x1A";
+
+const CodecTag voc_codec_tags[] = {
+ {CODEC_ID_PCM_U8, 0x00},
+ {CODEC_ID_ADPCM_SBPRO_4, 0x01},
+ {CODEC_ID_ADPCM_SBPRO_3, 0x02},
+ {CODEC_ID_ADPCM_SBPRO_2, 0x03},
+ {CODEC_ID_PCM_S16LE, 0x04},
+ {CODEC_ID_PCM_ALAW, 0x06},
+ {CODEC_ID_PCM_MULAW, 0x07},
+ {CODEC_ID_ADPCM_CT, 0x0200},
+ {0, 0},
+};
diff --git a/contrib/ffmpeg/libavformat/voc.h b/contrib/ffmpeg/libavformat/voc.h
new file mode 100644
index 000000000..16adb0078
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/voc.h
@@ -0,0 +1,51 @@
+/*
+ * Creative Voice File demuxer.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef VOC_H
+#define VOC_H
+
+#include "avformat.h"
+#include "riff.h" /* for CodecTag */
+
+typedef struct voc_dec_context {
+ int remaining_size;
+} voc_dec_context_t;
+
+typedef enum voc_type {
+ VOC_TYPE_EOF = 0x00,
+ VOC_TYPE_VOICE_DATA = 0x01,
+ VOC_TYPE_VOICE_DATA_CONT = 0x02,
+ VOC_TYPE_SILENCE = 0x03,
+ VOC_TYPE_MARKER = 0x04,
+ VOC_TYPE_ASCII = 0x05,
+ VOC_TYPE_REPETITION_START = 0x06,
+ VOC_TYPE_REPETITION_END = 0x07,
+ VOC_TYPE_EXTENDED = 0x08,
+ VOC_TYPE_NEW_VOICE_DATA = 0x09,
+} voc_type_t;
+
+extern const unsigned char voc_magic[21];
+extern const CodecTag voc_codec_tags[];
+
+int voc_get_packet(AVFormatContext *s, AVPacket *pkt,
+ AVStream *st, int max_size);
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/vocdec.c b/contrib/ffmpeg/libavformat/vocdec.c
new file mode 100644
index 000000000..6a7869227
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/vocdec.c
@@ -0,0 +1,155 @@
+/*
+ * Creative Voice File demuxer.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "voc.h"
+
+
+static const int voc_max_pkt_size = 2048;
+
+
+static int voc_probe(AVProbeData *p)
+{
+ int version, check;
+
+ if (p->buf_size < 26)
+ return 0;
+ if (memcmp(p->buf, voc_magic, sizeof(voc_magic) - 1))
+ return 0;
+ version = p->buf[22] | (p->buf[23] << 8);
+ check = p->buf[24] | (p->buf[25] << 8);
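+    /* the header stores ~version + 0x1234 as a consistency check */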
+ if (~version + 0x1234 != check)
+ return 10;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int voc_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ voc_dec_context_t *voc = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int header_size;
+ AVStream *st;
+
+ url_fskip(pb, 20);
+ header_size = get_le16(pb) - 22;
+ if (header_size != 4) {
+        av_log(s, AV_LOG_ERROR, "unknown header size: %d\n", header_size);
+ return AVERROR_NOTSUPP;
+ }
+ url_fskip(pb, header_size);
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+
+ voc->remaining_size = 0;
+ return 0;
+}
+
+int
+voc_get_packet(AVFormatContext *s, AVPacket *pkt, AVStream *st, int max_size)
+{
+ voc_dec_context_t *voc = s->priv_data;
+ AVCodecContext *dec = st->codec;
+ ByteIOContext *pb = &s->pb;
+ voc_type_t type;
+ int size;
+ int sample_rate = 0;
+ int channels = 1;
+
+ while (!voc->remaining_size) {
+ type = get_byte(pb);
+ if (type == VOC_TYPE_EOF)
+ return AVERROR_IO;
+ voc->remaining_size = get_le24(pb);
+ max_size -= 4;
+
+ switch (type) {
+ case VOC_TYPE_VOICE_DATA:
+ dec->sample_rate = 1000000 / (256 - get_byte(pb));
+ if (sample_rate)
+ dec->sample_rate = sample_rate;
+ dec->channels = channels;
+ dec->codec_id = codec_get_id(voc_codec_tags, get_byte(pb));
+ dec->bits_per_sample = av_get_bits_per_sample(dec->codec_id);
+ voc->remaining_size -= 2;
+ max_size -= 2;
+ channels = 1;
+ break;
+
+ case VOC_TYPE_VOICE_DATA_CONT:
+ break;
+
+ case VOC_TYPE_EXTENDED:
+ sample_rate = get_le16(pb);
+ get_byte(pb);
+ channels = get_byte(pb) + 1;
+ sample_rate = 256000000 / (channels * (65536 - sample_rate));
+ voc->remaining_size = 0;
+ max_size -= 4;
+ break;
+
+ case VOC_TYPE_NEW_VOICE_DATA:
+ dec->sample_rate = get_le32(pb);
+ dec->bits_per_sample = get_byte(pb);
+ dec->channels = get_byte(pb);
+ dec->codec_id = codec_get_id(voc_codec_tags, get_le16(pb));
+ url_fskip(pb, 4);
+ voc->remaining_size -= 12;
+ max_size -= 12;
+ break;
+
+ default:
+ url_fskip(pb, voc->remaining_size);
+ max_size -= voc->remaining_size;
+ voc->remaining_size = 0;
+ break;
+ }
+ }
+
+ dec->bit_rate = dec->sample_rate * dec->bits_per_sample;
+
+ if (max_size <= 0)
+ max_size = voc_max_pkt_size;
+ size = FFMIN(voc->remaining_size, max_size);
+ voc->remaining_size -= size;
+ return av_get_packet(pb, pkt, size);
+}
+
+static int voc_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ return voc_get_packet(s, pkt, s->streams[0], 0);
+}
+
+static int voc_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVInputFormat voc_demuxer = {
+ "voc",
+ "Creative Voice File format",
+ sizeof(voc_dec_context_t),
+ voc_probe,
+ voc_read_header,
+ voc_read_packet,
+ voc_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/vocenc.c b/contrib/ffmpeg/libavformat/vocenc.c
new file mode 100644
index 000000000..ed304883d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/vocenc.c
@@ -0,0 +1,104 @@
+/*
+ * Creative Voice File muxer.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "voc.h"
+
+
+typedef struct voc_enc_context {
+ int param_written;
+} voc_enc_context_t;
+
+static int voc_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ const int header_size = 26;
+ const int version = 0x0114;
+
+ if (s->nb_streams != 1
+ || s->streams[0]->codec->codec_type != CODEC_TYPE_AUDIO)
+ return AVERROR_NOTSUPP;
+
+ put_buffer(pb, voc_magic, sizeof(voc_magic) - 1);
+ put_le16(pb, header_size);
+ put_le16(pb, version);
+ put_le16(pb, ~version + 0x1234);
+
+ return 0;
+}
+
+static int voc_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ voc_enc_context_t *voc = s->priv_data;
+ AVCodecContext *enc = s->streams[0]->codec;
+ ByteIOContext *pb = &s->pb;
+
+ if (!voc->param_written) {
+ int format = codec_get_tag(voc_codec_tags, enc->codec_id);
+
+ if (format > 0xFF) {
+ put_byte(pb, VOC_TYPE_NEW_VOICE_DATA);
+ put_le24(pb, pkt->size + 12);
+ put_le32(pb, enc->sample_rate);
+ put_byte(pb, enc->bits_per_sample);
+ put_byte(pb, enc->channels);
+ put_le16(pb, format);
+ put_le32(pb, 0);
+ } else {
+ if (s->streams[0]->codec->channels > 1) {
+ put_byte(pb, VOC_TYPE_EXTENDED);
+ put_le24(pb, 4);
+ put_le16(pb, 65536-256000000/(enc->sample_rate*enc->channels));
+ put_byte(pb, format);
+ put_byte(pb, enc->channels - 1);
+ }
+ put_byte(pb, VOC_TYPE_VOICE_DATA);
+ put_le24(pb, pkt->size + 2);
+ put_byte(pb, 256 - 1000000 / enc->sample_rate);
+ put_byte(pb, format);
+ }
+ voc->param_written = 1;
+ } else {
+ put_byte(pb, VOC_TYPE_VOICE_DATA_CONT);
+ put_le24(pb, pkt->size);
+ }
+
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int voc_write_trailer(AVFormatContext *s)
+{
+ put_byte(&s->pb, 0);
+ return 0;
+}
+
+AVOutputFormat voc_muxer = {
+ "voc",
+ "Creative Voice File format",
+ "audio/x-voc",
+ "voc",
+ sizeof(voc_enc_context_t),
+ CODEC_ID_PCM_U8,
+ CODEC_ID_NONE,
+ voc_write_header,
+ voc_write_packet,
+ voc_write_trailer,
+};
diff --git a/contrib/ffmpeg/libavformat/wav.c b/contrib/ffmpeg/libavformat/wav.c
new file mode 100644
index 000000000..7fb982349
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/wav.c
@@ -0,0 +1,253 @@
+/*
+ * WAV muxer and demuxer
+ * Copyright (c) 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+
+typedef struct {
+ offset_t data;
+ offset_t data_end;
+} WAVContext;
+
+#ifdef CONFIG_MUXERS
+static int wav_write_header(AVFormatContext *s)
+{
+ WAVContext *wav = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t fmt;
+
+ put_tag(pb, "RIFF");
+ put_le32(pb, 0); /* file length */
+ put_tag(pb, "WAVE");
+
+ /* format header */
+ fmt = start_tag(pb, "fmt ");
+ if (put_wav_header(pb, s->streams[0]->codec) < 0) {
+ av_free(wav);
+ return -1;
+ }
+ end_tag(pb, fmt);
+
+ av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
+
+ /* data header */
+ wav->data = start_tag(pb, "data");
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int wav_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int wav_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ WAVContext *wav = s->priv_data;
+ offset_t file_size;
+
+ if (!url_is_streamed(&s->pb)) {
+ end_tag(pb, wav->data);
+
+ /* update file size */
+ file_size = url_ftell(pb);
+ url_fseek(pb, 4, SEEK_SET);
+ put_le32(pb, (uint32_t)(file_size - 8));
+ url_fseek(pb, file_size, SEEK_SET);
+
+ put_flush_packet(pb);
+ }
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/* return the size of the found tag */
+/* XXX: > 2GB ? */
+static int find_tag(ByteIOContext *pb, uint32_t tag1)
+{
+ unsigned int tag;
+ int size;
+
+ for(;;) {
+ if (url_feof(pb))
+ return -1;
+ tag = get_le32(pb);
+ size = get_le32(pb);
+ if (tag == tag1)
+ break;
+ url_fseek(pb, size, SEEK_CUR);
+ }
+ if (size < 0)
+ size = 0x7fffffff;
+ return size;
+}
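+
+/* Every RIFF sub-chunk is a 4-byte tag followed by a 32-bit little-endian
+ * payload size; find_tag() simply skips payloads until the requested tag
+ * shows up, clamping a negative (overflowed) size to INT32_MAX. */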
+
+static int wav_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'R' && p->buf[1] == 'I' &&
+ p->buf[2] == 'F' && p->buf[3] == 'F' &&
+ p->buf[8] == 'W' && p->buf[9] == 'A' &&
+ p->buf[10] == 'V' && p->buf[11] == 'E')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* wav input */
+static int wav_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int size;
+ unsigned int tag;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ WAVContext *wav = s->priv_data;
+
+ /* check RIFF header */
+ tag = get_le32(pb);
+
+ if (tag != MKTAG('R', 'I', 'F', 'F'))
+ return -1;
+ get_le32(pb); /* file size */
+ tag = get_le32(pb);
+ if (tag != MKTAG('W', 'A', 'V', 'E'))
+ return -1;
+
+ /* parse fmt header */
+ size = find_tag(pb, MKTAG('f', 'm', 't', ' '));
+ if (size < 0)
+ return -1;
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ get_wav_header(pb, st->codec, size);
+ st->need_parsing = 1;
+
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+
+ size = find_tag(pb, MKTAG('d', 'a', 't', 'a'));
+ if (size < 0)
+ return -1;
+ wav->data_end= url_ftell(pb) + size;
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int wav_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ int ret, size, left;
+ AVStream *st;
+ WAVContext *wav = s->priv_data;
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+ st = s->streams[0];
+
+ left= wav->data_end - url_ftell(&s->pb);
+ if(left <= 0){
+ left = find_tag(&(s->pb), MKTAG('d', 'a', 't', 'a'));
+ if (left < 0) {
+ return AVERROR_IO;
+ }
+ wav->data_end= url_ftell(&s->pb) + left;
+ }
+
+ size = MAX_SIZE;
+ if (st->codec->block_align > 1) {
+ if (size < st->codec->block_align)
+ size = st->codec->block_align;
+ size = (size / st->codec->block_align) * st->codec->block_align;
+ }
+ size= FFMIN(size, left);
+ if (av_new_packet(pkt, size))
+ return AVERROR_IO;
+ pkt->stream_index = 0;
+
+ ret = get_buffer(&s->pb, pkt->data, pkt->size);
+ if (ret < 0)
+ av_free_packet(pkt);
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return ret;
+}
+
+static int wav_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int wav_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ AVStream *st;
+
+ st = s->streams[0];
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ case CODEC_ID_AC3:
+ case CODEC_ID_DTS:
+ /* use generic seeking with dynamically generated indexes */
+ return -1;
+ default:
+ break;
+ }
+ return pcm_read_seek(s, stream_index, timestamp, flags);
+}
+
+#ifdef CONFIG_WAV_DEMUXER
+AVInputFormat wav_demuxer = {
+ "wav",
+ "wav format",
+ sizeof(WAVContext),
+ wav_probe,
+ wav_read_header,
+ wav_read_packet,
+ wav_read_close,
+ wav_read_seek,
+};
+#endif
+#ifdef CONFIG_WAV_MUXER
+AVOutputFormat wav_muxer = {
+ "wav",
+ "wav format",
+ "audio/x-wav",
+ "wav",
+ sizeof(WAVContext),
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_NONE,
+ wav_write_header,
+ wav_write_packet,
+ wav_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/wc3movie.c b/contrib/ffmpeg/libavformat/wc3movie.c
new file mode 100644
index 000000000..6b3242797
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/wc3movie.c
@@ -0,0 +1,394 @@
+/*
+ * Wing Commander III Movie (.mve) File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file wc3movie.c
+ * Wing Commander III Movie file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the WC3 .mve file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#include "avformat.h"
+
+#define WC3_PREAMBLE_SIZE 8
+
+#define FORM_TAG MKTAG('F', 'O', 'R', 'M')
+#define MOVE_TAG MKTAG('M', 'O', 'V', 'E')
+#define _PC__TAG MKTAG('_', 'P', 'C', '_')
+#define SOND_TAG MKTAG('S', 'O', 'N', 'D')
+#define BNAM_TAG MKTAG('B', 'N', 'A', 'M')
+#define SIZE_TAG MKTAG('S', 'I', 'Z', 'E')
+#define PALT_TAG MKTAG('P', 'A', 'L', 'T')
+#define INDX_TAG MKTAG('I', 'N', 'D', 'X')
+#define BRCH_TAG MKTAG('B', 'R', 'C', 'H')
+#define SHOT_TAG MKTAG('S', 'H', 'O', 'T')
+#define VGA__TAG MKTAG('V', 'G', 'A', ' ')
+#define TEXT_TAG MKTAG('T', 'E', 'X', 'T')
+#define AUDI_TAG MKTAG('A', 'U', 'D', 'I')
+
+/* video resolution unless otherwise specified */
+#define WC3_DEFAULT_WIDTH 320
+#define WC3_DEFAULT_HEIGHT 165
+
+/* always use the same PCM audio parameters */
+#define WC3_SAMPLE_RATE 22050
+#define WC3_AUDIO_CHANNELS 1
+#define WC3_AUDIO_BITS 16
+
+/* nice, constant framerate */
+#define WC3_FRAME_PTS_INC (90000 / 15)
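+/* i.e. 6000 pts ticks per frame at the 90 kHz time base set up below */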
+
+#define PALETTE_SIZE (256 * 3)
+#define PALETTE_COUNT 256
+
+typedef struct Wc3DemuxContext {
+ int width;
+ int height;
+ unsigned char *palettes;
+ int palette_count;
+ int64_t pts;
+ int video_stream_index;
+ int audio_stream_index;
+
+ AVPaletteControl palette_control;
+
+} Wc3DemuxContext;
+
+/* bizarre palette lookup table */
+static const unsigned char wc3_pal_lookup[] = {
+ 0x00, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0E,
+ 0x10, 0x12, 0x13, 0x15, 0x16, 0x18, 0x19, 0x1A,
+ 0x1C, 0x1D, 0x1F, 0x20, 0x21, 0x23, 0x24, 0x25,
+ 0x27, 0x28, 0x29, 0x2A, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x38, 0x39,
+ 0x3A, 0x3B, 0x3C, 0x3D, 0x3F, 0x40, 0x41, 0x42,
+ 0x43, 0x44, 0x45, 0x46, 0x48, 0x49, 0x4A, 0x4B,
+ 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53,
+ 0x54, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C,
+ 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C,
+ 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74,
+ 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C,
+ 0x7D, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
+ 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B,
+ 0x8C, 0x8D, 0x8D, 0x8E, 0x8F, 0x90, 0x91, 0x92,
+ 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x99,
+ 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1,
+ 0xA2, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
+ 0xA9, 0xAA, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
+ 0xB0, 0xB1, 0xB2, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
+ 0xB7, 0xB8, 0xB9, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
+ 0xBE, 0xBF, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4,
+ 0xC5, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB,
+ 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0xD0, 0xD1,
+ 0xD2, 0xD3, 0xD4, 0xD5, 0xD5, 0xD6, 0xD7, 0xD8,
+ 0xD9, 0xDA, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE4, 0xE5,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xE9, 0xEA, 0xEB, 0xEC,
+ 0xED, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF1, 0xF2,
+ 0xF3, 0xF4, 0xF5, 0xF6, 0xF6, 0xF7, 0xF8, 0xF9,
+ 0xFA, 0xFA, 0xFB, 0xFC, 0xFD, 0xFD, 0xFD, 0xFD
+};
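+
+/* The PALT chunks appear to store 6-bit VGA DAC components; the
+ * rotate-left-by-2 applied in wc3_read_header() scales them to 8 bits
+ * (v * 4 for values below 64), and the table above then remaps the result
+ * with a mild nonlinear boost in the darker range. */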
+
+
+static int wc3_probe(AVProbeData *p)
+{
+ if (p->buf_size < 12)
+ return 0;
+
+ if ((LE_32(&p->buf[0]) != FORM_TAG) ||
+ (LE_32(&p->buf[8]) != MOVE_TAG))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int wc3_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ Wc3DemuxContext *wc3 = (Wc3DemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned int fourcc_tag;
+ unsigned int size;
+ AVStream *st;
+ unsigned char preamble[WC3_PREAMBLE_SIZE];
+ int ret = 0;
+ int current_palette = 0;
+ int bytes_to_read;
+ int i;
+ unsigned char rotate;
+
+ /* default context members */
+ wc3->width = WC3_DEFAULT_WIDTH;
+ wc3->height = WC3_DEFAULT_HEIGHT;
+ wc3->palettes = NULL;
+ wc3->palette_count = 0;
+ wc3->pts = 0;
+ wc3->video_stream_index = wc3->audio_stream_index = 0;
+
+ /* skip the first 3 32-bit numbers */
+ url_fseek(pb, 12, SEEK_CUR);
+
+ /* traverse through the chunks and load the header information before
+ * the first BRCH tag */
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ WC3_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ fourcc_tag = LE_32(&preamble[0]);
+ size = (BE_32(&preamble[4]) + 1) & (~1);
+
+ do {
+ switch (fourcc_tag) {
+
+ case SOND_TAG:
+ case INDX_TAG:
+ /* SOND unknown, INDX unnecessary; ignore both */
+ url_fseek(pb, size, SEEK_CUR);
+ break;
+
+ case _PC__TAG:
+ /* need the number of palettes */
+ url_fseek(pb, 8, SEEK_CUR);
+ if ((ret = get_buffer(pb, preamble, 4)) != 4)
+ return AVERROR_IO;
+ wc3->palette_count = LE_32(&preamble[0]);
+ if((unsigned)wc3->palette_count >= UINT_MAX / PALETTE_SIZE){
+ wc3->palette_count= 0;
+ return -1;
+ }
+            wc3->palettes = av_malloc(wc3->palette_count * PALETTE_SIZE);
+            if (!wc3->palettes)
+                return AVERROR_NOMEM;
+ break;
+
+ case BNAM_TAG:
+ /* load up the name */
+ if ((unsigned)size < 512)
+ bytes_to_read = size;
+ else
+ bytes_to_read = 512;
+ if ((ret = get_buffer(pb, s->title, bytes_to_read)) != bytes_to_read)
+ return AVERROR_IO;
+ break;
+
+ case SIZE_TAG:
+ /* video resolution override */
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ WC3_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ wc3->width = LE_32(&preamble[0]);
+ wc3->height = LE_32(&preamble[4]);
+ break;
+
+ case PALT_TAG:
+ /* one of several palettes */
+ if ((unsigned)current_palette >= wc3->palette_count)
+ return AVERROR_INVALIDDATA;
+ if ((ret = get_buffer(pb,
+ &wc3->palettes[current_palette * PALETTE_SIZE],
+ PALETTE_SIZE)) != PALETTE_SIZE)
+ return AVERROR_IO;
+
+ /* transform the current palette in place */
+ for (i = current_palette * PALETTE_SIZE;
+ i < (current_palette + 1) * PALETTE_SIZE; i++) {
+ /* rotate each palette component left by 2 and use the result
+ * as an index into the color component table */
+ rotate = ((wc3->palettes[i] << 2) & 0xFF) |
+ ((wc3->palettes[i] >> 6) & 0xFF);
+ wc3->palettes[i] = wc3_pal_lookup[rotate];
+ }
+ current_palette++;
+ break;
+
+ default:
+ av_log(s, AV_LOG_ERROR, " unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
+ preamble[0], preamble[1], preamble[2], preamble[3],
+ preamble[0], preamble[1], preamble[2], preamble[3]);
+ return AVERROR_INVALIDDATA;
+ break;
+ }
+
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ WC3_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ fourcc_tag = LE_32(&preamble[0]);
+ /* chunk sizes are 16-bit aligned */
+ size = (BE_32(&preamble[4]) + 1) & (~1);
+
+ } while (fourcc_tag != BRCH_TAG);
+
+ /* initialize the decoder streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ wc3->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_XAN_WC3;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = wc3->width;
+ st->codec->height = wc3->height;
+
+ /* palette considerations */
+ st->codec->palctrl = &wc3->palette_control;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ wc3->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ st->codec->codec_tag = 1;
+ st->codec->channels = WC3_AUDIO_CHANNELS;
+ st->codec->bits_per_sample = WC3_AUDIO_BITS;
+ st->codec->sample_rate = WC3_SAMPLE_RATE;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ st->codec->block_align = WC3_AUDIO_BITS * WC3_AUDIO_CHANNELS;
+
+ return 0;
+}
+
+static int wc3_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ Wc3DemuxContext *wc3 = (Wc3DemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned int fourcc_tag;
+ unsigned int size;
+ int packet_read = 0;
+ int ret = 0;
+ unsigned char preamble[WC3_PREAMBLE_SIZE];
+ unsigned char text[1024];
+ unsigned int palette_number;
+ int i;
+ unsigned char r, g, b;
+ int base_palette_index;
+
+ while (!packet_read) {
+
+ /* get the next chunk preamble */
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ WC3_PREAMBLE_SIZE)
+            /* end of file or truncated chunk: bail out instead of parsing
+             * a stale preamble */
+            return AVERROR_IO;
+
+ fourcc_tag = LE_32(&preamble[0]);
+ /* chunk sizes are 16-bit aligned */
+ size = (BE_32(&preamble[4]) + 1) & (~1);
+
+ switch (fourcc_tag) {
+
+ case BRCH_TAG:
+ /* no-op */
+ break;
+
+ case SHOT_TAG:
+ /* load up new palette */
+ if ((ret = get_buffer(pb, preamble, 4)) != 4)
+ return AVERROR_IO;
+ palette_number = LE_32(&preamble[0]);
+ if (palette_number >= wc3->palette_count)
+ return AVERROR_INVALIDDATA;
+ base_palette_index = palette_number * PALETTE_COUNT * 3;
+ for (i = 0; i < PALETTE_COUNT; i++) {
+ r = wc3->palettes[base_palette_index + i * 3 + 0];
+ g = wc3->palettes[base_palette_index + i * 3 + 1];
+ b = wc3->palettes[base_palette_index + i * 3 + 2];
+ wc3->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
+ }
+ wc3->palette_control.palette_changed = 1;
+ break;
+
+ case VGA__TAG:
+ /* send out video chunk */
+ ret= av_get_packet(pb, pkt, size);
+ pkt->stream_index = wc3->video_stream_index;
+ pkt->pts = wc3->pts;
+ if (ret != size)
+ ret = AVERROR_IO;
+ packet_read = 1;
+ break;
+
+ case TEXT_TAG:
+ /* subtitle chunk */
+#if 0
+ url_fseek(pb, size, SEEK_CUR);
+#else
+ if ((unsigned)size > sizeof(text) || (ret = get_buffer(pb, text, size)) != size)
+ ret = AVERROR_IO;
+ else {
+ int i = 0;
+ av_log (s, AV_LOG_DEBUG, "Subtitle time!\n");
+ av_log (s, AV_LOG_DEBUG, " inglish: %s\n", &text[i + 1]);
+ i += text[i] + 1;
+ av_log (s, AV_LOG_DEBUG, " doytsch: %s\n", &text[i + 1]);
+ i += text[i] + 1;
+ av_log (s, AV_LOG_DEBUG, " fronsay: %s\n", &text[i + 1]);
+ }
+#endif
+ break;
+
+ case AUDI_TAG:
+ /* send out audio chunk */
+ ret= av_get_packet(pb, pkt, size);
+ pkt->stream_index = wc3->audio_stream_index;
+ pkt->pts = wc3->pts;
+ if (ret != size)
+ ret = AVERROR_IO;
+
+ /* time to advance pts */
+ wc3->pts += WC3_FRAME_PTS_INC;
+
+ packet_read = 1;
+ break;
+
+ default:
+ av_log (s, AV_LOG_ERROR, " unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
+ preamble[0], preamble[1], preamble[2], preamble[3],
+ preamble[0], preamble[1], preamble[2], preamble[3]);
+ ret = AVERROR_INVALIDDATA;
+ packet_read = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int wc3_read_close(AVFormatContext *s)
+{
+ Wc3DemuxContext *wc3 = (Wc3DemuxContext *)s->priv_data;
+
+ av_free(wc3->palettes);
+
+ return 0;
+}
+
+AVInputFormat wc3_demuxer = {
+ "wc3movie",
+ "Wing Commander III movie format",
+ sizeof(Wc3DemuxContext),
+ wc3_probe,
+ wc3_read_header,
+ wc3_read_packet,
+ wc3_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/westwood.c b/contrib/ffmpeg/libavformat/westwood.c
new file mode 100644
index 000000000..5c42e3b55
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/westwood.c
@@ -0,0 +1,414 @@
+/*
+ * Westwood Studios Multimedia Formats Demuxer (VQA, AUD)
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file westwood.c
+ * Westwood Studios VQA & AUD file demuxers
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the Westwood file formats, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ * http://www.geocities.com/SiliconValley/8682/aud3.txt
+ *
+ * Implementation note: There is no definite file signature for AUD files.
+ * The demuxer uses a probabilistic strategy for content detection. This
+ * entails performing sanity checks on certain header values in order to
+ * qualify a file. Refer to wsaud_probe() for the precise parameters.
+ */
+
+#include "avformat.h"
+
+#define AUD_HEADER_SIZE 12
+#define AUD_CHUNK_PREAMBLE_SIZE 8
+#define AUD_CHUNK_SIGNATURE 0x0000DEAF
+
+#define FORM_TAG MKBETAG('F', 'O', 'R', 'M')
+#define WVQA_TAG MKBETAG('W', 'V', 'Q', 'A')
+#define VQHD_TAG MKBETAG('V', 'Q', 'H', 'D')
+#define FINF_TAG MKBETAG('F', 'I', 'N', 'F')
+#define SND0_TAG MKBETAG('S', 'N', 'D', '0')
+#define SND1_TAG MKBETAG('S', 'N', 'D', '1')
+#define SND2_TAG MKBETAG('S', 'N', 'D', '2')
+#define VQFR_TAG MKBETAG('V', 'Q', 'F', 'R')
+
+/* don't know what these tags are for, but acknowledge their existence */
+#define CINF_TAG MKBETAG('C', 'I', 'N', 'F')
+#define CINH_TAG MKBETAG('C', 'I', 'N', 'H')
+#define CIND_TAG MKBETAG('C', 'I', 'N', 'D')
+#define PINF_TAG MKBETAG('P', 'I', 'N', 'F')
+#define PINH_TAG MKBETAG('P', 'I', 'N', 'H')
+#define PIND_TAG MKBETAG('P', 'I', 'N', 'D')
+#define CMDS_TAG MKBETAG('C', 'M', 'D', 'S')
+
+#define VQA_HEADER_SIZE 0x2A
+#define VQA_FRAMERATE 15
+#define VQA_VIDEO_PTS_INC (90000 / VQA_FRAMERATE)
+#define VQA_PREAMBLE_SIZE 8
+
+typedef struct WsAudDemuxContext {
+ int audio_samplerate;
+ int audio_channels;
+ int audio_bits;
+ int audio_type;
+ int audio_stream_index;
+ int64_t audio_frame_counter;
+} WsAudDemuxContext;
+
+typedef struct WsVqaDemuxContext {
+ int audio_samplerate;
+ int audio_channels;
+ int audio_bits;
+
+ int audio_stream_index;
+ int video_stream_index;
+
+ int64_t audio_frame_counter;
+ int64_t video_pts;
+} WsVqaDemuxContext;
+
+static int wsaud_probe(AVProbeData *p)
+{
+ int field;
+
+ /* Probabilistic content detection strategy: There is no file signature
+ * so perform sanity checks on various header parameters:
+ * 8000 <= sample rate (16 bits) <= 48000 ==> 40001 acceptable numbers
+ * compression type (8 bits) = 1 or 99 ==> 2 acceptable numbers
+ * There is a total of 24 bits. The number space contains 2^24 =
+ * 16777216 numbers. There are 40001 * 2 = 80002 acceptable combinations
+ * of numbers. There is a 80002/16777216 = 0.48% chance of a false
+ * positive.
+ */
+
+ if (p->buf_size < AUD_HEADER_SIZE)
+ return 0;
+
+ /* check sample rate */
+ field = LE_16(&p->buf[0]);
+ if ((field < 8000) || (field > 48000))
+ return 0;
+
+ /* note: only check for WS IMA (type 99) right now since there is no
+ * support for type 1 */
+ if (p->buf[11] != 99)
+ return 0;
+
+ /* return 1/2 certainty since this file check is a little sketchy */
+ return AVPROBE_SCORE_MAX / 2;
+}
+
+static int wsaud_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ unsigned char header[AUD_HEADER_SIZE];
+
+ if (get_buffer(pb, header, AUD_HEADER_SIZE) != AUD_HEADER_SIZE)
+ return AVERROR_IO;
+ wsaud->audio_samplerate = LE_16(&header[0]);
+ if (header[11] == 99)
+ wsaud->audio_type = CODEC_ID_ADPCM_IMA_WS;
+ else
+ return AVERROR_INVALIDDATA;
+
+ /* flag 0 indicates stereo */
+ wsaud->audio_channels = (header[10] & 0x1) + 1;
+ /* flag 1 indicates 16 bit audio */
+ wsaud->audio_bits = (((header[10] & 0x2) >> 1) + 1) * 8;
+
+ /* initialize the audio decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, wsaud->audio_samplerate);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = wsaud->audio_type;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = wsaud->audio_channels;
+ st->codec->sample_rate = wsaud->audio_samplerate;
+ st->codec->bits_per_sample = wsaud->audio_bits;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample / 4;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ wsaud->audio_stream_index = st->index;
+ wsaud->audio_frame_counter = 0;
+
+ return 0;
+}
+
+static int wsaud_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE];
+ unsigned int chunk_size;
+ int ret = 0;
+
+ if (get_buffer(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) !=
+ AUD_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+
+ /* validate the chunk */
+ if (LE_32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
+ return AVERROR_INVALIDDATA;
+
+ chunk_size = LE_16(&preamble[0]);
+ ret= av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ return AVERROR_IO;
+ pkt->stream_index = wsaud->audio_stream_index;
+    /* pts is a sample count, matching the 1/sample_rate time base set in
+     * wsaud_read_header(); no further scaling is needed here */
+    pkt->pts = wsaud->audio_frame_counter;
+
+ /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
+ wsaud->audio_frame_counter += (chunk_size * 2) / wsaud->audio_channels;
+
+ return ret;
+}
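+
+/* Each AUD chunk is preceded by an 8-byte preamble: a 16-bit compressed
+ * size, a 16-bit uncompressed size (unused here) and the 32-bit 0x0000DEAF
+ * signature checked above; the compressed IMA ADPCM payload follows. */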
+
+static int wsaud_read_close(AVFormatContext *s)
+{
+// WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+
+static int wsvqa_probe(AVProbeData *p)
+{
+ /* need 12 bytes to qualify */
+ if (p->buf_size < 12)
+ return 0;
+
+ /* check for the VQA signatures */
+ if ((BE_32(&p->buf[0]) != FORM_TAG) ||
+ (BE_32(&p->buf[8]) != WVQA_TAG))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int wsvqa_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ unsigned char *header;
+ unsigned char scratch[VQA_PREAMBLE_SIZE];
+ unsigned int chunk_tag;
+ unsigned int chunk_size;
+
+ /* initialize the video decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ wsvqa->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_WS_VQA;
+ st->codec->codec_tag = 0; /* no fourcc */
+
+ /* skip to the start of the VQA header */
+ url_fseek(pb, 20, SEEK_SET);
+
+ /* the VQA header needs to go to the decoder */
+ st->codec->extradata_size = VQA_HEADER_SIZE;
+ st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
+ header = (unsigned char *)st->codec->extradata;
+ if (get_buffer(pb, st->codec->extradata, VQA_HEADER_SIZE) !=
+ VQA_HEADER_SIZE) {
+ av_free(st->codec->extradata);
+ return AVERROR_IO;
+ }
+ st->codec->width = LE_16(&header[6]);
+ st->codec->height = LE_16(&header[8]);
+
+ st->codec->time_base.num = 1;
+ st->codec->time_base.den = VQA_FRAMERATE;
+
+ /* initialize the audio decoder stream for VQA v1 or nonzero samplerate */
+ if (LE_16(&header[24]) || (LE_16(&header[0]) == 1)) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ if (LE_16(&header[0]) == 1)
+ st->codec->codec_id = CODEC_ID_WESTWOOD_SND1;
+ else
+ st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->sample_rate = LE_16(&header[24]);
+ if (!st->codec->sample_rate)
+ st->codec->sample_rate = 22050;
+ st->codec->channels = header[26];
+ if (!st->codec->channels)
+ st->codec->channels = 1;
+ st->codec->bits_per_sample = 16;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample / 4;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ wsvqa->audio_stream_index = st->index;
+ wsvqa->audio_samplerate = st->codec->sample_rate;
+ wsvqa->audio_channels = st->codec->channels;
+ wsvqa->audio_frame_counter = 0;
+ }
+
+ /* there are 0 or more chunks before the FINF chunk; iterate until
+ * FINF has been skipped and the file will be ready to be demuxed */
+ do {
+ if (get_buffer(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) {
+ av_free(st->codec->extradata);
+ return AVERROR_IO;
+ }
+ chunk_tag = BE_32(&scratch[0]);
+ chunk_size = BE_32(&scratch[4]);
+
+        /* catch any unknown header tags, for curiosity */
+ switch (chunk_tag) {
+ case CINF_TAG:
+ case CINH_TAG:
+ case CIND_TAG:
+ case PINF_TAG:
+ case PINH_TAG:
+ case PIND_TAG:
+ case FINF_TAG:
+ case CMDS_TAG:
+ break;
+
+ default:
+ av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n",
+ scratch[0], scratch[1],
+ scratch[2], scratch[3]);
+ break;
+ }
+
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ } while (chunk_tag != FINF_TAG);
+
+ wsvqa->video_pts = wsvqa->audio_frame_counter = 0;
+
+ return 0;
+}
+
+static int wsvqa_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret = -1;
+ unsigned char preamble[VQA_PREAMBLE_SIZE];
+ unsigned int chunk_type;
+ unsigned int chunk_size;
+ int skip_byte;
+
+ while (get_buffer(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) {
+ chunk_type = BE_32(&preamble[0]);
+ chunk_size = BE_32(&preamble[4]);
+ skip_byte = chunk_size & 0x01;
+
+ if ((chunk_type == SND1_TAG) || (chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) {
+
+ if (av_new_packet(pkt, chunk_size))
+ return AVERROR_IO;
+ ret = get_buffer(pb, pkt->data, chunk_size);
+ if (ret != chunk_size) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+
+ if (chunk_type == SND2_TAG) {
+ pkt->stream_index = wsvqa->audio_stream_index;
+
+ pkt->pts = 90000;
+ pkt->pts *= wsvqa->audio_frame_counter;
+ pkt->pts /= wsvqa->audio_samplerate;
+
+ /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
+ wsvqa->audio_frame_counter += (chunk_size * 2) / wsvqa->audio_channels;
+ } else if(chunk_type == SND1_TAG) {
+ pkt->stream_index = wsvqa->audio_stream_index;
+
+ pkt->pts = 90000;
+ pkt->pts *= wsvqa->audio_frame_counter;
+ pkt->pts /= wsvqa->audio_samplerate;
+
+ /* unpacked size is stored in header */
+ wsvqa->audio_frame_counter += LE_16(pkt->data) / wsvqa->audio_channels;
+ } else {
+ pkt->stream_index = wsvqa->video_stream_index;
+ pkt->pts = wsvqa->video_pts;
+ wsvqa->video_pts += VQA_VIDEO_PTS_INC;
+ }
+ /* stay on 16-bit alignment */
+ if (skip_byte)
+ url_fseek(pb, 1, SEEK_CUR);
+
+ return ret;
+ } else {
+ switch(chunk_type){
+ case CMDS_TAG:
+ case SND0_TAG:
+ break;
+ default:
+ av_log(s, AV_LOG_INFO, "Skipping unknown chunk 0x%08X\n", chunk_type);
+ }
+ url_fseek(pb, chunk_size + skip_byte, SEEK_CUR);
+ }
+ }
+
+ return ret;
+}
+
+static int wsvqa_read_close(AVFormatContext *s)
+{
+// WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+#ifdef CONFIG_WSAUD_DEMUXER
+AVInputFormat wsaud_demuxer = {
+ "wsaud",
+ "Westwood Studios audio format",
+ sizeof(WsAudDemuxContext),
+ wsaud_probe,
+ wsaud_read_header,
+ wsaud_read_packet,
+ wsaud_read_close,
+};
+#endif
+#ifdef CONFIG_WSVQA_DEMUXER
+AVInputFormat wsvqa_demuxer = {
+ "wsvqa",
+ "Westwood Studios VQA format",
+ sizeof(WsVqaDemuxContext),
+ wsvqa_probe,
+ wsvqa_read_header,
+ wsvqa_read_packet,
+ wsvqa_read_close,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/wv.c b/contrib/ffmpeg/libavformat/wv.c
new file mode 100644
index 000000000..2de07fe3f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/wv.c
@@ -0,0 +1,202 @@
+/*
+ * WavPack demuxer
+ * Copyright (c) 2006 Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "allformats.h"
+#include "bswap.h"
+
+// the spec says the maximum block size is 1 MiB
+#define WV_BLOCK_LIMIT 1048576
+
+#define WV_EXTRA_SIZE 12
+
+enum WV_FLAGS{
+ WV_MONO = 0x0004,
+ WV_HYBRID = 0x0008,
+ WV_JOINT = 0x0010,
+ WV_CROSSD = 0x0020,
+ WV_HSHAPE = 0x0040,
+ WV_FLOAT = 0x0080,
+ WV_INT32 = 0x0100,
+ WV_HBR = 0x0200,
+ WV_HBAL = 0x0400,
+ WV_MCINIT = 0x0800,
+ WV_MCEND = 0x1000,
+};
+
+static const int wv_rates[16] = {
+ 6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000,
+ 32000, 44100, 48000, 64000, 88200, 96000, 192000, -1
+};
+
+typedef struct{
+ uint32_t blksize, flags;
+ int rate, chan, bpp;
+ int block_parsed;
+ uint8_t extra[WV_EXTRA_SIZE];
+}WVContext;
+
+static int wv_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'w' && p->buf[1] == 'v' &&
+ p->buf[2] == 'p' && p->buf[3] == 'k')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int wv_read_block_header(AVFormatContext *ctx, ByteIOContext *pb)
+{
+ WVContext *wc = ctx->priv_data;
+ uint32_t tag, ver;
+ int size;
+ int rate, bpp, chan;
+
+ tag = get_le32(pb);
+ if (tag != MKTAG('w', 'v', 'p', 'k'))
+ return -1;
+ size = get_le32(pb);
+ if(size < 24 || size > WV_BLOCK_LIMIT){
+ av_log(ctx, AV_LOG_ERROR, "Incorrect block size %i\n", size);
+ return -1;
+ }
+ wc->blksize = size;
+ ver = get_le16(pb);
+ if(ver < 0x402 || ver > 0x40F){
+ av_log(ctx, AV_LOG_ERROR, "Unsupported version %03X\n", ver);
+ return -1;
+ }
+ get_byte(pb); // track no
+ get_byte(pb); // track sub index
+ get_le32(pb); // total samples in file
+ get_le32(pb); // offset in samples of current block
+ get_buffer(pb, wc->extra, WV_EXTRA_SIZE);
+ wc->flags = LE_32(wc->extra + 4);
+ //parse flags
+ if(wc->flags & WV_FLOAT){
+ av_log(ctx, AV_LOG_ERROR, "Floating point data is not supported\n");
+ return -1;
+ }
+ if(wc->flags & WV_HYBRID){
+ av_log(ctx, AV_LOG_ERROR, "Hybrid coding mode is not supported\n");
+ return -1;
+ }
+ if(wc->flags & WV_INT32){
+ av_log(ctx, AV_LOG_ERROR, "Integer point data is not supported\n");
+ return -1;
+ }
+
+ bpp = ((wc->flags & 3) + 1) << 3;
+ chan = 1 + !(wc->flags & WV_MONO);
+ rate = wv_rates[(wc->flags >> 23) & 0xF];
+ if(rate == -1){
+ av_log(ctx, AV_LOG_ERROR, "Unknown sampling rate\n");
+ return -1;
+ }
+ if(!wc->bpp) wc->bpp = bpp;
+ if(!wc->chan) wc->chan = chan;
+ if(!wc->rate) wc->rate = rate;
+
+ if(wc->flags && bpp != wc->bpp){
+ av_log(ctx, AV_LOG_ERROR, "Bits per sample differ, this block: %i, header block: %i\n", bpp, wc->bpp);
+ return -1;
+ }
+ if(wc->flags && chan != wc->chan){
+ av_log(ctx, AV_LOG_ERROR, "Channels differ, this block: %i, header block: %i\n", chan, wc->chan);
+ return -1;
+ }
+ if(wc->flags && rate != wc->rate){
+ av_log(ctx, AV_LOG_ERROR, "Sampling rate differ, this block: %i, header block: %i\n", rate, wc->rate);
+ return -1;
+ }
+ wc->blksize = size - 24;
+ return 0;
+}
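+
+/* The 32-byte WavPack block header parsed above is laid out as: "wvpk",
+ * 32-bit block size, 16-bit version, track/index bytes, total samples and
+ * block index, followed by the 12 bytes kept in wc->extra (block samples,
+ * flags, CRC) that get prepended to every packet for the decoder.  The
+ * flags word encodes bytes per sample in bits 0-1, mono in bit 2 and the
+ * sample-rate table index in bits 23-26. */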
+
+static int wv_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ WVContext *wc = s->priv_data;
+ AVStream *st;
+
+ if(wv_read_block_header(s, pb) < 0)
+ return -1;
+
+ wc->block_parsed = 0;
+ /* now we are ready: build format streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_WAVPACK;
+ st->codec->channels = wc->chan;
+ st->codec->sample_rate = wc->rate;
+ st->codec->bits_per_sample = wc->bpp;
+ av_set_pts_info(st, 64, 1, wc->rate);
+ return 0;
+}
+
+static int wv_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ WVContext *wc = s->priv_data;
+ int ret;
+
+ if (url_feof(&s->pb))
+ return -EIO;
+ if(wc->block_parsed){
+ if(wv_read_block_header(s, &s->pb) < 0)
+ return -1;
+ }
+
+ if(av_new_packet(pkt, wc->blksize + WV_EXTRA_SIZE) < 0)
+ return AVERROR_NOMEM;
+ memcpy(pkt->data, wc->extra, WV_EXTRA_SIZE);
+ ret = get_buffer(&s->pb, pkt->data + WV_EXTRA_SIZE, wc->blksize);
+ if(ret != wc->blksize){
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->stream_index = 0;
+ wc->block_parsed = 1;
+ pkt->size = ret + WV_EXTRA_SIZE;
+
+ return 0;
+}
+
+static int wv_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVInputFormat wv_demuxer = {
+ "wv",
+ "WavPack",
+ sizeof(WVContext),
+ wv_probe,
+ wv_read_header,
+ wv_read_packet,
+ wv_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/yuv.c b/contrib/ffmpeg/libavformat/yuv.c
new file mode 100644
index 000000000..fe52cdea5
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/yuv.c
@@ -0,0 +1,161 @@
+/*
+ * .Y.U.V image format
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+static int sizes[][2] = {
+ { 640, 480 },
+ { 720, 480 },
+ { 720, 576 },
+ { 352, 288 },
+ { 352, 240 },
+ { 160, 128 },
+ { 512, 384 },
+ { 640, 352 },
+ { 640, 240 },
+};
+
+static int infer_size(int *width_ptr, int *height_ptr, int size)
+{
+ int i;
+
+ for(i=0;i<sizeof(sizes)/sizeof(sizes[0]);i++) {
+ if ((sizes[i][0] * sizes[i][1]) == size) {
+ *width_ptr = sizes[i][0];
+ *height_ptr = sizes[i][1];
+ return 0;
+ }
+ }
+ return -1;
+}
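+
+/* Raw .Y/.U/.V triplets carry no header, so the frame size is guessed by
+ * matching the size of the .Y file against the table above; e.g. a
+ * 307200-byte luma file is assumed to be 640x480, with the .U and .V files
+ * each expected to hold a quarter of that (76800 bytes) for 4:2:0. */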
+
+static int yuv_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ ByteIOContext pb1, *pb = &pb1;
+ int img_size, ret;
+ char fname[1024], *p;
+ int size;
+ URLContext *h;
+ AVImageInfo info1, *info = &info1;
+
+ img_size = url_fsize(f);
+
+ /* XXX: hack hack */
+ h = url_fileno(f);
+ url_get_filename(h, fname, sizeof(fname));
+
+ if (infer_size(&info->width, &info->height, img_size) < 0) {
+ return AVERROR_IO;
+ }
+ info->pix_fmt = PIX_FMT_YUV420P;
+
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ return ret;
+
+ size = info->width * info->height;
+
+ p = strrchr(fname, '.');
+ if (!p || p[1] != 'Y')
+ return AVERROR_IO;
+
+ get_buffer(f, info->pict.data[0], size);
+
+ p[1] = 'U';
+ if (url_fopen(pb, fname, URL_RDONLY) < 0)
+ return AVERROR_IO;
+
+ get_buffer(pb, info->pict.data[1], size / 4);
+ url_fclose(pb);
+
+ p[1] = 'V';
+ if (url_fopen(pb, fname, URL_RDONLY) < 0)
+ return AVERROR_IO;
+
+ get_buffer(pb, info->pict.data[2], size / 4);
+ url_fclose(pb);
+ return 0;
+}
+
+static int yuv_write(ByteIOContext *pb2, AVImageInfo *info)
+{
+ ByteIOContext pb1, *pb;
+ char fname[1024], *p;
+ int i, j, width, height;
+ uint8_t *ptr;
+ URLContext *h;
+ static const char *ext = "YUV";
+
+ /* XXX: hack hack */
+ h = url_fileno(pb2);
+ url_get_filename(h, fname, sizeof(fname));
+
+ p = strrchr(fname, '.');
+ if (!p || p[1] != 'Y')
+ return AVERROR_IO;
+
+ width = info->width;
+ height = info->height;
+
+ for(i=0;i<3;i++) {
+ if (i == 1) {
+ width >>= 1;
+ height >>= 1;
+ }
+
+ if (i >= 1) {
+ pb = &pb1;
+ p[1] = ext[i];
+ if (url_fopen(pb, fname, URL_WRONLY) < 0)
+ return AVERROR_IO;
+ } else {
+ pb = pb2;
+ }
+
+ ptr = info->pict.data[i];
+ for(j=0;j<height;j++) {
+ put_buffer(pb, ptr, width);
+ ptr += info->pict.linesize[i];
+ }
+ put_flush_packet(pb);
+ if (i >= 1) {
+ url_fclose(pb);
+ }
+ }
+ return 0;
+}
+
+static int yuv_probe(AVProbeData *pd)
+{
+ if (match_ext(pd->filename, "Y"))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+AVImageFormat yuv_image_format = {
+ "yuv",
+ "Y",
+ yuv_probe,
+ yuv_read,
+ (1 << PIX_FMT_YUV420P),
+ yuv_write,
+};
diff --git a/contrib/ffmpeg/libavformat/yuv4mpeg.c b/contrib/ffmpeg/libavformat/yuv4mpeg.c
new file mode 100644
index 000000000..70214ae00
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/yuv4mpeg.c
@@ -0,0 +1,408 @@
+/*
+ * YUV4MPEG format
+ * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#define Y4M_MAGIC "YUV4MPEG2"
+#define Y4M_FRAME_MAGIC "FRAME"
+#define Y4M_LINE_MAX 256
+
+struct frame_attributes {
+ int interlaced_frame;
+ int top_field_first;
+};
+
+static int yuv4_generate_header(AVFormatContext *s, char* buf)
+{
+ AVStream *st;
+ int width, height;
+ int raten, rated, aspectn, aspectd, n;
+ char inter;
+ const char *colorspace = "";
+
+ st = s->streams[0];
+ width = st->codec->width;
+ height = st->codec->height;
+
+ av_reduce(&raten, &rated, st->codec->time_base.den, st->codec->time_base.num, (1UL<<31)-1);
+
+ aspectn = st->codec->sample_aspect_ratio.num;
+ aspectd = st->codec->sample_aspect_ratio.den;
+
+ if ( aspectn == 0 && aspectd == 1 ) aspectd = 0; // 0:0 means unknown
+
+ inter = 'p'; /* progressive is the default */
+ if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame) {
+ inter = st->codec->coded_frame->top_field_first ? 't' : 'b';
+ }
+
+ switch(st->codec->pix_fmt) {
+ case PIX_FMT_GRAY8:
+ colorspace = " Cmono";
+ break;
+ case PIX_FMT_YUV411P:
+ colorspace = " C411 XYSCSS=411";
+ break;
+ case PIX_FMT_YUV420P:
+ colorspace = (st->codec->codec_id == CODEC_ID_DVVIDEO)?" C420paldv XYSCSS=420PALDV":" C420mpeg2 XYSCSS=420MPEG2";
+ break;
+ case PIX_FMT_YUV422P:
+ colorspace = " C422 XYSCSS=422";
+ break;
+ case PIX_FMT_YUV444P:
+ colorspace = " C444 XYSCSS=444";
+ break;
+ }
+
+ /* construct stream header, if this is the first frame */
+ n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n",
+ Y4M_MAGIC,
+ width,
+ height,
+ raten, rated,
+ inter,
+ aspectn, aspectd,
+ colorspace);
+
+ return n;
+}
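+
+/* Example of a header produced above for a progressive 25 fps PAL 4:2:0
+ * stream with unknown pixel aspect:
+ *   "YUV4MPEG2 W720 H576 F25:1 Ip A0:0 C420mpeg2 XYSCSS=420MPEG2\n" */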
+
+static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVStream *st = s->streams[pkt->stream_index];
+ ByteIOContext *pb = &s->pb;
+ AVPicture *picture;
+ int* first_pkt = s->priv_data;
+ int width, height, h_chroma_shift, v_chroma_shift;
+ int i, m;
+ char buf2[Y4M_LINE_MAX+1];
+ char buf1[20];
+ uint8_t *ptr, *ptr1, *ptr2;
+
+ picture = (AVPicture *)pkt->data;
+
+ /* for the first packet we have to output the header as well */
+ if (*first_pkt) {
+ *first_pkt = 0;
+ if (yuv4_generate_header(s, buf2) < 0) {
+ av_log(s, AV_LOG_ERROR, "Error. YUV4MPEG stream header write failed.\n");
+ return AVERROR_IO;
+ } else {
+ put_buffer(pb, buf2, strlen(buf2));
+ }
+ }
+
+ /* construct frame header */
+
+ m = snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
+ put_buffer(pb, buf1, strlen(buf1));
+
+ width = st->codec->width;
+ height = st->codec->height;
+
+ ptr = picture->data[0];
+ for(i=0;i<height;i++) {
+ put_buffer(pb, ptr, width);
+ ptr += picture->linesize[0];
+ }
+
+ if (st->codec->pix_fmt != PIX_FMT_GRAY8){
+ // Adjust for smaller Cb and Cr planes
+ avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+ width >>= h_chroma_shift;
+ height >>= v_chroma_shift;
+
+ ptr1 = picture->data[1];
+ ptr2 = picture->data[2];
+ for(i=0;i<height;i++) { /* Cb */
+ put_buffer(pb, ptr1, width);
+ ptr1 += picture->linesize[1];
+ }
+ for(i=0;i<height;i++) { /* Cr */
+ put_buffer(pb, ptr2, width);
+ ptr2 += picture->linesize[2];
+ }
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int yuv4_write_header(AVFormatContext *s)
+{
+ int* first_pkt = s->priv_data;
+
+ if (s->nb_streams != 1)
+ return AVERROR_IO;
+
+ if (s->streams[0]->codec->pix_fmt == PIX_FMT_YUV411P) {
+ av_log(s, AV_LOG_ERROR, "Warning: generating rarely used 4:1:1 YUV stream, some mjpegtools might not work.\n");
+ }
+ else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) &&
+ (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) &&
+ (s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) &&
+ (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV444P)) {
+ av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg only handles yuv444p, yuv422p, yuv420p, yuv411p and gray pixel formats. Use -pix_fmt to select one.\n");
+ return AVERROR_IO;
+ }
+
+ *first_pkt = 1;
+ return 0;
+}
+
+static int yuv4_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+#ifdef CONFIG_YUV4MPEGPIPE_MUXER
+AVOutputFormat yuv4mpegpipe_muxer = {
+ "yuv4mpegpipe",
+ "YUV4MPEG pipe format",
+ "",
+ "y4m",
+ sizeof(int),
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ yuv4_write_header,
+ yuv4_write_packet,
+ yuv4_write_trailer,
+ .flags = AVFMT_RAWPICTURE,
+};
+#endif
+
+/* Header size increased to allow room for optional flags */
+#define MAX_YUV4_HEADER 80
+#define MAX_FRAME_HEADER 80
+
+static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ char header[MAX_YUV4_HEADER+10]; // Include headroom for the longest option
+ char *tokstart,*tokend,*header_end;
+ int i;
+ ByteIOContext *pb = &s->pb;
+ int width=-1, height=-1, raten=0, rated=0, aspectn=0, aspectd=0;
+ enum PixelFormat pix_fmt=PIX_FMT_NONE,alt_pix_fmt=PIX_FMT_NONE;
+ AVStream *st;
+ struct frame_attributes *s1 = s->priv_data;
+
+ for (i=0; i<MAX_YUV4_HEADER; i++) {
+ header[i] = get_byte(pb);
+ if (header[i] == '\n') {
+ header[i+1] = 0x20; // Add a space after last option. Makes parsing "444" vs "444alpha" easier.
+ header[i+2] = 0;
+ break;
+ }
+ }
+ if (i == MAX_YUV4_HEADER) return -1;
+ if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC))) return -1;
+
+ s1->interlaced_frame = 0;
+ s1->top_field_first = 0;
+ header_end = &header[i+1]; // Include space
+ for(tokstart = &header[strlen(Y4M_MAGIC) + 1]; tokstart < header_end; tokstart++) {
+ if (*tokstart==0x20) continue;
+ switch (*tokstart++) {
+ case 'W': // Width. Required.
+ width = strtol(tokstart, &tokend, 10);
+ tokstart=tokend;
+ break;
+ case 'H': // Height. Required.
+ height = strtol(tokstart, &tokend, 10);
+ tokstart=tokend;
+ break;
+ case 'C': // Color space
+ if (strncmp("420jpeg",tokstart,7)==0)
+ pix_fmt = PIX_FMT_YUV420P;
+ else if (strncmp("420mpeg2",tokstart,8)==0)
+ pix_fmt = PIX_FMT_YUV420P;
+ else if (strncmp("420paldv", tokstart, 8)==0)
+ pix_fmt = PIX_FMT_YUV420P;
+ else if (strncmp("411", tokstart, 3)==0)
+ pix_fmt = PIX_FMT_YUV411P;
+ else if (strncmp("422", tokstart, 3)==0)
+ pix_fmt = PIX_FMT_YUV422P;
+ else if (strncmp("444alpha", tokstart, 8)==0) {
+ av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 YUV4MPEG stream.\n");
+ return -1;
+ } else if (strncmp("444", tokstart, 3)==0)
+ pix_fmt = PIX_FMT_YUV444P;
+ else if (strncmp("mono",tokstart, 4)==0) {
+ pix_fmt = PIX_FMT_GRAY8;
+ } else {
+ av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown pixel format.\n");
+ return -1;
+ }
+ while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
+ break;
+ case 'I': // Interlace type
+ switch (*tokstart++){
+ case '?':
+ break;
+ case 'p':
+ s1->interlaced_frame=0;
+ break;
+ case 't':
+ s1->interlaced_frame=1;
+ s1->top_field_first=1;
+ break;
+ case 'b':
+ s1->interlaced_frame=1;
+ s1->top_field_first=0;
+ break;
+ case 'm':
+ av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed interlaced and non-interlaced frames.\n");
+ return -1;
+ default:
+ av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
+ return -1;
+ }
+ break;
+ case 'F': // Frame rate
+ sscanf(tokstart,"%d:%d",&raten,&rated); // 0:0 if unknown
+ while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
+ break;
+ case 'A': // Pixel aspect
+ sscanf(tokstart,"%d:%d",&aspectn,&aspectd); // 0:0 if unknown
+ while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
+ break;
+ case 'X': // Vendor extensions
+ if (strncmp("YSCSS=",tokstart,6)==0) {
+ // Older nonstandard pixel format representation
+ tokstart+=6;
+ if (strncmp("420JPEG",tokstart,7)==0)
+ alt_pix_fmt=PIX_FMT_YUV420P;
+ else if (strncmp("420MPEG2",tokstart,8)==0)
+ alt_pix_fmt=PIX_FMT_YUV420P;
+ else if (strncmp("420PALDV",tokstart,8)==0)
+ alt_pix_fmt=PIX_FMT_YUV420P;
+ else if (strncmp("411",tokstart,3)==0)
+ alt_pix_fmt=PIX_FMT_YUV411P;
+ else if (strncmp("422",tokstart,3)==0)
+ alt_pix_fmt=PIX_FMT_YUV422P;
+ else if (strncmp("444",tokstart,3)==0)
+ alt_pix_fmt=PIX_FMT_YUV444P;
+ }
+ while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
+ break;
+ }
+ }
+
+ if ((width == -1) || (height == -1)) {
+ av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
+ return -1;
+ }
+
+ if (pix_fmt == PIX_FMT_NONE) {
+ if (alt_pix_fmt == PIX_FMT_NONE)
+ pix_fmt = PIX_FMT_YUV420P;
+ else
+ pix_fmt = alt_pix_fmt;
+ }
+
+ if (raten == 0 && rated == 0) {
+ // Frame rate unknown
+ raten = 25;
+ rated = 1;
+ }
+
+ if (aspectn == 0 && aspectd == 0) {
+ // Pixel aspect unknown
+ aspectd = 1;
+ }
+
+    st = av_new_stream(s, 0);
+    if (!st)
+        return AVERROR_NOMEM;
+ st->codec->width = width;
+ st->codec->height = height;
+ av_reduce(&raten, &rated, raten, rated, (1UL<<31)-1);
+ av_set_pts_info(st, 64, rated, raten);
+ st->codec->pix_fmt = pix_fmt;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->sample_aspect_ratio= (AVRational){aspectn, aspectd};
+
+ return 0;
+}
+
+static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int i;
+ char header[MAX_FRAME_HEADER+1];
+ int packet_size, width, height;
+ AVStream *st = s->streams[0];
+ struct frame_attributes *s1 = s->priv_data;
+
+ for (i=0; i<MAX_FRAME_HEADER; i++) {
+ header[i] = get_byte(&s->pb);
+ if (header[i] == '\n') {
+ header[i+1] = 0;
+ break;
+ }
+ }
+ if (i == MAX_FRAME_HEADER) return -1;
+ if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC))) return -1;
+
+ width = st->codec->width;
+ height = st->codec->height;
+
+ packet_size = avpicture_get_size(st->codec->pix_fmt, width, height);
+ if (packet_size < 0)
+ return -1;
+
+ if (av_get_packet(&s->pb, pkt, packet_size) != packet_size)
+ return AVERROR_IO;
+
+ if (s->streams[0]->codec->coded_frame) {
+ s->streams[0]->codec->coded_frame->interlaced_frame = s1->interlaced_frame;
+ s->streams[0]->codec->coded_frame->top_field_first = s1->top_field_first;
+ }
+
+ pkt->stream_index = 0;
+ return 0;
+}
+
+static int yuv4_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int yuv4_probe(AVProbeData *pd)
+{
+ /* check file header */
+ if (pd->buf_size <= sizeof(Y4M_MAGIC))
+ return 0;
+ if (strncmp(pd->buf, Y4M_MAGIC, sizeof(Y4M_MAGIC)-1)==0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+#ifdef CONFIG_YUV4MPEGPIPE_DEMUXER
+AVInputFormat yuv4mpegpipe_demuxer = {
+ "yuv4mpegpipe",
+ "YUV4MPEG pipe format",
+ sizeof(struct frame_attributes),
+ yuv4_probe,
+ yuv4_read_header,
+ yuv4_read_packet,
+ yuv4_read_close,
+ .extensions = "y4m"
+};
+#endif