Diffstat (limited to 'contrib/ffmpeg/libavformat')
-rw-r--r--  contrib/ffmpeg/libavformat/4xm.c | 28
-rw-r--r--  contrib/ffmpeg/libavformat/Makefile | 52
-rw-r--r--  contrib/ffmpeg/libavformat/aiff.c | 32
-rw-r--r--  contrib/ffmpeg/libavformat/allformats.c | 28
-rw-r--r--  contrib/ffmpeg/libavformat/allformats.h | 8
-rw-r--r--  contrib/ffmpeg/libavformat/asf-enc.c | 179
-rw-r--r--  contrib/ffmpeg/libavformat/asf.c | 432
-rw-r--r--  contrib/ffmpeg/libavformat/asf.h | 127
-rw-r--r--  contrib/ffmpeg/libavformat/au.c | 10
-rw-r--r--  contrib/ffmpeg/libavformat/audio.c | 14
-rw-r--r--  contrib/ffmpeg/libavformat/avformat.h | 503
-rw-r--r--  contrib/ffmpeg/libavformat/avidec.c | 123
-rw-r--r--  contrib/ffmpeg/libavformat/avienc.c | 60
-rw-r--r--  contrib/ffmpeg/libavformat/avio.c | 36
-rw-r--r--  contrib/ffmpeg/libavformat/avio.h | 93
-rw-r--r--  contrib/ffmpeg/libavformat/aviobuf.c | 65
-rw-r--r--  contrib/ffmpeg/libavformat/barpainet.h | 45
-rw-r--r--  contrib/ffmpeg/libavformat/base64.c | 231
-rw-r--r--  contrib/ffmpeg/libavformat/beosaudio.cpp | 24
-rw-r--r--  contrib/ffmpeg/libavformat/dc1394.c | 4
-rw-r--r--  contrib/ffmpeg/libavformat/dsicin.c | 4
-rw-r--r--  contrib/ffmpeg/libavformat/dv.c | 14
-rw-r--r--  contrib/ffmpeg/libavformat/dv.h | 2
-rw-r--r--  contrib/ffmpeg/libavformat/dv1394.c | 8
-rw-r--r--  contrib/ffmpeg/libavformat/dvenc.c | 12
-rw-r--r--  contrib/ffmpeg/libavformat/dxa.c | 214
-rw-r--r--  contrib/ffmpeg/libavformat/electronicarts.c | 6
-rw-r--r--  contrib/ffmpeg/libavformat/ffm.c | 12
-rw-r--r--  contrib/ffmpeg/libavformat/file.c | 12
-rw-r--r--  contrib/ffmpeg/libavformat/flic.c | 20
-rw-r--r--  contrib/ffmpeg/libavformat/flv.h | 110
-rw-r--r--  contrib/ffmpeg/libavformat/flvdec.c | 346
-rw-r--r--  contrib/ffmpeg/libavformat/flvenc.c | 144
-rw-r--r--  contrib/ffmpeg/libavformat/framehook.c | 11
-rw-r--r--  contrib/ffmpeg/libavformat/framehook.h | 4
-rw-r--r--  contrib/ffmpeg/libavformat/gifdec.c | 6
-rw-r--r--  contrib/ffmpeg/libavformat/grab.c | 30
-rw-r--r--  contrib/ffmpeg/libavformat/grab_bktr.c | 30
-rw-r--r--  contrib/ffmpeg/libavformat/gxf.c | 15
-rw-r--r--  contrib/ffmpeg/libavformat/gxfenc.c | 21
-rw-r--r--  contrib/ffmpeg/libavformat/http.c | 137
-rw-r--r--  contrib/ffmpeg/libavformat/idcin.c | 10
-rw-r--r--  contrib/ffmpeg/libavformat/idroq.c | 22
-rw-r--r--  contrib/ffmpeg/libavformat/img.c | 400
-rw-r--r--  contrib/ffmpeg/libavformat/img2.c | 13
-rw-r--r--  contrib/ffmpeg/libavformat/ipmovie.c | 30
-rw-r--r--  contrib/ffmpeg/libavformat/isom.c | 130
-rw-r--r--  contrib/ffmpeg/libavformat/isom.h | 4
-rw-r--r--  contrib/ffmpeg/libavformat/jpeg.c | 240
-rw-r--r--  contrib/ffmpeg/libavformat/libnut.c | 59
-rw-r--r--  contrib/ffmpeg/libavformat/matroska.c | 565
-rw-r--r--  contrib/ffmpeg/libavformat/mm.c | 8
-rw-r--r--  contrib/ffmpeg/libavformat/mov.c | 292
-rw-r--r--  contrib/ffmpeg/libavformat/movenc.c | 77
-rw-r--r--  contrib/ffmpeg/libavformat/mp3.c | 3
-rw-r--r--  contrib/ffmpeg/libavformat/mpc.c | 231
-rw-r--r--  contrib/ffmpeg/libavformat/mpeg.c | 109
-rw-r--r--  contrib/ffmpeg/libavformat/mpegts.c | 97
-rw-r--r--  contrib/ffmpeg/libavformat/mpegts.h | 2
-rw-r--r--  contrib/ffmpeg/libavformat/mpegtsenc.c | 69
-rw-r--r--  contrib/ffmpeg/libavformat/mxf.c | 726
-rw-r--r--  contrib/ffmpeg/libavformat/network.h (renamed from contrib/ffmpeg/libavformat/base64.h) | 21
-rw-r--r--  contrib/ffmpeg/libavformat/nsvdec.c | 14
-rw-r--r--  contrib/ffmpeg/libavformat/nut.c | 2
-rw-r--r--  contrib/ffmpeg/libavformat/nutdec.c | 24
-rw-r--r--  contrib/ffmpeg/libavformat/nuv.c | 8
-rw-r--r--  contrib/ffmpeg/libavformat/ogg.c | 4
-rw-r--r--  contrib/ffmpeg/libavformat/ogg2.c | 13
-rw-r--r--  contrib/ffmpeg/libavformat/ogg2.h | 1
-rw-r--r--  contrib/ffmpeg/libavformat/os_support.c | 77
-rw-r--r--  contrib/ffmpeg/libavformat/os_support.h | 60
-rw-r--r--  contrib/ffmpeg/libavformat/png.c | 889
-rw-r--r--  contrib/ffmpeg/libavformat/pnm.c | 478
-rw-r--r--  contrib/ffmpeg/libavformat/psxstr.c | 18
-rw-r--r--  contrib/ffmpeg/libavformat/qtpalette.h | 16
-rw-r--r--  contrib/ffmpeg/libavformat/raw.c | 64
-rw-r--r--  contrib/ffmpeg/libavformat/riff.c | 106
-rw-r--r--  contrib/ffmpeg/libavformat/riff.h | 30
-rw-r--r--  contrib/ffmpeg/libavformat/rm.c | 7
-rw-r--r--  contrib/ffmpeg/libavformat/rtp.c | 18
-rw-r--r--  contrib/ffmpeg/libavformat/rtp.h | 15
-rw-r--r--  contrib/ffmpeg/libavformat/rtp_h264.c | 16
-rw-r--r--  contrib/ffmpeg/libavformat/rtpproto.c | 14
-rw-r--r--  contrib/ffmpeg/libavformat/rtsp.c | 11
-rw-r--r--  contrib/ffmpeg/libavformat/rtsp.h | 26
-rw-r--r--  contrib/ffmpeg/libavformat/rtspcodes.h | 27
-rw-r--r--  contrib/ffmpeg/libavformat/segafilm.c | 51
-rw-r--r--  contrib/ffmpeg/libavformat/sgi.c | 460
-rw-r--r--  contrib/ffmpeg/libavformat/sierravmd.c | 22
-rw-r--r--  contrib/ffmpeg/libavformat/smacker.c | 4
-rw-r--r--  contrib/ffmpeg/libavformat/sol.c | 4
-rw-r--r--  contrib/ffmpeg/libavformat/swf.c | 355
-rw-r--r--  contrib/ffmpeg/libavformat/tcp.c | 52
-rw-r--r--  contrib/ffmpeg/libavformat/thp.c | 170
-rw-r--r--  contrib/ffmpeg/libavformat/tiertexseq.c | 5
-rw-r--r--  contrib/ffmpeg/libavformat/tta.c | 12
-rw-r--r--  contrib/ffmpeg/libavformat/udp.c | 32
-rw-r--r--  contrib/ffmpeg/libavformat/utils.c | 708
-rw-r--r--  contrib/ffmpeg/libavformat/v4l2.c | 192
-rw-r--r--  contrib/ffmpeg/libavformat/voc.c | 2
-rw-r--r--  contrib/ffmpeg/libavformat/voc.h | 2
-rw-r--r--  contrib/ffmpeg/libavformat/vocdec.c | 6
-rw-r--r--  contrib/ffmpeg/libavformat/vocenc.c | 11
-rw-r--r--  contrib/ffmpeg/libavformat/wav.c | 42
-rw-r--r--  contrib/ffmpeg/libavformat/wc3movie.c | 24
-rw-r--r--  contrib/ffmpeg/libavformat/westwood.c | 50
-rw-r--r--  contrib/ffmpeg/libavformat/wv.c | 49
-rw-r--r--  contrib/ffmpeg/libavformat/x11grab.c | 529
-rw-r--r--  contrib/ffmpeg/libavformat/yuv.c | 161
109 files changed, 5168 insertions, 6073 deletions
diff --git a/contrib/ffmpeg/libavformat/4xm.c b/contrib/ffmpeg/libavformat/4xm.c
index 12e7d9ee4..bf10b9e82 100644
--- a/contrib/ffmpeg/libavformat/4xm.c
+++ b/contrib/ffmpeg/libavformat/4xm.c
@@ -82,8 +82,8 @@ static int fourxm_probe(AVProbeData *p)
if (p->buf_size < 12)
return 0;
- if ((LE_32(&p->buf[0]) != RIFF_TAG) ||
- (LE_32(&p->buf[8]) != _4XMV_TAG))
+ if ((AV_RL32(&p->buf[0]) != RIFF_TAG) ||
+ (AV_RL32(&p->buf[8]) != _4XMV_TAG))
return 0;
return AVPROBE_SCORE_MAX;
@@ -125,19 +125,19 @@ static int fourxm_read_header(AVFormatContext *s,
/* take the lazy approach and search for any and all vtrk and strk chunks */
for (i = 0; i < header_size - 8; i++) {
- fourcc_tag = LE_32(&header[i]);
- size = LE_32(&header[i + 4]);
+ fourcc_tag = AV_RL32(&header[i]);
+ size = AV_RL32(&header[i + 4]);
if (fourcc_tag == std__TAG) {
- fourxm->fps = av_int2flt(LE_32(&header[i + 12]));
+ fourxm->fps = av_int2flt(AV_RL32(&header[i + 12]));
} else if (fourcc_tag == vtrk_TAG) {
/* check that there is enough data */
if (size != vtrk_SIZE) {
av_free(header);
return AVERROR_INVALIDDATA;
}
- fourxm->width = LE_32(&header[i + 36]);
- fourxm->height = LE_32(&header[i + 40]);
+ fourxm->width = AV_RL32(&header[i + 36]);
+ fourxm->height = AV_RL32(&header[i + 40]);
i += 8 + size;
/* allocate a new AVStream */
@@ -160,7 +160,7 @@ static int fourxm_read_header(AVFormatContext *s,
av_free(header);
return AVERROR_INVALIDDATA;
}
- current_track = LE_32(&header[i + 8]);
+ current_track = AV_RL32(&header[i + 8]);
if (current_track + 1 > fourxm->track_count) {
fourxm->track_count = current_track + 1;
if((unsigned)fourxm->track_count >= UINT_MAX / sizeof(AudioTrack))
@@ -172,10 +172,10 @@ static int fourxm_read_header(AVFormatContext *s,
return AVERROR_NOMEM;
}
}
- fourxm->tracks[current_track].adpcm = LE_32(&header[i + 12]);
- fourxm->tracks[current_track].channels = LE_32(&header[i + 36]);
- fourxm->tracks[current_track].sample_rate = LE_32(&header[i + 40]);
- fourxm->tracks[current_track].bits = LE_32(&header[i + 44]);
+ fourxm->tracks[current_track].adpcm = AV_RL32(&header[i + 12]);
+ fourxm->tracks[current_track].channels = AV_RL32(&header[i + 36]);
+ fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
+ fourxm->tracks[current_track].bits = AV_RL32(&header[i + 44]);
i += 8 + size;
/* allocate a new AVStream */
@@ -235,8 +235,8 @@ static int fourxm_read_packet(AVFormatContext *s,
if ((ret = get_buffer(&s->pb, header, 8)) < 0)
return ret;
- fourcc_tag = LE_32(&header[0]);
- size = LE_32(&header[4]);
+ fourcc_tag = AV_RL32(&header[0]);
+ size = AV_RL32(&header[4]);
if (url_feof(pb))
return AVERROR_IO;
switch (fourcc_tag) {
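Note: the 4xm.c hunks above swap the old LE_32() byte-order macros for AV_RL32(), which reads a 32-bit little-endian value straight out of a byte buffer. As a point of reference, a minimal portable sketch of such a reader (the hypothetical rl32() below is not the real macro, which lives in FFmpeg's byte-read headers and may use faster unaligned loads) looks roughly like this:

    #include <stdint.h>

    /* Sketch of a byte-wise little-endian 32-bit read, roughly what
     * AV_RL32() does; the real FFmpeg macro may differ in detail. */
    static uint32_t rl32(const uint8_t *p)
    {
        return (uint32_t)p[0]        |
               ((uint32_t)p[1] << 8)  |
               ((uint32_t)p[2] << 16) |
               ((uint32_t)p[3] << 24);
    }

With a reader like this, the demuxer can pull fourcc tags and chunk sizes out of the probed buffer regardless of host endianness.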
diff --git a/contrib/ffmpeg/libavformat/Makefile b/contrib/ffmpeg/libavformat/Makefile
index fd2ac2a29..f1339bd9d 100644
--- a/contrib/ffmpeg/libavformat/Makefile
+++ b/contrib/ffmpeg/libavformat/Makefile
@@ -29,9 +29,12 @@ OBJS-$(CONFIG_AVS_DEMUXER) += avs.o vocdec.o voc.o riff.o
OBJS-$(CONFIG_CRC_MUXER) += crc.o
OBJS-$(CONFIG_FRAMECRC_MUXER) += crc.o
OBJS-$(CONFIG_DAUD_DEMUXER) += daud.o
+OBJS-$(CONFIG_DC1394_DEMUXER) += dc1394.o
OBJS-$(CONFIG_DSICIN_DEMUXER) += dsicin.o
OBJS-$(CONFIG_DV_DEMUXER) += dv.o
OBJS-$(CONFIG_DV_MUXER) += dvenc.o
+OBJS-$(CONFIG_DV1394_DEMUXER) += dv1394.o
+OBJS-$(CONFIG_DXA_DEMUXER) += dxa.o
OBJS-$(CONFIG_EA_DEMUXER) += electronicarts.o
OBJS-$(CONFIG_FFM_DEMUXER) += ffm.o
OBJS-$(CONFIG_FFM_MUXER) += ffm.o
@@ -41,6 +44,7 @@ OBJS-$(CONFIG_FLV_MUXER) += flvenc.o
OBJS-$(CONFIG_GIF_MUXER) += gif.o
OBJS-$(CONFIG_GIF_DEMUXER) += gifdec.o
OBJS-$(CONFIG_GXF_DEMUXER) += gxf.o
+OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o
OBJS-$(CONFIG_IDCIN_DEMUXER) += idcin.o
OBJS-$(CONFIG_ROQ_DEMUXER) += idroq.o
OBJS-$(CONFIG_IMAGE2_DEMUXER) += img2.o
@@ -54,7 +58,6 @@ OBJS-$(CONFIG_MMF_DEMUXER) += mmf.o riff.o
OBJS-$(CONFIG_MMF_MUXER) += mmf.o riff.o
OBJS-$(CONFIG_MOV_DEMUXER) += mov.o riff.o isom.o
OBJS-$(CONFIG_MOV_MUXER) += movenc.o riff.o isom.o
-OBJS-$(CONFIG_MTV_DEMUXER) += mtv.o
OBJS-$(CONFIG_TGP_MUXER) += movenc.o riff.o isom.o
OBJS-$(CONFIG_MP4_MUXER) += movenc.o riff.o isom.o
OBJS-$(CONFIG_PSP_MUXER) += movenc.o riff.o isom.o
@@ -62,6 +65,7 @@ OBJS-$(CONFIG_TG2_MUXER) += movenc.o riff.o isom.o
OBJS-$(CONFIG_MP3_DEMUXER) += mp3.o
OBJS-$(CONFIG_MP2_MUXER) += mp3.o
OBJS-$(CONFIG_MP3_MUXER) += mp3.o
+OBJS-$(CONFIG_MPC_DEMUXER) += mpc.o
OBJS-$(CONFIG_MPEG1SYSTEM_MUXER) += mpeg.o
OBJS-$(CONFIG_MPEG1VCD_MUXER) += mpeg.o
OBJS-$(CONFIG_MPEG2VOB_MUXER) += mpeg.o
@@ -71,6 +75,7 @@ OBJS-$(CONFIG_MPEGPS_DEMUXER) += mpeg.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpegts.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpegtsenc.o
OBJS-$(CONFIG_MPJPEG_MUXER) += mpjpeg.o
+OBJS-$(CONFIG_MTV_DEMUXER) += mtv.o
OBJS-$(CONFIG_MXF_DEMUXER) += mxf.o
OBJS-$(CONFIG_NSV_DEMUXER) += nsvdec.o riff.o
OBJS-$(CONFIG_NUV_DEMUXER) += nuv.o riff.o
@@ -80,6 +85,7 @@ OBJS-$(CONFIG_OGG_DEMUXER) += ogg2.o \
oggparseflac.o \
oggparseogm.o \
riff.o
+OBJS-$(CONFIG_OGG_MUXER) += ogg.o
OBJS-$(CONFIG_STR_DEMUXER) += psxstr.o
OBJS-$(CONFIG_SHORTEN_DEMUXER) += raw.o
OBJS-$(CONFIG_FLAC_DEMUXER) += raw.o
@@ -105,6 +111,7 @@ OBJS-$(CONFIG_MJPEG_MUXER) += raw.o
OBJS-$(CONFIG_RAWVIDEO_DEMUXER) += raw.o
OBJS-$(CONFIG_RAWVIDEO_MUXER) += raw.o
OBJS-$(CONFIG_NULL_MUXER) += raw.o
+OBJS-$(CONFIG_NUT_DEMUXER) += nutdec.o riff.o
OBJS-$(CONFIG_RM_DEMUXER) += rm.o
OBJS-$(CONFIG_RM_MUXER) += rm.o
OBJS-$(CONFIG_SEGAFILM_DEMUXER) += segafilm.o
@@ -113,8 +120,10 @@ OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o
OBJS-$(CONFIG_SOL_DEMUXER) += sol.o
OBJS-$(CONFIG_SWF_DEMUXER) += swf.o
OBJS-$(CONFIG_SWF_MUXER) += swf.o
+OBJS-$(CONFIG_THP_DEMUXER) += thp.o
OBJS-$(CONFIG_TIERTEXSEQ_DEMUXER) += tiertexseq.o
OBJS-$(CONFIG_TTA_DEMUXER) += tta.o
+OBJS-$(CONFIG_V4L2_DEMUXER) += v4l2.o
OBJS-$(CONFIG_VOC_DEMUXER) += vocdec.o voc.o riff.o
OBJS-$(CONFIG_VOC_MUXER) += vocenc.o voc.o riff.o
OBJS-$(CONFIG_WAV_DEMUXER) += wav.o riff.o
@@ -123,31 +132,24 @@ OBJS-$(CONFIG_WC3_DEMUXER) += wc3movie.o
OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood.o
OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood.o
OBJS-$(CONFIG_WV_DEMUXER) += wv.o
+OBJS-$(CONFIG_X11_GRAB_DEVICE_DEMUXER) += x11grab.o
OBJS-$(CONFIG_YUV4MPEGPIPE_MUXER) += yuv4mpeg.o
OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER) += yuv4mpeg.o
+# external libraries
+OBJS-$(CONFIG_LIBNUT_DEMUXER) += libnut.o riff.o
+OBJS-$(CONFIG_LIBNUT_MUXER) += libnut.o riff.o
+
OBJS+= framehook.o
-ifeq ($(CONFIG_VIDEO4LINUX),yes)
+ifeq ($(CONFIG_V4L),yes)
OBJS-$(CONFIG_VIDEO_GRAB_DEVICE_DEMUXER) += grab.o
endif
-ifeq ($(CONFIG_VIDEO4LINUX2),yes)
-OBJS-$(CONFIG_V4L2_DEMUXER) += v4l2.o
-endif
-
ifeq ($(CONFIG_BKTR),yes)
OBJS-$(CONFIG_VIDEO_GRAB_DEVICE_DEMUXER) += grab_bktr.o
endif
-ifeq ($(CONFIG_DV1394),yes)
-OBJS-$(CONFIG_DV1394_DEMUXER) += dv1394.o
-endif
-
-ifeq ($(CONFIG_DC1394),yes)
-OBJS-$(CONFIG_DC1394_DEMUXER) += dc1394.o
-endif
-
ifeq ($(CONFIG_AUDIO_OSS),yes)
OBJS-$(CONFIG_AUDIO_DEMUXER) += audio.o
OBJS-$(CONFIG_AUDIO_MUXER) += audio.o
@@ -166,32 +168,12 @@ OBJS+= avio.o aviobuf.o
ifeq ($(CONFIG_PROTOCOLS),yes)
OBJS+= file.o
ifeq ($(CONFIG_NETWORK),yes)
-OBJS+= udp.o tcp.o http.o rtsp.o rtp.o rtpproto.o mpegts.o base64.o rtp_h264.o
-endif
-endif
-
-ifeq ($(CONFIG_LIBNUT),yes)
-OBJS-$(CONFIG_NUT_DEMUXER) += libnut.o riff.o
-OBJS-$(CONFIG_NUT_MUXER) += libnut.o riff.o
-else
-OBJS-$(CONFIG_NUT_DEMUXER) += nutdec.o riff.o
-#OBJS-$(CONFIG_NUT_MUXER) += nutenc.o riff.o
+OBJS+= udp.o tcp.o http.o rtsp.o rtp.o rtpproto.o mpegts.o rtp_h264.o
endif
-
-ifeq ($(CONFIG_LIBOGG),yes)
-OBJS-$(CONFIG_OGG_MUXER) += ogg.o
endif
-ifeq ($(CONFIG_GPL),yes)
-OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o
-endif
-
-OBJS += $(OBJS-yes)
-
NAME=avformat
-ifeq ($(BUILD_SHARED),yes)
LIBVERSION=$(LAVFVERSION)
LIBMAJOR=$(LAVFMAJOR)
-endif
include ../common.mak
diff --git a/contrib/ffmpeg/libavformat/aiff.c b/contrib/ffmpeg/libavformat/aiff.c
index e4cf66c3b..868d55219 100644
--- a/contrib/ffmpeg/libavformat/aiff.c
+++ b/contrib/ffmpeg/libavformat/aiff.c
@@ -23,7 +23,7 @@
#include "riff.h"
#include "intfloat_readwrite.h"
-static const CodecTag codec_aiff_tags[] = {
+static const AVCodecTag codec_aiff_tags[] = {
{ CODEC_ID_PCM_S16BE, MKTAG('N','O','N','E') },
{ CODEC_ID_PCM_S8, MKTAG('N','O','N','E') },
{ CODEC_ID_PCM_S24BE, MKTAG('N','O','N','E') },
@@ -91,7 +91,7 @@ static void get_meta(ByteIOContext *pb, char * str, int strsize, int size)
if (size & 1)
size++;
size -= res;
- if (size);
+ if (size)
url_fskip(pb, size);
}
@@ -163,28 +163,32 @@ static int aiff_write_header(AVFormatContext *s)
ByteIOContext *pb = &s->pb;
AVCodecContext *enc = s->streams[0]->codec;
AVExtFloat sample_rate;
+ int aifc = 0;
/* First verify if format is ok */
- enc->codec_tag = codec_get_tag(codec_aiff_tags, enc->codec_id);
if (!enc->codec_tag) {
- av_free(aiff);
return -1;
}
+ if (enc->codec_tag != MKTAG('N','O','N','E'))
+ aifc = 1;
+
/* FORM AIFF header */
put_tag(pb, "FORM");
aiff->form = url_ftell(pb);
put_be32(pb, 0); /* file length */
- put_tag(pb, "AIFC");
+ put_tag(pb, aifc ? "AIFC" : "AIFF");
- /* Version chunk */
- put_tag(pb, "FVER");
- put_be32(pb, 4);
- put_be32(pb, 0xA2805140);
+ if (aifc) {
+ /* Version chunk */
+ put_tag(pb, "FVER");
+ put_be32(pb, 4);
+ put_be32(pb, 0xA2805140);
+ }
/* Common chunk */
put_tag(pb, "COMM");
- put_be32(pb, 24); /* size */
+ put_be32(pb, aifc ? 24 : 18); /* size */
put_be16(pb, enc->channels); /* Number of channels */
aiff->frames = url_ftell(pb);
@@ -204,8 +208,10 @@ static int aiff_write_header(AVFormatContext *s)
sample_rate = av_dbl2ext((double)enc->sample_rate);
put_buffer(pb, (uint8_t*)&sample_rate, sizeof(sample_rate));
- put_le32(pb, enc->codec_tag);
- put_be16(pb, 0);
+ if (aifc) {
+ put_le32(pb, enc->codec_tag);
+ put_be16(pb, 0);
+ }
/* Sound data chunk */
put_tag(pb, "SSND");
@@ -417,6 +423,7 @@ AVInputFormat aiff_demuxer = {
aiff_read_packet,
aiff_read_close,
aiff_read_seek,
+ .codec_tag= (const AVCodecTag*[]){codec_aiff_tags, 0},
};
#endif
@@ -432,5 +439,6 @@ AVOutputFormat aiff_muxer = {
aiff_write_header,
aiff_write_packet,
aiff_write_trailer,
+ .codec_tag= (const AVCodecTag*[]){codec_aiff_tags, 0},
};
#endif
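Note: the aiff.c changes drop the explicit codec_get_tag() call in aiff_write_header() and instead attach codec_aiff_tags to both the demuxer and the muxer via .codec_tag, so the generic layer can resolve the fourcc itself. A hedged sketch of the kind of lookup such a {codec_id, fourcc} table supports, using a hypothetical helper rather than the real codec_get_tag() signature:

    #include <stdint.h>

    /* Hypothetical helper (not the FFmpeg API): walks a table such as
     * codec_aiff_tags above; tables end with a CODEC_ID_NONE / 0 entry. */
    struct tag_entry { int codec_id; uint32_t fourcc; };

    static uint32_t lookup_tag(const struct tag_entry *tags, int codec_id)
    {
        for (; tags->codec_id; tags++)   /* stop at the sentinel entry */
            if (tags->codec_id == codec_id)
                return tags->fourcc;
        return 0;                        /* unknown codec: caller rejects it */
    }

aiff_write_header() can then refuse to mux when no tag was found (the !enc->codec_tag check above) and pick plain "AIFF" when the tag is 'NONE', "AIFC" with an FVER chunk otherwise.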
diff --git a/contrib/ffmpeg/libavformat/allformats.c b/contrib/ffmpeg/libavformat/allformats.c
index f4b16adff..8534a18f6 100644
--- a/contrib/ffmpeg/libavformat/allformats.c
+++ b/contrib/ffmpeg/libavformat/allformats.c
@@ -31,7 +31,7 @@
formats you want to support */
/**
- * Initialize libavcodec and register all the codecs and formats.
+ * Initialize libavformat and register all the (de)muxers and protocols.
*/
void av_register_all(void)
{
@@ -52,9 +52,7 @@ void av_register_all(void)
REGISTER_MUXDEMUX(ASF, asf);
REGISTER_MUXER (ASF_STREAM, asf_stream);
REGISTER_MUXDEMUX(AU, au);
-#if defined(CONFIG_AUDIO_OSS) || defined(CONFIG_AUDIO_BEOS)
REGISTER_MUXDEMUX(AUDIO, audio);
-#endif
REGISTER_MUXDEMUX(AVI, avi);
#ifdef CONFIG_AVISYNTH
av_register_input_format(&avisynth_demuxer);
@@ -62,15 +60,12 @@ void av_register_all(void)
REGISTER_DEMUXER (AVS, avs);
REGISTER_MUXER (CRC, crc);
REGISTER_DEMUXER (DAUD, daud);
-#ifdef CONFIG_DC1394
REGISTER_DEMUXER (DC1394, dc1394);
-#endif
REGISTER_DEMUXER (DSICIN, dsicin);
REGISTER_DEMUXER (DTS, dts);
REGISTER_MUXDEMUX(DV, dv);
-#ifdef CONFIG_DV1394
REGISTER_DEMUXER (DV1394, dv1394);
-#endif
+ REGISTER_DEMUXER (DXA, dxa);
REGISTER_DEMUXER (EA, ea);
REGISTER_MUXDEMUX(FFM, ffm);
REGISTER_MUXDEMUX(FLAC, flac);
@@ -80,9 +75,7 @@ void av_register_all(void)
REGISTER_MUXER (FRAMECRC, framecrc);
REGISTER_MUXDEMUX(GIF, gif);
REGISTER_DEMUXER (GXF, gxf);
-#ifdef CONFIG_GPL
REGISTER_MUXER (GXF, gxf);
-#endif
REGISTER_MUXDEMUX(H261, h261);
REGISTER_MUXDEMUX(H263, h263);
REGISTER_MUXDEMUX(H264, h264);
@@ -91,6 +84,8 @@ void av_register_all(void)
REGISTER_MUXDEMUX(IMAGE2PIPE, image2pipe);
REGISTER_DEMUXER (INGENIENT, ingenient);
REGISTER_DEMUXER (IPMOVIE, ipmovie);
+ if (!ENABLE_NUT_DEMUXER) REGISTER_DEMUXER (LIBNUT, libnut);
+ REGISTER_MUXER (LIBNUT, libnut);
REGISTER_MUXDEMUX(M4V, m4v);
REGISTER_DEMUXER (MATROSKA, matroska);
REGISTER_MUXDEMUX(MJPEG, mjpeg);
@@ -100,6 +95,7 @@ void av_register_all(void)
REGISTER_MUXER (MP2, mp2);
REGISTER_MUXDEMUX(MP3, mp3);
REGISTER_MUXER (MP4, mp4);
+ REGISTER_DEMUXER (MPC, mpc);
REGISTER_MUXER (MPEG1SYSTEM, mpeg1system);
REGISTER_MUXER (MPEG1VCD, mpeg1vcd);
REGISTER_MUXER (MPEG1VIDEO, mpeg1video);
@@ -116,14 +112,9 @@ void av_register_all(void)
REGISTER_DEMUXER (NSV, nsv);
REGISTER_MUXER (NULL, null);
REGISTER_DEMUXER (NUT, nut);
-#ifdef CONFIG_LIBNUT
- REGISTER_MUXER (NUT, nut);
-#endif
REGISTER_DEMUXER (NUV, nuv);
REGISTER_DEMUXER (OGG, ogg);
-#ifdef CONFIG_LIBOGG
REGISTER_MUXER (OGG, ogg);
-#endif
REGISTER_MUXDEMUX(PCM_ALAW, pcm_alaw);
REGISTER_MUXDEMUX(PCM_MULAW, pcm_mulaw);
REGISTER_MUXDEMUX(PCM_S16BE, pcm_s16be);
@@ -136,11 +127,11 @@ void av_register_all(void)
REGISTER_MUXDEMUX(RAWVIDEO, rawvideo);
REGISTER_MUXDEMUX(RM, rm);
REGISTER_DEMUXER (ROQ, roq);
-#ifdef CONFIG_NETWORK
REGISTER_DEMUXER (REDIR, redir);
REGISTER_MUXER (RTP, rtp);
REGISTER_DEMUXER (RTSP, rtsp);
REGISTER_DEMUXER (SDP, sdp);
+#ifdef CONFIG_NETWORK
av_register_rtp_dynamic_payload_handlers();
#endif
REGISTER_DEMUXER (SEGAFILM, segafilm);
@@ -151,14 +142,12 @@ void av_register_all(void)
REGISTER_MUXDEMUX(SWF, swf);
REGISTER_MUXER (TG2, tg2);
REGISTER_MUXER (TGP, tgp);
+ REGISTER_DEMUXER (THP, thp);
REGISTER_DEMUXER (TIERTEXSEQ, tiertexseq);
REGISTER_DEMUXER (TTA, tta);
-#ifdef CONFIG_VIDEO4LINUX2
REGISTER_DEMUXER (V4L2, v4l2);
-#endif
-#if defined(CONFIG_VIDEO4LINUX) || defined(CONFIG_BKTR)
+ REGISTER_DEMUXER (VC1, vc1);
REGISTER_DEMUXER (VIDEO_GRAB_DEVICE, video_grab_device);
-#endif
REGISTER_DEMUXER (VMD, vmd);
REGISTER_MUXDEMUX(VOC, voc);
REGISTER_MUXDEMUX(WAV, wav);
@@ -166,6 +155,7 @@ void av_register_all(void)
REGISTER_DEMUXER (WSAUD, wsaud);
REGISTER_DEMUXER (WSVQA, wsvqa);
REGISTER_DEMUXER (WV, wv);
+ REGISTER_DEMUXER (X11_GRAB_DEVICE, x11_grab_device);
REGISTER_MUXDEMUX(YUV4MPEGPIPE, yuv4mpegpipe);
#ifdef CONFIG_PROTOCOLS
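Note: most of the #ifdef guards above disappear because the REGISTER_* macros themselves are expected to compile to no-ops when a component is disabled. Their exact definitions sit at the top of allformats.c and are not part of this hunk; a hedged sketch of their usual shape, assuming the extern declarations come from allformats.h and ENABLE_*_MUXER / ENABLE_*_DEMUXER are 0/1 build-configuration constants:

    #include "avformat.h"
    #include "allformats.h"

    /* Sketch only - the real macros may differ in detail. Because the
     * ENABLE_* constants are compile-time 0/1 values, references to
     * disabled components are optimized away and never need linking. */
    #define REGISTER_MUXER(X, x) \
        if (ENABLE_##X##_MUXER)   av_register_output_format(&x##_muxer)
    #define REGISTER_DEMUXER(X, x) \
        if (ENABLE_##X##_DEMUXER) av_register_input_format(&x##_demuxer)
    #define REGISTER_MUXDEMUX(X, x) REGISTER_MUXER(X, x); REGISTER_DEMUXER(X, x)

That is also why the libnut case reads "if (!ENABLE_NUT_DEMUXER) REGISTER_DEMUXER (LIBNUT, libnut);" above: the external libnut demuxer only takes over when the native NUT demuxer is disabled.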
diff --git a/contrib/ffmpeg/libavformat/allformats.h b/contrib/ffmpeg/libavformat/allformats.h
index a138841c9..9734940d3 100644
--- a/contrib/ffmpeg/libavformat/allformats.h
+++ b/contrib/ffmpeg/libavformat/allformats.h
@@ -47,6 +47,7 @@ extern AVInputFormat dsicin_demuxer;
extern AVInputFormat dv1394_demuxer;
extern AVInputFormat dv_demuxer;
extern AVOutputFormat dv_muxer;
+extern AVInputFormat dxa_demuxer;
extern AVInputFormat ea_demuxer;
extern AVInputFormat ffm_demuxer;
extern AVOutputFormat ffm_muxer;
@@ -69,6 +70,8 @@ extern AVInputFormat imagepipe_demuxer;
extern AVOutputFormat image_muxer;
extern AVOutputFormat imagepipe_muxer;
extern AVInputFormat ipmovie_demuxer;
+extern AVInputFormat libnut_demuxer;
+extern AVOutputFormat libnut_muxer;
extern AVInputFormat matroska_demuxer;
extern AVInputFormat mm_demuxer;
extern AVInputFormat mmf_demuxer;
@@ -82,6 +85,7 @@ extern AVOutputFormat tg2_muxer;
extern AVInputFormat mp3_demuxer;
extern AVOutputFormat mp2_muxer;
extern AVOutputFormat mp3_muxer;
+extern AVInputFormat mpc_demuxer;
extern AVOutputFormat mpeg1system_muxer;
extern AVOutputFormat mpeg1vcd_muxer;
extern AVOutputFormat mpeg2vob_muxer;
@@ -95,7 +99,6 @@ extern AVInputFormat mtv_demuxer;
extern AVInputFormat mxf_demuxer;
extern AVInputFormat nsv_demuxer;
extern AVInputFormat nut_demuxer;
-extern AVOutputFormat nut_muxer;
extern AVInputFormat nuv_demuxer;
extern AVInputFormat ogg_demuxer;
extern AVOutputFormat ogg_muxer;
@@ -152,6 +155,7 @@ extern AVInputFormat swf_demuxer;
extern AVOutputFormat swf_muxer;
extern AVInputFormat tta_demuxer;
extern AVInputFormat v4l2_demuxer;
+extern AVInputFormat vc1_demuxer;
extern AVInputFormat voc_demuxer;
extern AVOutputFormat voc_muxer;
extern AVInputFormat wav_demuxer;
@@ -163,6 +167,8 @@ extern AVInputFormat wv_demuxer;
extern AVOutputFormat yuv4mpegpipe_muxer;
extern AVInputFormat yuv4mpegpipe_demuxer;
extern AVInputFormat tiertexseq_demuxer;
+extern AVInputFormat x11_grab_device_demuxer;
+extern AVInputFormat thp_demuxer;
/* raw.c */
int pcm_read_seek(AVFormatContext *s,
diff --git a/contrib/ffmpeg/libavformat/asf-enc.c b/contrib/ffmpeg/libavformat/asf-enc.c
index 3ef67507f..f1d9c6903 100644
--- a/contrib/ffmpeg/libavformat/asf-enc.c
+++ b/contrib/ffmpeg/libavformat/asf-enc.c
@@ -187,44 +187,36 @@
2*PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS \
)
-static int preroll_time = 2000;
+static const AVCodecTag codec_asf_bmp_tags[] = {
+ { CODEC_ID_MPEG4, MKTAG('M', 'P', '4', 'S') },
+ { CODEC_ID_MPEG4, MKTAG('M', '4', 'S', '2') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
+ { CODEC_ID_NONE, 0 },
+};
-static const uint8_t error_spread_ADPCM_G726[] = { 0x01, 0x90, 0x01, 0x90, 0x01, 0x01, 0x00, 0x00 };
+#define PREROLL_TIME 3100
static void put_guid(ByteIOContext *s, const GUID *g)
{
- int i;
-
- put_le32(s, g->v1);
- put_le16(s, g->v2);
- put_le16(s, g->v3);
- for(i=0;i<8;i++)
- put_byte(s, g->v4[i]);
+ assert(sizeof(*g) == 16);
+ put_buffer(s, g, sizeof(*g));
}
+static void put_str16_nolen(ByteIOContext *s, const char *tag);
static void put_str16(ByteIOContext *s, const char *tag)
{
- int c;
-
put_le16(s,strlen(tag) + 1);
- for(;;) {
- c = (uint8_t)*tag++;
- put_le16(s, c);
- if (c == '\0')
- break;
- }
+ put_str16_nolen(s, tag);
}
static void put_str16_nolen(ByteIOContext *s, const char *tag)
{
int c;
- for(;;) {
+ do{
c = (uint8_t)*tag++;
put_le16(s, c);
- if (c == '\0')
- break;
- }
+ }while(c);
}
static int64_t put_header(ByteIOContext *pb, const GUID *g)
@@ -269,8 +261,8 @@ static int64_t unix_to_file_time(int ti)
{
int64_t t;
- t = ti * int64_t_C(10000000);
- t += int64_t_C(116444736000000000);
+ t = ti * INT64_C(10000000);
+ t += INT64_C(116444736000000000);
return t;
}
@@ -286,7 +278,7 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
int bit_rate;
int64_t duration;
- duration = asf->duration + preroll_time * 10000;
+ duration = asf->duration + PREROLL_TIME * 10000;
has_title = (s->title[0] || s->author[0] || s->copyright[0] || s->comment[0]);
bit_rate = 0;
@@ -317,10 +309,9 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
put_le64(pb, unix_to_file_time(file_time));
put_le64(pb, asf->nb_packets); /* number of packets */
put_le64(pb, duration); /* end time stamp (in 100ns units) */
- put_le64(pb, duration); /* duration (in 100ns units) */
- put_le32(pb, preroll_time); /* start time stamp */
- put_le32(pb, 0); /* ??? */
- put_le32(pb, asf->is_streamed ? 1 : 0); /* ??? */
+ put_le64(pb, asf->duration); /* duration (in 100ns units) */
+ put_le64(pb, PREROLL_TIME); /* start time stamp */
+ put_le32(pb, (asf->is_streamed || url_is_streamed(pb)) ? 3 : 2); /* ??? */
put_le32(pb, asf->packet_size); /* packet size */
put_le32(pb, asf->packet_size); /* packet size */
put_le32(pb, bit_rate); /* Nominal data rate in bps */
@@ -351,8 +342,6 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
/* stream headers */
for(n=0;n<s->nb_streams;n++) {
int64_t es_pos;
- const uint8_t *er_spr = NULL;
- int er_spr_len = 0;
// ASFStream *stream = &asf->streams[n];
enc = s->streams[n]->codec;
@@ -360,18 +349,11 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
asf->streams[n].seq = 0;
- if (enc->codec_type == CODEC_TYPE_AUDIO) {
- if (enc->codec_id == CODEC_ID_ADPCM_G726) {
- er_spr = error_spread_ADPCM_G726;
- er_spr_len = sizeof(error_spread_ADPCM_G726);
- }
- }
-
switch(enc->codec_type) {
case CODEC_TYPE_AUDIO:
wav_extra_size = 0;
extra_size = 18 + wav_extra_size;
- extra_size2 = er_spr_len;
+ extra_size2 = 8;
break;
default:
case CODEC_TYPE_VIDEO:
@@ -384,11 +366,7 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
hpos = put_header(pb, &stream_header);
if (enc->codec_type == CODEC_TYPE_AUDIO) {
put_guid(pb, &audio_stream);
- if ((er_spr != NULL) && (er_spr_len != 0)) {
- put_guid(pb, &audio_conceal_spread);
- } else {
- put_guid(pb, &video_conceal_none);
- }
+ put_guid(pb, &audio_conceal_spread);
} else {
put_guid(pb, &video_stream);
put_guid(pb, &video_conceal_none);
@@ -417,8 +395,16 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
url_fseek(pb, cur_pos, SEEK_SET);
}
/* ERROR Correction */
- if ((er_spr != NULL) && (er_spr_len != 0))
- put_buffer(pb, er_spr, er_spr_len);
+ put_byte(pb, 0x01);
+ if(enc->codec_id == CODEC_ID_ADPCM_G726 || !enc->block_align){
+ put_le16(pb, 0x0190);
+ put_le16(pb, 0x0190);
+ }else{
+ put_le16(pb, enc->block_align);
+ put_le16(pb, enc->block_align);
+ }
+ put_le16(pb, 0x01);
+ put_byte(pb, 0x00);
} else {
put_le32(pb, enc->width);
put_le32(pb, enc->height);
@@ -442,27 +428,30 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
enc = s->streams[n]->codec;
p = avcodec_find_encoder(enc->codec_id);
- put_le16(pb, asf->streams[n].num);
- put_str16(pb, p ? p->name : enc->codec_name);
+ if(enc->codec_type == CODEC_TYPE_AUDIO)
+ put_le16(pb, 2);
+ else if(enc->codec_type == CODEC_TYPE_VIDEO)
+ put_le16(pb, 1);
+ else
+ put_le16(pb, -1);
+
+ if(enc->codec_id == CODEC_ID_WMAV2)
+ put_str16(pb, "Windows Media Audio V8");
+ else
+ put_str16(pb, p ? p->name : enc->codec_name);
put_le16(pb, 0); /* no parameters */
/* id */
if (enc->codec_type == CODEC_TYPE_AUDIO) {
put_le16(pb, 2);
- if(!enc->codec_tag)
- enc->codec_tag = codec_get_tag(codec_wav_tags, enc->codec_id);
- if(!enc->codec_tag)
- return -1;
put_le16(pb, enc->codec_tag);
} else {
put_le16(pb, 4);
- if(!enc->codec_tag)
- enc->codec_tag = codec_get_tag(codec_bmp_tags, enc->codec_id);
- if(!enc->codec_tag)
- return -1;
put_le32(pb, enc->codec_tag);
}
+ if(!enc->codec_tag)
+ return -1;
}
end_header(pb, hpos);
@@ -509,7 +498,7 @@ static int asf_write_header(AVFormatContext *s)
asf->nb_index_count = 0;
asf->maximum_packet = 0;
- if (asf_write_header1(s, 0, 50) < 0) {
+ if (asf_write_header1(s, 0, 0) < 0) {
//av_free(asf);
return -1;
}
@@ -517,7 +506,6 @@ static int asf_write_header(AVFormatContext *s)
put_flush_packet(&s->pb);
asf->packet_nb_payloads = 0;
- asf->prev_packet_sent_time = 0;
asf->packet_timestamp_start = -1;
asf->packet_timestamp_end = -1;
init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
@@ -546,10 +534,15 @@ static int put_payload_parsing_info(
ASFContext *asf = s->priv_data;
ByteIOContext *pb = &s->pb;
int ppi_size, i;
- unsigned char *start_ppi_ptr = pb->buf_ptr;
+ int64_t start= url_ftell(pb);
int iLengthTypeFlags = ASF_PPI_LENGTH_TYPE_FLAGS;
+ padsize -= PACKET_HEADER_MIN_SIZE;
+ if(asf->multi_payloads_present)
+ padsize--;
+ assert(padsize>=0);
+
put_byte(pb, ASF_PACKET_ERROR_CORRECTION_FLAGS);
for (i = 0; i < ASF_PACKET_ERROR_CORRECTION_DATA_SIZE; i++){
put_byte(pb, 0x0);
@@ -578,7 +571,7 @@ static int put_payload_parsing_info(
if (asf->multi_payloads_present)
put_byte(pb, nb_payloads | ASF_PAYLOAD_FLAGS);
- ppi_size = pb->buf_ptr - start_ppi_ptr;
+ ppi_size = url_ftell(pb) - start;
return ppi_size;
}
@@ -600,7 +593,8 @@ static void flush_packet(AVFormatContext *s)
asf->packet_size_left
);
- packet_filled_size = PACKET_SIZE - packet_hdr_size - asf->packet_size_left;
+ packet_filled_size = PACKET_SIZE - asf->packet_size_left;
+ assert(packet_hdr_size <= asf->packet_size_left);
memset(asf->packet_buf + packet_filled_size, 0, asf->packet_size_left);
put_buffer(&s->pb, asf->packet_buf, asf->packet_size - packet_hdr_size);
@@ -608,7 +602,6 @@ static void flush_packet(AVFormatContext *s)
put_flush_packet(&s->pb);
asf->nb_packets++;
asf->packet_nb_payloads = 0;
- asf->prev_packet_sent_time = asf->packet_timestamp_start;
asf->packet_timestamp_start = -1;
asf->packet_timestamp_end = -1;
init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
@@ -655,6 +648,7 @@ static void put_payload_header(
static void put_frame(
AVFormatContext *s,
ASFStream *stream,
+ AVStream *avst,
int timestamp,
const uint8_t *buf,
int m_obj_size,
@@ -670,28 +664,25 @@ static void put_frame(
if (asf->packet_timestamp_start == -1) {
asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT);
+ asf->packet_size_left = PACKET_SIZE;
if (asf->multi_payloads_present){
- asf->packet_size_left = PACKET_SIZE; //For debug
- asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE - 1;
frag_len1 = MULTI_PAYLOAD_CONSTANT - 1;
}
else {
- asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE;
frag_len1 = SINGLE_PAYLOAD_DATA_LENGTH;
}
- if (asf->prev_packet_sent_time > timestamp)
- asf->packet_timestamp_start = asf->prev_packet_sent_time;
- else
- asf->packet_timestamp_start = timestamp;
+ asf->packet_timestamp_start = timestamp;
}
else {
// multi payloads
- frag_len1 = asf->packet_size_left - PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS;
+ frag_len1 = asf->packet_size_left - PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS - PACKET_HEADER_MIN_SIZE - 1;
- if (asf->prev_packet_sent_time > timestamp)
- asf->packet_timestamp_start = asf->prev_packet_sent_time;
- else if (asf->packet_timestamp_start >= timestamp)
- asf->packet_timestamp_start = timestamp;
+ asf->packet_timestamp_start = timestamp;
+
+ if(frag_len1 < payload_len && avst->codec->codec_type == CODEC_TYPE_AUDIO){
+ flush_packet(s);
+ continue;
+ }
}
if (frag_len1 > 0) {
if (payload_len > frag_len1)
@@ -699,7 +690,7 @@ static void put_frame(
else if (payload_len == (frag_len1 - 1))
payload_len = frag_len1 - 2; //additional byte need to put padding length
- put_payload_header(s, stream, timestamp+preroll_time, m_obj_size, m_obj_offset, payload_len, flags);
+ put_payload_header(s, stream, timestamp+PREROLL_TIME, m_obj_size, m_obj_offset, payload_len, flags);
put_buffer(&asf->pb, buf, payload_len);
if (asf->multi_payloads_present)
@@ -717,7 +708,7 @@ static void put_frame(
if (!asf->multi_payloads_present)
flush_packet(s);
- else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + 1))
+ else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + PACKET_HEADER_MIN_SIZE + 1))
flush_packet(s);
}
stream->seq++;
@@ -731,32 +722,27 @@ static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
AVCodecContext *codec;
int64_t packet_st,pts;
int start_sec,i;
+ int flags= pkt->flags;
codec = s->streams[pkt->stream_index]->codec;
stream = &asf->streams[pkt->stream_index];
+ if(codec->codec_type == CODEC_TYPE_AUDIO)
+ flags &= ~PKT_FLAG_KEY;
+
//XXX /FIXME use duration from AVPacket (quick hack by)
pts = (pkt->pts != AV_NOPTS_VALUE) ? pkt->pts : pkt->dts;
- if (pts == AV_NOPTS_VALUE) {
- if (codec->codec_type == CODEC_TYPE_AUDIO) {
- duration = (codec->frame_number * (int64_t)codec->frame_size * int64_t_C(10000000)) /
- codec->sample_rate;
- } else {
- duration = av_rescale(codec->frame_number * (int64_t)codec->time_base.num, 10000000, codec->time_base.den);
- }
- } else {
- duration = pts * 10000;
- }
- if (duration > asf->duration)
- asf->duration = duration;
+ assert(pts != AV_NOPTS_VALUE);
+ duration = pts * 10000;
+ asf->duration= FFMAX(asf->duration, duration);
packet_st = asf->nb_packets;
- put_frame(s, stream, pkt->pts, pkt->data, pkt->size, pkt->flags);
+ put_frame(s, stream, s->streams[pkt->stream_index], pkt->dts, pkt->data, pkt->size, flags);
/* check index */
- if ((!asf->is_streamed) && (codec->codec_type == CODEC_TYPE_VIDEO) && (pkt->flags & PKT_FLAG_KEY)) {
- start_sec = (int)(duration / int64_t_C(10000000));
- if (start_sec != (int)(asf->last_indexed_pts / int64_t_C(10000000))) {
+ if ((!asf->is_streamed) && (flags & PKT_FLAG_KEY)) {
+ start_sec = (int)(duration / INT64_C(10000000));
+ if (start_sec != (int)(asf->last_indexed_pts / INT64_C(10000000))) {
for(i=asf->nb_index_count;i<start_sec;i++) {
if (i>=asf->nb_index_memory_alloc) {
asf->nb_index_memory_alloc += ASF_INDEX_BLOCK;
@@ -765,8 +751,7 @@ static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
// store
asf->index_ptr[i].packet_number = (uint32_t)packet_st;
asf->index_ptr[i].packet_count = (uint16_t)(asf->nb_packets-packet_st);
- if (asf->maximum_packet < (uint16_t)(asf->nb_packets-packet_st))
- asf->maximum_packet = (uint16_t)(asf->nb_packets-packet_st);
+ asf->maximum_packet = FFMAX(asf->maximum_packet, (uint16_t)(asf->nb_packets-packet_st));
}
asf->nb_index_count = start_sec;
asf->last_indexed_pts = duration;
@@ -811,7 +796,7 @@ static int asf_write_trailer(AVFormatContext *s)
}
put_flush_packet(&s->pb);
- if (asf->is_streamed) {
+ if (asf->is_streamed || url_is_streamed(&s->pb)) {
put_chunk(s, 0x4524, 0, 0); /* end of stream */
} else {
/* rewrite an updated header */
@@ -832,7 +817,7 @@ AVOutputFormat asf_muxer = {
"video/x-ms-asf",
"asf,wmv,wma",
sizeof(ASFContext),
-#ifdef CONFIG_MP3LAME
+#ifdef CONFIG_LIBMP3LAME
CODEC_ID_MP3,
#else
CODEC_ID_MP2,
@@ -842,6 +827,7 @@ AVOutputFormat asf_muxer = {
asf_write_packet,
asf_write_trailer,
.flags = AVFMT_GLOBALHEADER,
+ .codec_tag= (const AVCodecTag*[]){codec_asf_bmp_tags, codec_bmp_tags, codec_wav_tags, 0},
};
#endif
@@ -852,7 +838,7 @@ AVOutputFormat asf_stream_muxer = {
"video/x-ms-asf",
"asf,wmv,wma",
sizeof(ASFContext),
-#ifdef CONFIG_MP3LAME
+#ifdef CONFIG_LIBMP3LAME
CODEC_ID_MP3,
#else
CODEC_ID_MP2,
@@ -862,5 +848,6 @@ AVOutputFormat asf_stream_muxer = {
asf_write_packet,
asf_write_trailer,
.flags = AVFMT_GLOBALHEADER,
+ .codec_tag= (const AVCodecTag*[]){codec_asf_bmp_tags, codec_bmp_tags, codec_wav_tags, 0},
};
#endif //CONFIG_ASF_STREAM_MUXER
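Note: unix_to_file_time() in the asf-enc.c hunks above converts a Unix timestamp into the Windows FILETIME scale that ASF headers use: 100-nanosecond ticks counted from 1601-01-01, which lies 11644473600 seconds before the Unix epoch (hence the 116444736000000000 constant). A standalone sketch of the same arithmetic, with a hypothetical function name:

    #include <stdint.h>
    #include <time.h>

    /* Unix seconds -> FILETIME ticks (100 ns units since 1601-01-01). */
    static int64_t unix_to_filetime(time_t t)
    {
        return (int64_t)t * INT64_C(10000000) + INT64_C(116444736000000000);
    }

The muxer then writes these 64-bit values with put_le64(), as shown in asf_write_header1() above.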
diff --git a/contrib/ffmpeg/libavformat/asf.c b/contrib/ffmpeg/libavformat/asf.c
index f63e4b695..498f6e79e 100644
--- a/contrib/ffmpeg/libavformat/asf.c
+++ b/contrib/ffmpeg/libavformat/asf.c
@@ -31,9 +31,12 @@
// Fix Me! FRAME_HEADER_SIZE may be different.
static const GUID index_guid = {
- 0x33000890, 0xe5b1, 0x11cf, { 0x89, 0xf4, 0x00, 0xa0, 0xc9, 0x03, 0x49, 0xcb },
+ 0x90, 0x08, 0x00, 0x33, 0xb1, 0xe5, 0xcf, 0x11, 0x89, 0xf4, 0x00, 0xa0, 0xc9, 0x03, 0x49, 0xcb
};
+static const GUID stream_bitrate_guid = { /* (http://get.to/sdp) */
+ 0xce, 0x75, 0xf8, 0x7b, 0x8d, 0x46, 0xd1, 0x11, 0x8d, 0x82, 0x00, 0x60, 0x97, 0xc9, 0xa2, 0xb2
+};
/**********************************/
/* decoding */
@@ -67,11 +70,12 @@ static void print_guid(const GUID *g)
else PRINT_IF_GUID(g, extended_content_header);
else PRINT_IF_GUID(g, ext_stream_embed_stream_header);
else PRINT_IF_GUID(g, ext_stream_audio_stream);
+ else PRINT_IF_GUID(g, metadata_header);
+ else PRINT_IF_GUID(g, stream_bitrate_guid);
else
printf("(GUID: unknown) ");
- printf("0x%08x, 0x%04x, 0x%04x, {", g->v1, g->v2, g->v3);
- for(i=0;i<8;i++)
- printf(" 0x%02x,", g->v4[i]);
+ for(i=0;i<16;i++)
+ printf(" 0x%02x,", (*g)[i]);
printf("}\n");
}
#undef PRINT_IF_GUID
@@ -79,13 +83,8 @@ static void print_guid(const GUID *g)
static void get_guid(ByteIOContext *s, GUID *g)
{
- int i;
-
- g->v1 = get_le32(s);
- g->v2 = get_le16(s);
- g->v3 = get_le16(s);
- for(i=0;i<8;i++)
- g->v4[i] = get_byte(s);
+ assert(sizeof(*g) == 16);
+ get_buffer(s, g, sizeof(*g));
}
#if 0
@@ -119,29 +118,26 @@ static void get_str16_nolen(ByteIOContext *pb, int len, char *buf, int buf_size)
static int asf_probe(AVProbeData *pd)
{
- GUID g;
- const unsigned char *p;
- int i;
-
/* check file header */
if (pd->buf_size <= 32)
return 0;
- p = pd->buf;
- g.v1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
- p += 4;
- g.v2 = p[0] | (p[1] << 8);
- p += 2;
- g.v3 = p[0] | (p[1] << 8);
- p += 2;
- for(i=0;i<8;i++)
- g.v4[i] = *p++;
-
- if (!memcmp(&g, &asf_header, sizeof(GUID)))
+
+ if (!memcmp(pd->buf, &asf_header, sizeof(GUID)))
return AVPROBE_SCORE_MAX;
else
return 0;
}
+static int get_value(ByteIOContext *pb, int type){
+ switch(type){
+ case 2: return get_le32(pb);
+ case 3: return get_le32(pb);
+ case 4: return get_le64(pb);
+ case 5: return get_le16(pb);
+ default:return INT_MIN;
+ }
+}
+
static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
ASFContext *asf = s->priv_data;
@@ -151,6 +147,9 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
ASFStream *asf_st;
int size, i;
int64_t gsize;
+ AVRational dar[128];
+
+ memset(dar, 0, sizeof(dar));
get_guid(pb, &g);
if (memcmp(&g, &asf_header, sizeof(GUID)))
@@ -168,13 +167,23 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
print_guid(&g);
printf(" size=0x%"PRIx64"\n", gsize);
#endif
+ if (!memcmp(&g, &data_header, sizeof(GUID))) {
+ asf->data_object_offset = url_ftell(pb);
+ // if not streaming, gsize is not unlimited (how?), and there is enough space in the file..
+ if (!(asf->hdr.flags & 0x01) && gsize >= 100) {
+ asf->data_object_size = gsize - 24;
+ } else {
+ asf->data_object_size = (uint64_t)-1;
+ }
+ break;
+ }
if (gsize < 24)
goto fail;
if (!memcmp(&g, &file_header, sizeof(GUID))) {
get_guid(pb, &asf->hdr.guid);
asf->hdr.file_size = get_le64(pb);
asf->hdr.create_time = get_le64(pb);
- asf->hdr.packets_count = get_le64(pb);
+ asf->nb_packets = get_le64(pb);
asf->hdr.send_time = get_le64(pb);
asf->hdr.play_time = get_le64(pb);
asf->hdr.preroll = get_le32(pb);
@@ -184,7 +193,6 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
asf->hdr.max_pktsize = get_le32(pb);
asf->hdr.max_bitrate = get_le32(pb);
asf->packet_size = asf->hdr.max_pktsize;
- asf->nb_packets = asf->hdr.packets_count;
} else if (!memcmp(&g, &stream_header, sizeof(GUID))) {
int type, type_specific_size, sizeX;
uint64_t total_size;
@@ -203,8 +211,10 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
goto fail;
st->priv_data = asf_st;
st->start_time = asf->hdr.preroll;
- st->duration = asf->hdr.send_time /
- (10000000 / 1000) - st->start_time;
+ if(!(asf->hdr.flags & 0x01)) { // if we aren't streaming...
+ st->duration = asf->hdr.send_time /
+ (10000000 / 1000) - st->start_time;
+ }
get_guid(pb, &g);
test_for_ext_stream_audio = 0;
@@ -249,19 +259,20 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->need_parsing = 1;
/* We have to init the frame size at some point .... */
pos2 = url_ftell(pb);
- if (gsize > (pos2 + 8 - pos1 + 24)) {
+ if (gsize >= (pos2 + 8 - pos1 + 24)) {
asf_st->ds_span = get_byte(pb);
asf_st->ds_packet_size = get_le16(pb);
asf_st->ds_chunk_size = get_le16(pb);
- asf_st->ds_data_size = get_le16(pb);
- asf_st->ds_silence_data = get_byte(pb);
+ get_le16(pb); //ds_data_size
+ get_byte(pb); //ds_silence_data
}
//printf("Descrambling: ps:%d cs:%d ds:%d s:%d sd:%d\n",
// asf_st->ds_packet_size, asf_st->ds_chunk_size,
// asf_st->ds_data_size, asf_st->ds_span, asf_st->ds_silence_data);
if (asf_st->ds_span > 1) {
if (!asf_st->ds_chunk_size
- || (asf_st->ds_packet_size/asf_st->ds_chunk_size <= 1))
+ || (asf_st->ds_packet_size/asf_st->ds_chunk_size <= 1)
+ || asf_st->ds_packet_size % asf_st->ds_chunk_size)
asf_st->ds_span = 0; // disable descrambling
}
switch (st->codec->codec_id) {
@@ -326,14 +337,6 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
}
pos2 = url_ftell(pb);
url_fskip(pb, gsize - (pos2 - pos1 + 24));
- } else if (!memcmp(&g, &data_header, sizeof(GUID))) {
- asf->data_object_offset = url_ftell(pb);
- if (gsize != (uint64_t)-1 && gsize >= 24) {
- asf->data_object_size = gsize - 24;
- } else {
- asf->data_object_size = (uint64_t)-1;
- }
- break;
} else if (!memcmp(&g, &comment_header, sizeof(GUID))) {
int len1, len2, len3, len4, len5;
@@ -342,11 +345,26 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
len3 = get_le16(pb);
len4 = get_le16(pb);
len5 = get_le16(pb);
- get_str16_nolen(pb, len1, s->title, sizeof(s->title));
- get_str16_nolen(pb, len2, s->author, sizeof(s->author));
+ get_str16_nolen(pb, len1, s->title , sizeof(s->title));
+ get_str16_nolen(pb, len2, s->author , sizeof(s->author));
get_str16_nolen(pb, len3, s->copyright, sizeof(s->copyright));
- get_str16_nolen(pb, len4, s->comment, sizeof(s->comment));
+ get_str16_nolen(pb, len4, s->comment , sizeof(s->comment));
url_fskip(pb, len5);
+ } else if (!memcmp(&g, &stream_bitrate_guid, sizeof(GUID))) {
+ int stream_count = get_le16(pb);
+ int j;
+
+// av_log(NULL, AV_LOG_ERROR, "stream bitrate properties\n");
+// av_log(NULL, AV_LOG_ERROR, "streams %d\n", streams);
+ for(j = 0; j < stream_count; j++) {
+ int flags, bitrate, stream_id;
+
+ flags= get_le16(pb);
+ bitrate= get_le32(pb);
+ stream_id= (flags & 0x7f);
+// av_log(NULL, AV_LOG_ERROR, "flags: 0x%x stream id %d, bitrate %d\n", flags, stream_id, bitrate);
+ asf->stream_bitrates[stream_id-1]= bitrate;
+ }
} else if (!memcmp(&g, &extended_content_header, sizeof(GUID))) {
int desc_count, i;
@@ -355,53 +373,69 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
int name_len,value_type,value_len;
uint64_t value_num = 0;
- char *name, *value;
+ char name[1024];
name_len = get_le16(pb);
- name = (char *)av_malloc(name_len * 2);
- get_str16_nolen(pb, name_len, name, name_len * 2);
+ get_str16_nolen(pb, name_len, name, sizeof(name));
value_type = get_le16(pb);
value_len = get_le16(pb);
if ((value_type == 0) || (value_type == 1)) // unicode or byte
{
- value = (char *)av_malloc(value_len * 2);
- get_str16_nolen(pb, value_len, value,
- value_len * 2);
- if (strcmp(name,"WM/AlbumTitle")==0) { pstrcpy(s->album, sizeof(s->album), value); }
- av_free(value);
+ if (!strcmp(name,"WM/AlbumTitle")) get_str16_nolen(pb, value_len, s->album, sizeof(s->album));
+ else if(!strcmp(name,"WM/Genre" )) get_str16_nolen(pb, value_len, s->genre, sizeof(s->genre));
+ else url_fskip(pb, value_len);
}
if ((value_type >= 2) && (value_type <= 5)) // boolean or DWORD or QWORD or WORD
{
- if (value_type==2) value_num = get_le32(pb);
- if (value_type==3) value_num = get_le32(pb);
- if (value_type==4) value_num = get_le64(pb);
- if (value_type==5) value_num = get_le16(pb);
- if (strcmp(name,"WM/Track")==0) s->track = value_num + 1;
- if (strcmp(name,"WM/TrackNumber")==0) s->track = value_num;
+ value_num= get_value(pb, value_type);
+ if (!strcmp(name,"WM/Track" )) s->track = value_num + 1;
+ if (!strcmp(name,"WM/TrackNumber")) s->track = value_num;
}
- av_free(name);
}
+ } else if (!memcmp(&g, &metadata_header, sizeof(GUID))) {
+ int n, stream_num, name_len, value_len, value_type, value_num;
+ n = get_le16(pb);
+
+ for(i=0;i<n;i++) {
+ char name[1024];
+
+ get_le16(pb); //lang_list_index
+ stream_num= get_le16(pb);
+ name_len= get_le16(pb);
+ value_type= get_le16(pb);
+ value_len= get_le32(pb);
+
+ get_str16_nolen(pb, name_len, name, sizeof(name));
+//av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d <%s>\n", i, stream_num, name_len, value_type, value_len, name);
+ value_num= get_le16(pb);//we should use get_value() here but it doesnt work 2 is le16 here but le32 elsewhere
+ url_fskip(pb, value_len - 2);
+
+ if(stream_num<128){
+ if (!strcmp(name, "AspectRatioX")) dar[stream_num].num= value_num;
+ else if(!strcmp(name, "AspectRatioY")) dar[stream_num].den= value_num;
+ }
+ }
} else if (!memcmp(&g, &ext_stream_header, sizeof(GUID))) {
int ext_len, payload_ext_ct, stream_ct;
uint32_t ext_d;
int64_t pos_ex_st;
pos_ex_st = url_ftell(pb);
- get_le64(pb);
- get_le64(pb);
- get_le32(pb);
- get_le32(pb);
- get_le32(pb);
- get_le32(pb);
- get_le32(pb);
- get_le32(pb);
- get_le32(pb);
- get_le32(pb);
- get_le16(pb);
- get_le16(pb);
- get_le64(pb);
- stream_ct = get_le16(pb);
- payload_ext_ct = get_le16(pb);
+ get_le64(pb); // starttime
+ get_le64(pb); // endtime
+ get_le32(pb); // leak-datarate
+ get_le32(pb); // bucket-datasize
+ get_le32(pb); // init-bucket-fullness
+ get_le32(pb); // alt-leak-datarate
+ get_le32(pb); // alt-bucket-datasize
+ get_le32(pb); // alt-init-bucket-fullness
+ get_le32(pb); // max-object-size
+ get_le32(pb); // flags (reliable,seekable,no_cleanpoints?,resend-live-cleanpoints, rest of bits reserved)
+ get_le16(pb); // stream-num
+ get_le16(pb); // stream-language-id-index
+ get_le64(pb); // avg frametime in 100ns units
+ stream_ct = get_le16(pb); //stream-name-count
+ payload_ext_ct = get_le16(pb); //payload-extension-system-count
for (i=0; i<stream_ct; i++){
get_le16(pb);
@@ -463,6 +497,18 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
asf->data_offset = url_ftell(pb);
asf->packet_size_left = 0;
+
+ for(i=0; i<128; i++){
+ int stream_num= asf->asfid2avid[i];
+ if(stream_num>=0 && dar[i].num>0 && dar[i].den>0){
+ AVCodecContext *codec= s->streams[stream_num]->codec;
+ av_reduce(&codec->sample_aspect_ratio.num,
+ &codec->sample_aspect_ratio.den,
+ dar[i].num, dar[i].den, INT_MAX);
+//av_log(NULL, AV_LOG_ERROR, "dar %d:%d sar=%d:%d\n", dar[i].num, dar[i].den, codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
+ }
+ }
+
return 0;
fail:
@@ -486,36 +532,47 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
default: var = defval; break; \
}
+/**
+ *
+ * @return <0 in case of an error
+ */
static int asf_get_packet(AVFormatContext *s)
{
ASFContext *asf = s->priv_data;
ByteIOContext *pb = &s->pb;
uint32_t packet_length, padsize;
- int rsize = 9;
- int c;
+ int rsize = 8;
+ int c, d, e, off;
+
+ off= (url_ftell(&s->pb) - s->data_offset) % asf->packet_size + 3;
- assert((url_ftell(&s->pb) - s->data_offset) % asf->packet_size == 0);
+ c=d=e=-1;
+ while(off-- > 0){
+ c=d; d=e;
+ e= get_byte(pb);
+ if(c == 0x82 && !d && !e)
+ break;
+ }
- c = get_byte(pb);
if (c != 0x82) {
if (!url_feof(pb))
av_log(s, AV_LOG_ERROR, "ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb));
}
- if ((c & 0x0f) == 2) { // always true for now
- if (get_le16(pb) != 0) {
+ if ((c & 0x8f) == 0x82) {
+ if (d || e) {
if (!url_feof(pb))
av_log(s, AV_LOG_ERROR, "ff asf bad non zero\n");
- return AVERROR_IO;
+ return -1;
}
- rsize+=2;
-/* }else{
- if (!url_feof(pb))
- printf("ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb));
- return AVERROR_IO;*/
+ c= get_byte(pb);
+ d= get_byte(pb);
+ rsize+=3;
+ }else{
+ url_fseek(pb, -1, SEEK_CUR); //FIXME
}
- asf->packet_flags = get_byte(pb);
- asf->packet_property = get_byte(pb);
+ asf->packet_flags = c;
+ asf->packet_property = d;
DO_2BITS(asf->packet_flags >> 5, packet_length, asf->packet_size);
DO_2BITS(asf->packet_flags >> 1, padsize, 0); // sequence ignored
@@ -524,11 +581,11 @@ static int asf_get_packet(AVFormatContext *s)
//the following checks prevent overflows and infinite loops
if(packet_length >= (1U<<29)){
av_log(s, AV_LOG_ERROR, "invalid packet_length %d at:%"PRId64"\n", packet_length, url_ftell(pb));
- return 0; // FIXME this should be -1
+ return -1;
}
- if(padsize >= (1U<<29)){
+ if(padsize >= packet_length){
av_log(s, AV_LOG_ERROR, "invalid padsize %d at:%"PRId64"\n", padsize, url_ftell(pb));
- return 0; // FIXME this should be -1
+ return -1;
}
asf->packet_timestamp = get_le32(pb);
@@ -552,6 +609,81 @@ static int asf_get_packet(AVFormatContext *s)
return 0;
}
+/**
+ *
+ * @return <0 if error
+ */
+static int asf_read_frame_header(AVFormatContext *s){
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int rsize = 1;
+ int num = get_byte(pb);
+ int64_t ts0, ts1;
+
+ asf->packet_segments--;
+ asf->packet_key_frame = num >> 7;
+ asf->stream_index = asf->asfid2avid[num & 0x7f];
+ // sequence should be ignored!
+ DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0);
+ DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0);
+ DO_2BITS(asf->packet_property, asf->packet_replic_size, 0);
+//printf("key:%d stream:%d seq:%d offset:%d replic_size:%d\n", asf->packet_key_frame, asf->stream_index, asf->packet_seq, //asf->packet_frag_offset, asf->packet_replic_size);
+ if (asf->packet_replic_size >= 8) {
+ asf->packet_obj_size = get_le32(pb);
+ if(asf->packet_obj_size >= (1<<24) || asf->packet_obj_size <= 0){
+ av_log(s, AV_LOG_ERROR, "packet_obj_size invalid\n");
+ return -1;
+ }
+ asf->packet_frag_timestamp = get_le32(pb); // timestamp
+ if(asf->packet_replic_size >= 8+38+4){
+// for(i=0; i<asf->packet_replic_size-8; i++)
+// av_log(s, AV_LOG_DEBUG, "%02X ",get_byte(pb));
+// av_log(s, AV_LOG_DEBUG, "\n");
+ url_fskip(pb, 10);
+ ts0= get_le64(pb);
+ ts1= get_le64(pb);
+ url_fskip(pb, 12);
+ get_le32(pb);
+ url_fskip(pb, asf->packet_replic_size - 8 - 38 - 4);
+ if(ts0!= -1) asf->packet_frag_timestamp= ts0/10000;
+ else asf->packet_frag_timestamp= AV_NOPTS_VALUE;
+ }else
+ url_fskip(pb, asf->packet_replic_size - 8);
+ rsize += asf->packet_replic_size; // FIXME - check validity
+ } else if (asf->packet_replic_size==1){
+ // multipacket - frag_offset is begining timestamp
+ asf->packet_time_start = asf->packet_frag_offset;
+ asf->packet_frag_offset = 0;
+ asf->packet_frag_timestamp = asf->packet_timestamp;
+
+ asf->packet_time_delta = get_byte(pb);
+ rsize++;
+ }else if(asf->packet_replic_size!=0){
+ av_log(s, AV_LOG_ERROR, "unexpected packet_replic_size of %d\n", asf->packet_replic_size);
+ return -1;
+ }
+ if (asf->packet_flags & 0x01) {
+ DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
+ if(asf->packet_frag_size > asf->packet_size_left - rsize){
+ av_log(s, AV_LOG_ERROR, "packet_frag_size is invalid\n");
+ return -1;
+ }
+ //printf("Fragsize %d\n", asf->packet_frag_size);
+ } else {
+ asf->packet_frag_size = asf->packet_size_left - rsize;
+ //printf("Using rest %d %d %d\n", asf->packet_frag_size, asf->packet_size_left, rsize);
+ }
+ if (asf->packet_replic_size == 1) {
+ asf->packet_multi_size = asf->packet_frag_size;
+ if (asf->packet_multi_size > asf->packet_size_left)
+ return -1;
+ }
+ asf->packet_size_left -= rsize;
+ //printf("___objsize____ %d %d rs:%d\n", asf->packet_obj_size, asf->packet_frag_offset, rsize);
+
+ return 0;
+}
+
static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
ASFContext *asf = s->priv_data;
@@ -559,77 +691,33 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
ByteIOContext *pb = &s->pb;
//static int pc = 0;
for (;;) {
- int rsize = 0;
+ if(url_feof(pb))
+ return AVERROR_IO;
if (asf->packet_size_left < FRAME_HEADER_SIZE
|| asf->packet_segments < 1) {
//asf->packet_size_left <= asf->packet_padsize) {
int ret = asf->packet_size_left + asf->packet_padsize;
//printf("PacketLeftSize:%d Pad:%d Pos:%"PRId64"\n", asf->packet_size_left, asf->packet_padsize, url_ftell(pb));
- if((url_ftell(&s->pb) + ret - s->data_offset) % asf->packet_size)
- ret += asf->packet_size - ((url_ftell(&s->pb) + ret - s->data_offset) % asf->packet_size);
assert(ret>=0);
/* fail safe */
url_fskip(pb, ret);
+
asf->packet_pos= url_ftell(&s->pb);
if (asf->data_object_size != (uint64_t)-1 &&
(asf->packet_pos - asf->data_object_offset >= asf->data_object_size))
return AVERROR_IO; /* Do not exceed the size of the data object */
ret = asf_get_packet(s);
//printf("READ ASF PACKET %d r:%d c:%d\n", ret, asf->packet_size_left, pc++);
- if (ret < 0 || url_feof(pb))
- return AVERROR_IO;
+ if (ret < 0)
+ assert(asf->packet_size_left < FRAME_HEADER_SIZE || asf->packet_segments < 1);
asf->packet_time_start = 0;
continue;
}
if (asf->packet_time_start == 0) {
- /* read frame header */
- int num = get_byte(pb);
- asf->packet_segments--;
- rsize++;
- asf->packet_key_frame = (num & 0x80) >> 7;
- asf->stream_index = asf->asfid2avid[num & 0x7f];
- // sequence should be ignored!
- DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0);
- DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0);
- DO_2BITS(asf->packet_property, asf->packet_replic_size, 0);
-//printf("key:%d stream:%d seq:%d offset:%d replic_size:%d\n", asf->packet_key_frame, asf->stream_index, asf->packet_seq, //asf->packet_frag_offset, asf->packet_replic_size);
- if (asf->packet_replic_size > 1) {
- assert(asf->packet_replic_size >= 8);
- // it should be always at least 8 bytes - FIXME validate
- asf->packet_obj_size = get_le32(pb);
- asf->packet_frag_timestamp = get_le32(pb); // timestamp
- if (asf->packet_replic_size > 8)
- url_fskip(pb, asf->packet_replic_size - 8);
- rsize += asf->packet_replic_size; // FIXME - check validity
- } else if (asf->packet_replic_size==1){
- // multipacket - frag_offset is begining timestamp
- asf->packet_time_start = asf->packet_frag_offset;
- asf->packet_frag_offset = 0;
- asf->packet_frag_timestamp = asf->packet_timestamp;
-
- asf->packet_time_delta = get_byte(pb);
- rsize++;
- }else{
- assert(asf->packet_replic_size==0);
- }
- if (asf->packet_flags & 0x01) {
- DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
-#undef DO_2BITS
- //printf("Fragsize %d\n", asf->packet_frag_size);
- } else {
- asf->packet_frag_size = asf->packet_size_left - rsize;
- //printf("Using rest %d %d %d\n", asf->packet_frag_size, asf->packet_size_left, rsize);
- }
- if (asf->packet_replic_size == 1) {
- asf->packet_multi_size = asf->packet_frag_size;
- if (asf->packet_multi_size > asf->packet_size_left) {
- asf->packet_segments = 0;
- continue;
- }
+ if(asf_read_frame_header(s) < 0){
+ asf->packet_segments= 0;
+ continue;
}
- asf->packet_size_left -= rsize;
- //printf("___objsize____ %d %d rs:%d\n", asf->packet_obj_size, asf->packet_frag_offset, rsize);
-
if (asf->stream_index < 0
|| s->streams[asf->stream_index]->discard >= AVDISCARD_ALL
|| (!asf->packet_key_frame && s->streams[asf->stream_index]->discard >= AVDISCARD_NONKEY)
@@ -639,34 +727,13 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
url_fskip(pb, asf->packet_frag_size);
asf->packet_size_left -= asf->packet_frag_size;
if(asf->stream_index < 0)
- av_log(s, AV_LOG_ERROR, "ff asf skip %d %d\n", asf->packet_frag_size, num & 0x7f);
+ av_log(s, AV_LOG_ERROR, "ff asf skip %d (unknown stream)\n", asf->packet_frag_size);
continue;
}
asf->asf_st = s->streams[asf->stream_index]->priv_data;
}
asf_st = asf->asf_st;
- if ((asf->packet_frag_offset != asf_st->frag_offset
- || (asf->packet_frag_offset
- && asf->packet_seq != asf_st->seq)) // seq should be ignored
- ) {
- /* cannot continue current packet: free it */
- // FIXME better check if packet was already allocated
- av_log(s, AV_LOG_INFO, "ff asf parser skips: %d - %d o:%d - %d %d %d fl:%d\n",
- asf_st->pkt.size,
- asf->packet_obj_size,
- asf->packet_frag_offset, asf_st->frag_offset,
- asf->packet_seq, asf_st->seq, asf->packet_frag_size);
- if (asf_st->pkt.size)
- av_free_packet(&asf_st->pkt);
- asf_st->frag_offset = 0;
- if (asf->packet_frag_offset != 0) {
- url_fskip(pb, asf->packet_frag_size);
- av_log(s, AV_LOG_INFO, "ff asf parser skipping %db\n", asf->packet_frag_size);
- asf->packet_size_left -= asf->packet_frag_size;
- continue;
- }
- }
if (asf->packet_replic_size == 1) {
// frag_offset is here used as the begining timestamp
asf->packet_frag_timestamp = asf->packet_time_start;
@@ -684,7 +751,13 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
asf->packet_multi_size -= asf->packet_obj_size;
//printf("COMPRESS size %d %d %d ms:%d\n", asf->packet_obj_size, asf->packet_frag_timestamp, asf->packet_size_left, asf->packet_multi_size);
}
- if (asf_st->frag_offset == 0) {
+ if ( asf_st->pkt.size != asf->packet_obj_size
+ || asf_st->frag_offset + asf->packet_frag_size > asf_st->pkt.size) { //FIXME is this condition sufficient?
+ if(asf_st->pkt.data){
+ av_log(s, AV_LOG_INFO, "freeing incomplete packet size %d, new %d\n", asf_st->pkt.size, asf->packet_obj_size);
+ asf_st->frag_offset = 0;
+ av_free_packet(&asf_st->pkt);
+ }
/* new packet */
av_new_packet(&asf_st->pkt, asf->packet_obj_size);
asf_st->seq = asf->packet_seq;
@@ -708,6 +781,14 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
asf->packet_size_left -= asf->packet_frag_size;
if (asf->packet_size_left < 0)
continue;
+
+ if( asf->packet_frag_offset >= asf_st->pkt.size
+ || asf->packet_frag_size > asf_st->pkt.size - asf->packet_frag_offset){
+ av_log(s, AV_LOG_ERROR, "packet fragment position invalid %u,%u not in %u\n",
+ asf->packet_frag_offset, asf->packet_frag_size, asf_st->pkt.size);
+ continue;
+ }
+
get_buffer(pb, asf_st->pkt.data + asf->packet_frag_offset,
asf->packet_frag_size);
asf_st->frag_offset += asf->packet_frag_size;
@@ -715,6 +796,9 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
if (asf_st->frag_offset == asf_st->pkt.size) {
/* return packet */
if (asf_st->ds_span > 1) {
+ if(asf_st->pkt.size != asf_st->ds_packet_size * asf_st->ds_span){
+ av_log(s, AV_LOG_ERROR, "pkt.size != ds_packet_size * ds_span\n");
+ }else{
/* packet descrambling */
uint8_t *newdata = av_malloc(asf_st->pkt.size);
if (newdata) {
@@ -725,6 +809,9 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
int col = off % asf_st->ds_span;
int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size;
//printf("off:%d row:%d col:%d idx:%d\n", off, row, col, idx);
+
+ assert(offset + asf_st->ds_chunk_size <= asf_st->pkt.size);
+ assert(idx+1 <= asf_st->pkt.size / asf_st->ds_chunk_size);
memcpy(newdata + offset,
asf_st->pkt.data + idx * asf_st->ds_chunk_size,
asf_st->ds_chunk_size);
@@ -733,9 +820,10 @@ static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
av_free(asf_st->pkt.data);
asf_st->pkt.data = newdata;
}
+ }
}
asf_st->frag_offset = 0;
- memcpy(pkt, &asf_st->pkt, sizeof(AVPacket));
+ *pkt= asf_st->pkt;
//printf("packet %d %d\n", asf_st->pkt.size, asf->packet_frag_size);
asf_st->pkt.size = 0;
asf_st->pkt.data = 0;
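
The descrambling branch above shuffles the payload around in ds_chunk_size units; the new size check and asserts only guard the index arithmetic that was already there. A minimal standalone sketch of that mapping (function and parameter names are made up for illustration, they are not part of this patch):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Rearrange 'size' bytes of 'src' into a freshly allocated buffer using the
 * same row/column arithmetic as the ASF demuxer above; span, packet_size and
 * chunk_size stand for ds_span, ds_packet_size and ds_chunk_size, and size is
 * expected to equal packet_size * span. Returns NULL on allocation failure. */
static uint8_t *descramble(const uint8_t *src, int size,
                           int span, int packet_size, int chunk_size)
{
    uint8_t *dst = malloc(size);
    int offset;

    if (!dst)
        return NULL;
    for (offset = 0; offset < size; offset += chunk_size) {
        int off = offset / chunk_size;
        int row = off / span;
        int col = off % span;
        int idx = row + col * packet_size / chunk_size;
        memcpy(dst + offset, src + idx * chunk_size, chunk_size);
    }
    return dst;
}
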
@@ -752,7 +840,7 @@ static int asf_read_close(AVFormatContext *s)
for(i=0;i<s->nb_streams;i++) {
AVStream *st = s->streams[i];
av_free(st->priv_data);
- av_free(st->codec->palctrl);
+ av_free(st->codec->palctrl);
}
return 0;
}
@@ -767,8 +855,6 @@ static void asf_reset_header(AVFormatContext *s)
int i;
asf->packet_nb_frames = 0;
- asf->packet_timestamp_start = -1;
- asf->packet_timestamp_end = -1;
asf->packet_size_left = 0;
asf->packet_segments = 0;
asf->packet_flags = 0;
@@ -819,7 +905,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos,
asf_reset_header(s);
for(;;){
if (av_read_frame(s, pkt) < 0){
- av_log(s, AV_LOG_INFO, "seek failed\n");
+ av_log(s, AV_LOG_INFO, "asf_read_pts failed\n");
return AV_NOPTS_VALUE;
}
@@ -831,7 +917,7 @@ static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos,
asf_st= s->streams[i]->priv_data;
- assert((asf_st->packet_pos - s->data_offset) % asf->packet_size == 0);
+// assert((asf_st->packet_pos - s->data_offset) % asf->packet_size == 0);
pos= asf_st->packet_pos;
av_add_index_entry(s->streams[i], pos, pts, pkt->size, pos - start_pos[i] + 1, AVINDEX_KEYFRAME);
diff --git a/contrib/ffmpeg/libavformat/asf.h b/contrib/ffmpeg/libavformat/asf.h
index bbe88801a..6d76ebecb 100644
--- a/contrib/ffmpeg/libavformat/asf.h
+++ b/contrib/ffmpeg/libavformat/asf.h
@@ -31,45 +31,36 @@ typedef struct {
int ds_span; /* descrambling */
int ds_packet_size;
int ds_chunk_size;
- int ds_data_size;
- int ds_silence_data;
int64_t packet_pos;
} ASFStream;
-typedef struct {
- uint32_t v1;
- uint16_t v2;
- uint16_t v3;
- uint8_t v4[8];
-} GUID;
+typedef uint8_t GUID[16];
typedef struct {
- GUID guid; // generated by client computer
- uint64_t file_size; // in bytes
- // invalid if broadcasting
- uint64_t create_time; // time of creation, in 100-nanosecond units since 1.1.1601
- // invalid if broadcasting
- uint64_t packets_count; // how many packets are there in the file
- // invalid if broadcasting
- uint64_t play_time; // play time, in 100-nanosecond units
- // invalid if broadcasting
- uint64_t send_time; // time to send file, in 100-nanosecond units
- // invalid if broadcasting (could be ignored)
- uint32_t preroll; // timestamp of the first packet, in milliseconds
- // if nonzero - substract from time
- uint32_t ignore; // preroll is 64bit - but let's just ignore it
- uint32_t flags; // 0x01 - broadcast
- // 0x02 - seekable
- // rest is reserved should be 0
- uint32_t min_pktsize; // size of a data packet
- // invalid if broadcasting
- uint32_t max_pktsize; // shall be the same as for min_pktsize
- // invalid if broadcasting
- uint32_t max_bitrate; // bandwith of stream in bps
- // should be the sum of bitrates of the
- // individual media streams
+ GUID guid; ///< generated by client computer
+ uint64_t file_size; /**< in bytes
+ * invalid if broadcasting */
+ uint64_t create_time; /**< time of creation, in 100-nanosecond units since 1.1.1601
+ * invalid if broadcasting */
+ uint64_t play_time; /**< play time, in 100-nanosecond units
+ * invalid if broadcasting */
+ uint64_t send_time; /**< time to send file, in 100-nanosecond units
+ * invalid if broadcasting (could be ignored) */
+ uint32_t preroll; /**< timestamp of the first packet, in milliseconds
+ * if nonzero - subtract from time */
+ uint32_t ignore; ///< preroll is 64bit - but let's just ignore it
+ uint32_t flags; /**< 0x01 - broadcast
+ * 0x02 - seekable
+ * rest is reserved should be 0 */
+ uint32_t min_pktsize; /**< size of a data packet
+ * invalid if broadcasting */
+ uint32_t max_pktsize; /**< shall be the same as for min_pktsize
+ * invalid if broadcasting */
+ uint32_t max_pktsize; /**< shall be the same as for min_pktsize
+ * invalid if broadcasting */
+ uint32_t max_bitrate; /**< bandwidth of stream in bps
+ * should be the sum of bitrates of the
+ * individual media streams */
} ASFMainHeader;
@@ -83,15 +74,15 @@ typedef struct {
uint32_t seqno;
unsigned int packet_size;
int is_streamed;
- int asfid2avid[128]; /* conversion table from asf ID 2 AVStream ID */
- ASFStream streams[128]; /* it's max number and it's not that big */
+ int asfid2avid[128]; ///< conversion table from asf ID 2 AVStream ID
+ ASFStream streams[128]; ///< it's max number and it's not that big
+ uint32_t stream_bitrates[128]; ///< max number of streams, bitrate for each (for streaming)
/* non streamed additonnal info */
- int64_t nb_packets;
- int64_t duration; /* in 100ns units */
+ uint64_t nb_packets; ///< how many packets are there in the file, invalid if broadcasting
+ int64_t duration; ///< in 100ns units
/* packet filling */
unsigned char multi_payloads_present;
int packet_size_left;
- int prev_packet_sent_time;
int packet_timestamp_start;
int packet_timestamp_end;
unsigned int packet_nb_payloads;
@@ -99,9 +90,9 @@ typedef struct {
uint8_t packet_buf[PACKET_SIZE];
ByteIOContext pb;
/* only for reading */
- uint64_t data_offset; /* begining of the first data packet */
- uint64_t data_object_offset; /* data object offset (excl. GUID & size)*/
- uint64_t data_object_size; /* size of the data object */
+ uint64_t data_offset; ///< beginning of the first data packet
+ uint64_t data_object_offset; ///< data object offset (excl. GUID & size)
+ uint64_t data_object_size; ///< size of the data object
int index_read;
ASFMainHeader hdr;
@@ -115,9 +106,9 @@ typedef struct {
int packet_replic_size;
int packet_key_frame;
int packet_padsize;
- int packet_frag_offset;
- int packet_frag_size;
- int packet_frag_timestamp;
+ unsigned int packet_frag_offset;
+ unsigned int packet_frag_size;
+ int64_t packet_frag_timestamp;
int packet_multi_size;
int packet_obj_size;
int packet_time_delta;
@@ -133,94 +124,98 @@ typedef struct {
uint32_t nb_index_memory_alloc;
uint16_t maximum_packet;
- ASFStream* asf_st; /* currently decoded stream */
+ ASFStream* asf_st; ///< currently decoded stream
} ASFContext;
static const GUID asf_header = {
- 0x75B22630, 0x668E, 0x11CF, { 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C },
+ 0x30, 0x26, 0xB2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C
};
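
The GUIDs are now kept as the 16 raw bytes in on-disk order: the first three fields of the removed struct were little-endian, the trailing eight bytes were not, which is why 0x75B22630, 0x668E, 0x11CF turns into 30 26 B2 75 8E 66 CF 11 above. A small sketch of that conversion (the helper is hypothetical, shown only to document the byte order):

#include <stdint.h>

/* Serialize the old struct-style GUID fields (v1, v2, v3, v4[8]) into the
 * 16-byte on-disk layout now used by the GUID typedef: v1, v2 and v3 are
 * written little-endian, v4 is copied unchanged. */
static void guid_to_bytes(uint32_t v1, uint16_t v2, uint16_t v3,
                          const uint8_t v4[8], uint8_t out[16])
{
    int i;

    out[0] = v1;        out[1] = v1 >> 8;
    out[2] = v1 >> 16;  out[3] = v1 >> 24;
    out[4] = v2;        out[5] = v2 >> 8;
    out[6] = v3;        out[7] = v3 >> 8;
    for (i = 0; i < 8; i++)
        out[8 + i] = v4[i];
}
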
static const GUID file_header = {
- 0x8CABDCA1, 0xA947, 0x11CF, { 0x8E, 0xE4, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 },
+ 0xA1, 0xDC, 0xAB, 0x8C, 0x47, 0xA9, 0xCF, 0x11, 0x8E, 0xE4, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65
};
static const GUID stream_header = {
- 0xB7DC0791, 0xA9B7, 0x11CF, { 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 },
+ 0x91, 0x07, 0xDC, 0xB7, 0xB7, 0xA9, 0xCF, 0x11, 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65
};
static const GUID ext_stream_header = {
- 0x14E6A5CB, 0xC672, 0x4332, { 0x83, 0x99, 0xA9, 0x69, 0x52, 0x06, 0x5B, 0x5A },
+ 0xCB, 0xA5, 0xE6, 0x14, 0x72, 0xC6, 0x32, 0x43, 0x83, 0x99, 0xA9, 0x69, 0x52, 0x06, 0x5B, 0x5A
};
static const GUID audio_stream = {
- 0xF8699E40, 0x5B4D, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+ 0x40, 0x9E, 0x69, 0xF8, 0x4D, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};
static const GUID audio_conceal_none = {
- // 0x49f1a440, 0x4ece, 0x11d0, { 0xa3, 0xac, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6 },
+ // 0x40, 0xa4, 0xf1, 0x49, 0x4ece, 0x11d0, 0xa3, 0xac, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6
// New value lifted from avifile
- 0x20fb5700, 0x5b55, 0x11cf, { 0xa8, 0xfd, 0x00, 0x80, 0x5f, 0x5c, 0x44, 0x2b },
+ 0x00, 0x57, 0xfb, 0x20, 0x55, 0x5B, 0xCF, 0x11, 0xa8, 0xfd, 0x00, 0x80, 0x5f, 0x5c, 0x44, 0x2b
};
static const GUID audio_conceal_spread = {
- 0xBFC3CD50, 0x618F, 0x11CF, { 0x8B, 0xB2, 0x00, 0xAA, 0x00, 0xB4, 0xE2, 0x20 },
+ 0x50, 0xCD, 0xC3, 0xBF, 0x8F, 0x61, 0xCF, 0x11, 0x8B, 0xB2, 0x00, 0xAA, 0x00, 0xB4, 0xE2, 0x20
};
static const GUID video_stream = {
- 0xBC19EFC0, 0x5B4D, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+ 0xC0, 0xEF, 0x19, 0xBC, 0x4D, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};
static const GUID video_conceal_none = {
- 0x20FB5700, 0x5B55, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+ 0x00, 0x57, 0xFB, 0x20, 0x55, 0x5B, 0xCF, 0x11, 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B
};
static const GUID command_stream = {
- 0x59DACFC0, 0x59E6, 0x11D0, { 0xA3, 0xAC, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6 },
+ 0xC0, 0xCF, 0xDA, 0x59, 0xE6, 0x59, 0xD0, 0x11, 0xA3, 0xAC, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6
};
static const GUID comment_header = {
- 0x75b22633, 0x668e, 0x11cf, { 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c },
+ 0x33, 0x26, 0xb2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c
};
static const GUID codec_comment_header = {
- 0x86D15240, 0x311D, 0x11D0, { 0xA3, 0xA4, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6 },
+ 0x40, 0x52, 0xD1, 0x86, 0x1D, 0x31, 0xD0, 0x11, 0xA3, 0xA4, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6
};
static const GUID codec_comment1_header = {
- 0x86d15241, 0x311d, 0x11d0, { 0xa3, 0xa4, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6 },
+ 0x41, 0x52, 0xd1, 0x86, 0x1D, 0x31, 0xD0, 0x11, 0xa3, 0xa4, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6
};
static const GUID data_header = {
- 0x75b22636, 0x668e, 0x11cf, { 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c },
+ 0x36, 0x26, 0xb2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c
};
static const GUID head1_guid = {
- 0x5fbf03b5, 0xa92e, 0x11cf, { 0x8e, 0xe3, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65 },
+ 0xb5, 0x03, 0xbf, 0x5f, 0x2E, 0xA9, 0xCF, 0x11, 0x8e, 0xe3, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65
};
static const GUID head2_guid = {
- 0xabd3d211, 0xa9ba, 0x11cf, { 0x8e, 0xe6, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65 },
+ 0x11, 0xd2, 0xd3, 0xab, 0xBA, 0xA9, 0xCF, 0x11, 0x8e, 0xe6, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65
};
static const GUID extended_content_header = {
- 0xD2D0A440, 0xE307, 0x11D2, { 0x97, 0xF0, 0x00, 0xA0, 0xC9, 0x5E, 0xA8, 0x50 },
+ 0x40, 0xA4, 0xD0, 0xD2, 0x07, 0xE3, 0xD2, 0x11, 0x97, 0xF0, 0x00, 0xA0, 0xC9, 0x5E, 0xA8, 0x50
};
static const GUID simple_index_header = {
- 0x33000890, 0xE5B1, 0x11CF, { 0x89, 0xF4, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xCB },
+ 0x90, 0x08, 0x00, 0x33, 0xB1, 0xE5, 0xCF, 0x11, 0x89, 0xF4, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xCB
};
static const GUID ext_stream_embed_stream_header = {
- 0x3afb65e2, 0x47ef, 0x40f2, { 0xac, 0x2c, 0x70, 0xa9, 0x0d, 0x71, 0xd3, 0x43}
+ 0xe2, 0x65, 0xfb, 0x3a, 0xEF, 0x47, 0xF2, 0x40, 0xac, 0x2c, 0x70, 0xa9, 0x0d, 0x71, 0xd3, 0x43
};
static const GUID ext_stream_audio_stream = {
- 0x31178c9d, 0x03e1, 0x4528, { 0xb5, 0x82, 0x3d, 0xf9, 0xdb, 0x22, 0xf5, 0x03}
+ 0x9d, 0x8c, 0x17, 0x31, 0xE1, 0x03, 0x28, 0x45, 0xb5, 0x82, 0x3d, 0xf9, 0xdb, 0x22, 0xf5, 0x03
+};
+
+static const GUID metadata_header = {
+ 0xea, 0xcb, 0xf8, 0xc5, 0xaf, 0x5b, 0x77, 0x48, 0x84, 0x67, 0xaa, 0x8c, 0x44, 0xfa, 0x4c, 0xca
};
/* I am not a number !!! This GUID is the one found on the PC used to
generate the stream */
static const GUID my_guid = {
- 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0 },
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
#define ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT 0x80 //1000 0000
diff --git a/contrib/ffmpeg/libavformat/au.c b/contrib/ffmpeg/libavformat/au.c
index 27c7cdc85..9e84c9d31 100644
--- a/contrib/ffmpeg/libavformat/au.c
+++ b/contrib/ffmpeg/libavformat/au.c
@@ -32,10 +32,10 @@
#include "riff.h"
/* if we don't know the size in advance */
-#define AU_UNKOWN_SIZE ((uint32_t)(~0))
+#define AU_UNKNOWN_SIZE ((uint32_t)(~0))
/* The ffmpeg codecs we support, and the IDs they have in the file */
-static const CodecTag codec_au_tags[] = {
+static const AVCodecTag codec_au_tags[] = {
{ CODEC_ID_PCM_MULAW, 1 },
{ CODEC_ID_PCM_S16BE, 3 },
{ CODEC_ID_PCM_ALAW, 27 },
@@ -47,12 +47,10 @@ static const CodecTag codec_au_tags[] = {
static int put_au_header(ByteIOContext *pb, AVCodecContext *enc)
{
if(!enc->codec_tag)
- enc->codec_tag = codec_get_tag(codec_au_tags, enc->codec_id);
- if(!enc->codec_tag)
return -1;
put_tag(pb, ".snd"); /* magic number */
put_be32(pb, 24); /* header size */
- put_be32(pb, AU_UNKOWN_SIZE); /* data size */
+ put_be32(pb, AU_UNKNOWN_SIZE); /* data size */
put_be32(pb, (uint32_t)enc->codec_tag); /* codec ID */
put_be32(pb, enc->sample_rate);
put_be32(pb, (uint32_t)enc->channels);
@@ -190,6 +188,7 @@ AVInputFormat au_demuxer = {
au_read_packet,
au_read_close,
pcm_read_seek,
+ .codec_tag= (const AVCodecTag*[]){codec_au_tags, 0},
};
#endif
@@ -205,5 +204,6 @@ AVOutputFormat au_muxer = {
au_write_header,
au_write_packet,
au_write_trailer,
+ .codec_tag= (const AVCodecTag*[]){codec_au_tags, 0},
};
#endif //CONFIG_AU_MUXER
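
With the new .codec_tag field both the AU demuxer and muxer expose their tag table, so the generic av_codec_get_id()/av_codec_get_tag() helpers declared in avformat.h can translate between file tags and CODEC_IDs. A minimal sketch, assuming the formats have been registered and keeping in mind that codec_au_tags itself stays static to au.c:

#include "avformat.h"

/* Look up AU tags through the muxer's exported codec_tag table. */
static void au_tag_lookup_example(void)
{
    AVOutputFormat *fmt;

    av_register_all();
    fmt = guess_format("au", NULL, NULL);
    if (fmt && fmt->codec_tag) {
        enum CodecID id  = av_codec_get_id(fmt->codec_tag, 3);   /* tag 3 -> CODEC_ID_PCM_S16BE */
        unsigned int tag = av_codec_get_tag(fmt->codec_tag, CODEC_ID_PCM_MULAW); /* -> 1 */
        (void)id; (void)tag;
    }
}
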
diff --git a/contrib/ffmpeg/libavformat/audio.c b/contrib/ffmpeg/libavformat/audio.c
index 1dfccccb8..a9e5bffd5 100644
--- a/contrib/ffmpeg/libavformat/audio.c
+++ b/contrib/ffmpeg/libavformat/audio.c
@@ -23,7 +23,7 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
-#ifdef __OpenBSD__
+#ifdef HAVE_SOUNDCARD_H
#include <soundcard.h>
#else
#include <sys/soundcard.h>
@@ -53,14 +53,6 @@ static int audio_open(AudioData *s, int is_output, const char *audio_device)
int tmp, err;
char *flip = getenv("AUDIO_FLIP_LEFT");
- /* open linux audio device */
- if (!audio_device)
-#ifdef __OpenBSD__
- audio_device = "/dev/sound";
-#else
- audio_device = "/dev/dsp";
-#endif
-
if (is_output)
audio_fd = open(audio_device, O_WRONLY);
else
@@ -224,12 +216,12 @@ static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
st = av_new_stream(s1, 0);
if (!st) {
- return -ENOMEM;
+ return AVERROR(ENOMEM);
}
s->sample_rate = ap->sample_rate;
s->channels = ap->channels;
- ret = audio_open(s, 0, ap->device);
+ ret = audio_open(s, 0, s1->filename);
if (ret < 0) {
av_free(st);
return AVERROR_IO;
diff --git a/contrib/ffmpeg/libavformat/avformat.h b/contrib/ffmpeg/libavformat/avformat.h
index 5dc41d273..eb8c4e153 100644
--- a/contrib/ffmpeg/libavformat/avformat.h
+++ b/contrib/ffmpeg/libavformat/avformat.h
@@ -25,8 +25,8 @@
extern "C" {
#endif
-#define LIBAVFORMAT_VERSION_INT ((51<<16)+(6<<8)+0)
-#define LIBAVFORMAT_VERSION 51.6.0
+#define LIBAVFORMAT_VERSION_INT ((51<<16)+(11<<8)+0)
+#define LIBAVFORMAT_VERSION 51.11.0
#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT
#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION)
@@ -39,14 +39,6 @@ extern "C" {
/* packet functions */
-#ifndef MAXINT64
-#define MAXINT64 int64_t_C(0x7fffffffffffffff)
-#endif
-
-#ifndef MININT64
-#define MININT64 int64_t_C(0x8000000000000000)
-#endif
-
typedef struct AVPacket {
int64_t pts; ///< presentation time stamp in time_base units
int64_t dts; ///< decompression time stamp in time_base units
@@ -62,6 +54,10 @@ typedef struct AVPacket {
#define PKT_FLAG_KEY 0x0001
void av_destruct_packet_nofree(AVPacket *pkt);
+
+/**
+ * Default packet destructor.
+ */
void av_destruct_packet(AVPacket *pkt);
/* initialize optional fields of a packet */
@@ -76,8 +72,28 @@ static inline void av_init_packet(AVPacket *pkt)
pkt->destruct= av_destruct_packet_nofree;
}
+/**
+ * Allocate the payload of a packet and initialize its fields to default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK. AVERROR_xxx otherwise.
+ */
int av_new_packet(AVPacket *pkt, int size);
+
+/**
+ * Allocate and read the payload of a packet and initialize its fields to default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return >0 (read size) if OK. AVERROR_xxx otherwise.
+ */
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size);
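
Inside a demuxer's read_packet callback the helper documented above does the allocation and the read in one go. A minimal sketch (the fixed chunk size and the single-stream assumption are made up for illustration):

#include "avformat.h"

/* Fill 'pkt' with the next 1024-byte chunk of the input file. */
static int read_raw_chunk(AVFormatContext *s, AVPacket *pkt)
{
    int ret = av_get_packet(&s->pb, pkt, 1024);  /* allocates pkt->data and reads */

    if (ret <= 0)
        return AVERROR_IO;       /* nothing left or I/O error */
    pkt->stream_index = 0;       /* hypothetical single-stream format */
    return ret;
}
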
+
+/**
+ * @warning This is a hack - the packet memory allocation stuff is broken. The
+ * packet is allocated if it was not really allocated
+ */
int av_dup_packet(AVPacket *pkt);
/**
@@ -104,9 +120,11 @@ typedef struct AVFrac {
/*************************************************/
/* input/output formats */
+struct AVCodecTag;
+
struct AVFormatContext;
-/* this structure contains the data a format has to probe a file */
+/** this structure contains the data a format has to probe a file */
typedef struct AVProbeData {
const char *filename;
unsigned char *buf;
@@ -122,46 +140,57 @@ typedef struct AVFormatParameters {
int width;
int height;
enum PixelFormat pix_fmt;
- int channel; /* used to select dv channel */
- const char *device; /* video, audio or DV device */
- const char *standard; /* tv standard, NTSC, PAL, SECAM */
- int mpeg2ts_raw:1; /* force raw MPEG2 transport stream output, if possible */
- int mpeg2ts_compute_pcr:1; /* compute exact PCR for each transport
+ int channel; /**< used to select dv channel */
+#if LIBAVFORMAT_VERSION_INT < (52<<16)
+ const char *device; /**< video, audio or DV device */
+#endif
+ const char *standard; /**< tv standard, NTSC, PAL, SECAM */
+ int mpeg2ts_raw:1; /**< force raw MPEG2 transport stream output, if possible */
+ int mpeg2ts_compute_pcr:1; /**< compute exact PCR for each transport
stream packet (only meaningful if
mpeg2ts_raw is TRUE */
- int initial_pause:1; /* do not begin to play the stream
+ int initial_pause:1; /**< do not begin to play the stream
immediately (RTSP only) */
int prealloced_context:1;
enum CodecID video_codec_id;
enum CodecID audio_codec_id;
} AVFormatParameters;
-#define AVFMT_NOFILE 0x0001 /* no file should be opened */
-#define AVFMT_NEEDNUMBER 0x0002 /* needs '%d' in filename */
-#define AVFMT_SHOW_IDS 0x0008 /* show format stream IDs numbers */
-#define AVFMT_RAWPICTURE 0x0020 /* format wants AVPicture structure for
+//! demuxer will use url_fopen, no opened file should be provided by the caller
+#define AVFMT_NOFILE 0x0001
+#define AVFMT_NEEDNUMBER 0x0002 /**< needs '%d' in filename */
+#define AVFMT_SHOW_IDS 0x0008 /**< show format stream IDs numbers */
+#define AVFMT_RAWPICTURE 0x0020 /**< format wants AVPicture structure for
raw picture data */
-#define AVFMT_GLOBALHEADER 0x0040 /* format wants global header */
-#define AVFMT_NOTIMESTAMPS 0x0080 /* format doesnt need / has any timestamps */
+#define AVFMT_GLOBALHEADER 0x0040 /**< format wants global header */
+#define AVFMT_NOTIMESTAMPS 0x0080 /**< format doesn't need / has any timestamps */
+#define AVFMT_GENERIC_INDEX 0x0100 /**< use generic index building code */
typedef struct AVOutputFormat {
const char *name;
const char *long_name;
const char *mime_type;
- const char *extensions; /* comma separated extensions */
- /* size of private data so that it can be allocated in the wrapper */
+ const char *extensions; /**< comma separated filename extensions */
+ /** size of private data so that it can be allocated in the wrapper */
int priv_data_size;
/* output support */
- enum CodecID audio_codec; /* default audio codec */
- enum CodecID video_codec; /* default video codec */
+ enum CodecID audio_codec; /**< default audio codec */
+ enum CodecID video_codec; /**< default video codec */
int (*write_header)(struct AVFormatContext *);
int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);
int (*write_trailer)(struct AVFormatContext *);
- /* can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER */
+ /** can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER */
int flags;
- /* currently only used to set pixel format if not YUV420P */
+ /** currently only used to set pixel format if not YUV420P */
int (*set_parameters)(struct AVFormatContext *, AVFormatParameters *);
int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, AVPacket *in, int flush);
+
+ /**
+ * list of supported codec_id-codec_tag pairs, ordered by "better choice first"
+ * the arrays are all CODEC_ID_NONE terminated
+ */
+ const struct AVCodecTag **codec_tag;
+
/* private fields */
struct AVOutputFormat *next;
} AVOutputFormat;
@@ -169,21 +198,21 @@ typedef struct AVOutputFormat {
typedef struct AVInputFormat {
const char *name;
const char *long_name;
- /* size of private data so that it can be allocated in the wrapper */
+ /** size of private data so that it can be allocated in the wrapper */
int priv_data_size;
- /* tell if a given file has a chance of being parsing by this format */
+ /** tell if a given file has a chance of being parsing by this format */
int (*read_probe)(AVProbeData *);
- /* read the format header and initialize the AVFormatContext
+ /** read the format header and initialize the AVFormatContext
structure. Return 0 if OK. 'ap' if non NULL contains
additionnal paramters. Only used in raw format right
now. 'av_new_stream' should be called to create new streams. */
int (*read_header)(struct AVFormatContext *,
AVFormatParameters *ap);
- /* read one packet and put it in 'pkt'. pts and flags are also
+ /** read one packet and put it in 'pkt'. pts and flags are also
set. 'av_new_stream' can be called only if the flag
AVFMTCTX_NOHEADER is used. */
int (*read_packet)(struct AVFormatContext *, AVPacket *pkt);
- /* close the stream. The AVFormatContext and AVStreams are not
+ /** close the stream. The AVFormatContext and AVStreams are not
freed by this function */
int (*read_close)(struct AVFormatContext *);
/**
@@ -200,23 +229,25 @@ typedef struct AVInputFormat {
*/
int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,
int64_t *pos, int64_t pos_limit);
- /* can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER */
+ /** can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER */
int flags;
- /* if extensions are defined, then no probe is done. You should
+ /** if extensions are defined, then no probe is done. You should
usually not use extension format guessing because it is not
reliable enough */
const char *extensions;
- /* general purpose read only value that the format can use */
+ /** general purpose read only value that the format can use */
int value;
- /* start/resume playing - only meaningful if using a network based format
+ /** start/resume playing - only meaningful if using a network based format
(RTSP) */
int (*read_play)(struct AVFormatContext *);
- /* pause playing - only meaningful if using a network based format
+ /** pause playing - only meaningful if using a network based format
(RTSP) */
int (*read_pause)(struct AVFormatContext *);
+ const struct AVCodecTag **codec_tag;
+
/* private fields */
struct AVInputFormat *next;
} AVInputFormat;
@@ -227,13 +258,13 @@ typedef struct AVIndexEntry {
#define AVINDEX_KEYFRAME 0x0001
int flags:2;
int size:30; //yeah trying to keep the size of this small to reduce memory requirements (its 24 vs 32 byte due to possible 8byte align)
- int min_distance; /* min distance between this and the previous keyframe, used to avoid unneeded searching */
+ int min_distance; /**< min distance between this and the previous keyframe, used to avoid unneeded searching */
} AVIndexEntry;
typedef struct AVStream {
- int index; /* stream index in AVFormatContext */
- int id; /* format specific stream id */
- AVCodecContext *codec; /* codec context */
+ int index; /**< stream index in AVFormatContext */
+ int id; /**< format specific stream id */
+ AVCodecContext *codec; /**< codec context */
/**
* real base frame rate of the stream.
* this is the lowest framerate with which all timestamps can be
@@ -244,10 +275,12 @@ typedef struct AVStream {
*/
AVRational r_frame_rate;
void *priv_data;
+#if LIBAVFORMAT_VERSION_INT < (52<<16)
/* internal data used in av_find_stream_info() */
int64_t codec_info_duration;
int codec_info_nb_frames;
- /* encoding: PTS generation when outputing stream */
+#endif
+ /** encoding: PTS generation when outputing stream */
AVFrac pts;
/**
@@ -257,22 +290,22 @@ typedef struct AVStream {
* identically 1.
*/
AVRational time_base;
- int pts_wrap_bits; /* number of bits in pts (used for wrapping control) */
+ int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
/* ffmpeg.c private use */
- int stream_copy; /* if TRUE, just copy stream */
+ int stream_copy; /**< if set, just copy stream */
enum AVDiscard discard; ///< selects which packets can be discarded at will and dont need to be demuxed
//FIXME move stuff to a flags field?
- /* quality, as it has been removed from AVCodecContext and put in AVVideoFrame
+ /** quality, as it has been removed from AVCodecContext and put in AVVideoFrame
* MN:dunno if thats the right place, for it */
float quality;
- /* decoding: position of the first frame of the component, in
+ /** decoding: position of the first frame of the component, in
AV_TIME_BASE fractional seconds. */
int64_t start_time;
- /* decoding: duration of the stream, in AV_TIME_BASE fractional
+ /** decoding: duration of the stream, in AV_TIME_BASE fractional
seconds. */
int64_t duration;
- char language[4]; /* ISO 639 3-letter language code (empty string if undefined) */
+ char language[4]; /**< ISO 639 3-letter language code (empty string if undefined) */
/* av_read_frame() support */
int need_parsing; ///< 1->full parsing needed, 2->only parse headers dont repack
@@ -282,7 +315,7 @@ typedef struct AVStream {
int last_IP_duration;
int64_t last_IP_pts;
/* av_seek_frame() support */
- AVIndexEntry *index_entries; /* only used if the format does not
+ AVIndexEntry *index_entries; /**< only used if the format does not
support seeking natively */
int nb_index_entries;
unsigned int index_entries_allocated_size;
@@ -293,22 +326,22 @@ typedef struct AVStream {
int64_t pts_buffer[MAX_REORDER_DELAY+1];
} AVStream;
-#define AVFMTCTX_NOHEADER 0x0001 /* signal that no header is present
+#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present
(streams are added dynamically) */
#define MAX_STREAMS 20
/* format I/O context */
typedef struct AVFormatContext {
- const AVClass *av_class; /* set by av_alloc_format_context */
+ const AVClass *av_class; /**< set by av_alloc_format_context */
/* can only be iformat or oformat, not both at the same time */
struct AVInputFormat *iformat;
struct AVOutputFormat *oformat;
void *priv_data;
ByteIOContext pb;
- int nb_streams;
+ unsigned int nb_streams;
AVStream *streams[MAX_STREAMS];
- char filename[1024]; /* input or output filename */
+ char filename[1024]; /**< input or output filename */
/* stream info */
int64_t timestamp;
char title[512];
@@ -316,28 +349,28 @@ typedef struct AVFormatContext {
char copyright[512];
char comment[512];
char album[512];
- int year; /* ID3 year, 0 if none */
- int track; /* track number, 0 if none */
- char genre[32]; /* ID3 genre */
+ int year; /**< ID3 year, 0 if none */
+ int track; /**< track number, 0 if none */
+ char genre[32]; /**< ID3 genre */
- int ctx_flags; /* format specific flags, see AVFMTCTX_xx */
+ int ctx_flags; /**< format specific flags, see AVFMTCTX_xx */
/* private data for pts handling (do not modify directly) */
- /* This buffer is only needed when packets were already buffered but
+ /** This buffer is only needed when packets were already buffered but
not decoded, for example to get the codec parameters in mpeg
streams */
struct AVPacketList *packet_buffer;
- /* decoding: position of the first frame of the component, in
+ /** decoding: position of the first frame of the component, in
AV_TIME_BASE fractional seconds. NEVER set this value directly:
it is deduced from the AVStream values. */
int64_t start_time;
- /* decoding: duration of the stream, in AV_TIME_BASE fractional
+ /** decoding: duration of the stream, in AV_TIME_BASE fractional
seconds. NEVER set this value directly: it is deduced from the
AVStream values. */
int64_t duration;
- /* decoding: total file size. 0 if unknown */
+ /** decoding: total file size. 0 if unknown */
int64_t file_size;
- /* decoding: total stream bitrate in bit/s, 0 if not
+ /** decoding: total stream bitrate in bit/s, 0 if not
available. Never set it directly if the file_size and the
duration are known as ffmpeg can compute it automatically. */
int bit_rate;
@@ -349,7 +382,7 @@ typedef struct AVFormatContext {
AVPacket cur_pkt;
/* av_seek_frame() support */
- int64_t data_offset; /* offset of the first packet */
+ int64_t data_offset; /**< offset of the first packet */
int index_built;
int mux_rate;
@@ -359,7 +392,7 @@ typedef struct AVFormatContext {
#define AVFMT_NOOUTPUTLOOP -1
#define AVFMT_INFINITEOUTPUTLOOP 0
- /* number of times to loop output in formats that support it */
+ /** number of times to loop output in formats that support it */
int loop_output;
int flags;
@@ -367,8 +400,16 @@ typedef struct AVFormatContext {
#define AVFMT_FLAG_IGNIDX 0x0002 ///< ignore index
int loop_input;
- /* decoding: size of data to probe; encoding unused */
+ /** decoding: size of data to probe; encoding unused */
unsigned int probesize;
+
+ /**
+ * maximum duration in AV_TIME_BASE units over which the input should be analyzed in av_find_stream_info()
+ */
+ int max_analyze_duration;
+
+ const uint8_t *key;
+ int keylen;
} AVFormatContext;
typedef struct AVPacketList {
@@ -395,43 +436,207 @@ AVOutputFormat *guess_stream_format(const char *short_name,
const char *filename, const char *mime_type);
AVOutputFormat *guess_format(const char *short_name,
const char *filename, const char *mime_type);
+
+/**
+ * Guesses the codec id based upon muxer and filename.
+ */
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
const char *filename, const char *mime_type, enum CodecType type);
+/**
+ * Send a nice hexadecimal dump of a buffer to the specified file stream.
+ *
+ * @param f The file stream pointer where the dump should be sent to.
+ * @param buf buffer
+ * @param size buffer size
+ *
+ * @see av_hex_dump_log, av_pkt_dump, av_pkt_dump_log
+ */
void av_hex_dump(FILE *f, uint8_t *buf, int size);
+
+/**
+ * Send a nice hexadecimal dump of a buffer to the log.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message, lower values signifying
+ * higher importance.
+ * @param buf buffer
+ * @param size buffer size
+ *
+ * @see av_hex_dump, av_pkt_dump, av_pkt_dump_log
+ */
+void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size);
+
+/**
+ * Send a nice dump of a packet to the specified file stream.
+ *
+ * @param f The file stream pointer where the dump should be sent to.
+ * @param pkt packet to dump
+ * @param dump_payload true if the payload must be displayed too
+ */
void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload);
+/**
+ * Send a nice dump of a packet to the log.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message, lower values signifying
+ * higher importance.
+ * @param pkt packet to dump
+ * @param dump_payload true if the payload must be displayed too
+ */
+void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload);
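
Both dump helpers are mainly debugging aids; a short sketch of how they might be used on the first packet of an already opened context (log level and hex-dump length are arbitrary):

#include <stdio.h>
#include "avformat.h"

/* Dump the first packet of 'ic': header fields to the log, payload start as hex. */
static void dump_first_packet(AVFormatContext *ic)
{
    AVPacket pkt;

    if (av_read_frame(ic, &pkt) >= 0) {
        av_pkt_dump_log(ic, AV_LOG_DEBUG, &pkt, 0);
        av_hex_dump(stderr, pkt.data, pkt.size < 64 ? pkt.size : 64);
        av_free_packet(&pkt);
    }
}
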
+
void av_register_all(void);
+/** codec tag <-> codec id */
+enum CodecID av_codec_get_id(const struct AVCodecTag **tags, unsigned int tag);
+unsigned int av_codec_get_tag(const struct AVCodecTag **tags, enum CodecID id);
+
/* media file input */
+
+/**
+ * finds AVInputFormat based on input format's short name.
+ */
AVInputFormat *av_find_input_format(const char *short_name);
+
+/**
+ * Guess file format.
+ *
+ * @param is_opened whether the file is already opened, determines whether
+ * demuxers with or without AVFMT_NOFILE are probed
+ */
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);
+
+/**
+ * Allocates all the structures needed to read an input stream.
+ * This does not open the needed codecs for decoding the stream[s].
+ */
int av_open_input_stream(AVFormatContext **ic_ptr,
ByteIOContext *pb, const char *filename,
AVInputFormat *fmt, AVFormatParameters *ap);
+
+/**
+ * Open a media file as input. The codec are not opened. Only the file
+ * header (if present) is read.
+ *
+ * @param ic_ptr the opened media file handle is put here
+ * @param filename filename to open.
+ * @param fmt if non NULL, force the file format to use
+ * @param buf_size optional buffer size (zero if default is OK)
+ * @param ap additional parameters needed when opening the file (NULL if default)
+ * @return 0 if OK. AVERROR_xxx otherwise.
+ */
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
AVInputFormat *fmt,
int buf_size,
AVFormatParameters *ap);
-/* no av_open for output, so applications will need this: */
+/** no av_open for output, so applications will need this: */
AVFormatContext *av_alloc_format_context(void);
-#define AVERROR_UNKNOWN (-1) /* unknown error */
-#define AVERROR_IO (-2) /* i/o error */
-#define AVERROR_NUMEXPECTED (-3) /* number syntax expected in filename */
-#define AVERROR_INVALIDDATA (-4) /* invalid data found */
-#define AVERROR_NOMEM (-5) /* not enough memory */
-#define AVERROR_NOFMT (-6) /* unknown format */
-#define AVERROR_NOTSUPP (-7) /* operation not supported */
-
+/**
+ * Read packets of a media file to get stream information. This
+ * is useful for file formats with no headers such as MPEG. This
+ * function also computes the real frame rate in case of mpeg2 repeat
+ * frame mode.
+ * The logical file position is not changed by this function;
+ * examined packets may be buffered for later processing.
+ *
+ * @param ic media file handle
+ * @return >=0 if OK. AVERROR_xxx if error.
+ * @todo let the user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
+ */
int av_find_stream_info(AVFormatContext *ic);
+
+/**
+ * Read a transport packet from a media file.
+ *
+ * This function is obsolete and should never be used.
+ * Use av_read_frame() instead.
+ *
+ * @param s media file handle
+ * @param pkt is filled
+ * @return 0 if OK. AVERROR_xxx if error.
+ */
int av_read_packet(AVFormatContext *s, AVPacket *pkt);
+
+/**
+ * Return the next frame of a stream.
+ *
+ * The returned packet is valid
+ * until the next av_read_frame() or until av_close_input_file() and
+ * must be freed with av_free_packet. For video, the packet contains
+ * exactly one frame. For audio, it contains an integer number of
+ * frames if each frame has a known fixed size (e.g. PCM or ADPCM
+ * data). If the audio frames have a variable size (e.g. MPEG audio),
+ * then it contains one frame.
+ *
+ * pkt->pts, pkt->dts and pkt->duration are always set to correct
+ * values in AVStream.time_base units (and guessed if the format cannot
+ * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
+ * has B frames, so it is better to rely on pkt->dts if you do not
+ * decompress the payload.
+ *
+ * @return 0 if OK, < 0 if error or end of file.
+ */
int av_read_frame(AVFormatContext *s, AVPacket *pkt);
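
Put together with av_open_input_file() and av_find_stream_info() above, the usual demuxing sequence looks roughly like this (error handling is trimmed; the filename is whatever the caller provides):

#include "avformat.h"

/* Open a file, probe its streams and read every packet. */
static int demux_file(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;

    av_register_all();
    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;
    if (av_find_stream_info(ic) < 0) {
        av_close_input_file(ic);
        return -1;
    }
    dump_format(ic, 0, filename, 0);            /* print what was found */
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index tells which AVStream the data belongs to */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
    return 0;
}
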
+
+/**
+ * Seek to the key frame at timestamp.
+ * 'timestamp' in 'stream_index'.
+ * @param stream_index If stream_index is (-1), a default
+ * stream is selected, and timestamp is automatically converted
+ * from AV_TIME_BASE units to the stream specific time_base.
+ * @param timestamp timestamp in AVStream.time_base units
+ * or if there is no stream specified then in AV_TIME_BASE units
+ * @param flags flags which select direction and seeking mode
+ * @return >= 0 on success
+ */
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
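
Since a stream_index of -1 makes the timestamp AV_TIME_BASE based, seeking to an absolute position in seconds is just a multiplication; a sketch:

/* Seek to 'seconds' from the start, landing on a keyframe at or before
 * the requested position. */
static int seek_to_seconds(AVFormatContext *ic, double seconds)
{
    int64_t ts = (int64_t)(seconds * AV_TIME_BASE);

    return av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);
}
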
+
+/**
+ * start playing a network based stream (e.g. RTSP stream) at the
+ * current position
+ */
int av_read_play(AVFormatContext *s);
+
+/**
+ * Pause a network based stream (e.g. RTSP stream).
+ *
+ * Use av_read_play() to resume it.
+ */
int av_read_pause(AVFormatContext *s);
+
+/**
+ * Close a media file (but not its codecs).
+ *
+ * @param s media file handle
+ */
void av_close_input_file(AVFormatContext *s);
+
+/**
+ * Add a new stream to a media file.
+ *
+ * Can only be called in the read_header() function. If the flag
+ * AVFMTCTX_NOHEADER is in the format context, then new streams
+ * can be added in read_packet too.
+ *
+ * @param s media file handle
+ * @param id file format dependent stream id
+ */
AVStream *av_new_stream(AVFormatContext *s, int id);
+
+/**
+ * Set the pts for a given stream.
+ *
+ * @param s stream
+ * @param pts_wrap_bits number of bits effectively used by the pts
+ * (used for wrap control, 33 is the value for MPEG)
+ * @param pts_num numerator to convert to seconds (MPEG: 1)
+ * @param pts_den denominator to convert to seconds (MPEG: 90000)
+ */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
int pts_num, int pts_den);
@@ -440,28 +645,147 @@ void av_set_pts_info(AVStream *s, int pts_wrap_bits,
#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non keyframes
int av_find_default_stream_index(AVFormatContext *s);
+
+/**
+ * Gets the index for a specific timestamp.
+ * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
+ * the timestamp which is <= the requested one; if backward is 0
+ * then it will be >=.
+ * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
+ * @return < 0 if no such timestamp could be found
+ */
int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
+
+/**
+ * Add an index entry into a sorted list, updating it if it is already there.
+ *
+ * @param timestamp timestamp in the timebase of the given stream
+ */
int av_add_index_entry(AVStream *st,
int64_t pos, int64_t timestamp, int size, int distance, int flags);
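
A demuxer that records keyframe positions with av_add_index_entry() while reading can answer later seeks from that index via av_index_search_timestamp(); a sketch with illustrative field values:

/* Remember a keyframe position, then find it again later. */
static void index_example(AVStream *st, int64_t pos, int64_t ts, int size)
{
    int idx;

    av_add_index_entry(st, pos, ts, size, 0 /*distance*/, AVINDEX_KEYFRAME);

    idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (idx >= 0) {
        /* st->index_entries[idx].pos is where to url_fseek() to */
    }
}
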
+
+/**
+ * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
+ * This isn't supposed to be called directly by a user application, but by demuxers.
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags);
+
+/**
+ * Updates cur_dts of all streams based on given timestamp and AVStream.
+ *
+ * Stream ref_st is unchanged; the others have cur_dts set in their native time base.
+ * Only needed for timestamp wrapping or if (dts not set and pts!=dts).
+ * @param timestamp new dts expressed in time_base of param ref_st
+ * @param ref_st reference stream giving time_base of param timestamp
+ */
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
+
+/**
+ * Does a binary search using read_timestamp().
+ * This isn't supposed to be called directly by a user application, but by demuxers.
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
-/* media file output */
+/** media file output */
int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
+
+/**
+ * allocate the stream private data and write the stream header to an
+ * output media file
+ *
+ * @param s media file handle
+ * @return 0 if OK. AVERROR_xxx if error.
+ */
int av_write_header(AVFormatContext *s);
+
+/**
+ * Write a packet to an output media file.
+ *
+ * The packet shall contain one audio or video frame.
+ * The packet must be correctly interleaved according to the container specification,
+ * if not then av_interleaved_write_frame must be used
+ *
+ * @param s media file handle
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
+ * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
+ */
int av_write_frame(AVFormatContext *s, AVPacket *pkt);
+
+/**
+ * Writes a packet to an output media file ensuring correct interleaving.
+ *
+ * The packet must contain one audio or video frame.
+ * If the packets are already correctly interleaved the application should
+ * call av_write_frame() instead as it is slightly faster. It is also important
+ * to keep in mind that completely non-interleaved input will need huge amounts
+ * of memory to interleave with this, so it is preferable to interleave at the
+ * demuxer level.
+ *
+ * @param s media file handle
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
+ * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
+ */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
+
+/**
+ * Interleave a packet per DTS in an output media file.
+ *
+ * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
+ * so they cannot be used after it; note that calling av_free_packet() on them is still safe.
+ *
+ * @param s media file handle
+ * @param out the interleaved packet will be output here
+ * @param in the input packet
+ * @param flush 1 if no further packets are available as input and all
+ * remaining packets should be output
+ * @return 1 if a packet was output, 0 if no packet could be output,
+ * < 0 if an error occurred
+ */
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush);
+/**
+ * @brief Write the stream trailer to an output media file and
+ * free the file private data.
+ *
+ * @param s media file handle
+ * @return 0 if OK. AVERROR_xxx if error.
+ */
int av_write_trailer(AVFormatContext *s);
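
On the muxing side the calls documented above run in a fixed order: pick a format, create the streams, write the header, feed interleaved packets, then write the trailer. A trimmed sketch, with codec setup and packet production left as comments since they depend on the application:

#include <stdio.h>
#include "avformat.h"

/* Minimal muxing skeleton; most error handling omitted. */
static int mux_file(const char *filename)
{
    AVFormatContext *oc;
    AVOutputFormat *fmt;
    AVStream *st;

    av_register_all();
    fmt = guess_format(NULL, filename, NULL);       /* pick the muxer from the name */
    if (!fmt)
        return -1;
    oc = av_alloc_format_context();
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    st = av_new_stream(oc, 0);
    if (!st)
        return -1;
    /* ... fill st->codec (codec_id, codec_type, time_base, ...) here ... */

    if (av_set_parameters(oc, NULL) < 0)
        return -1;
    if (!(fmt->flags & AVFMT_NOFILE) &&
        url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
        return -1;
    av_write_header(oc);
    /* for each encoded packet: av_interleaved_write_frame(oc, &pkt); */
    av_write_trailer(oc);
    if (!(fmt->flags & AVFMT_NOFILE))
        url_fclose(&oc->pb);
    return 0;
}
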
void dump_format(AVFormatContext *ic,
int index,
const char *url,
int is_output);
+
+/**
+ * parses width and height out of string str.
+ */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str);
+
+/**
+ * Converts frame rate from string to a fraction.
+ */
int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg);
+
+/**
+ * Converts date string to number of seconds since Jan 1st, 1970.
+ *
+ * @code
+ * Syntax:
+ * - If not a duration:
+ * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
+ * Time is localtime unless Z is suffixed to the end. In this case GMT
+ * Return the date in micro seconds since 1970
+ *
+ * - If a duration:
+ * HH[:MM[:SS[.m...]]]
+ * S+[.m...]
+ * @endcode
+ */
int64_t parse_date(const char *datestr, int duration);
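
Both forms described in the @code block above come back as microseconds; a quick sketch:

#include "avformat.h"

/* parse_date() returns microseconds in both modes. */
static void parse_date_example(void)
{
    int64_t when = parse_date("2007-03-09 12:00:00", 0);  /* absolute date/time */
    int64_t span = parse_date("00:01:30.5", 1);           /* duration: 90.5s -> 90500000 */

    (void)when; (void)span;
}
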
int64_t av_gettime(void);
@@ -472,10 +796,35 @@ offset_t ffm_read_write_index(int fd);
void ffm_write_write_index(int fd, offset_t pos);
void ffm_set_write_index(AVFormatContext *s, offset_t pos, offset_t file_size);
+/**
+ * Attempts to find a specific tag in a URL.
+ *
+ * Syntax: '?tag1=val1&tag2=val2...'. Only a small amount of URL decoding is done.
+ * Return 1 if found.
+ */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
+/**
+ * Returns in 'buf' the path with '%d' replaced by number.
+ *
+ * Also handles the '%0nd' format where 'n' is the total number
+ * of digits and '%%'.
+ *
+ * @param buf destination buffer
+ * @param buf_size destination buffer size
+ * @param path numbered sequence string
+ * @param number frame number
+ * @return 0 if OK, -1 if format error.
+ */
int av_get_frame_filename(char *buf, int buf_size,
const char *path, int number);
+
+/**
+ * Check whether filename actually is a numbered sequence generator.
+ *
+ * @param filename possible numbered sequence string
+ * @return 1 if a valid numbered sequence string, 0 otherwise.
+ */
int av_filename_number_test(const char *filename);
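
The two helpers above go together: check that a pattern is a usable numbered sequence, then expand it per frame. A sketch:

#include <stdio.h>
#include "avformat.h"

/* Expand an image-sequence pattern for frame number 5. */
static void frame_filename_example(void)
{
    char buf[1024];

    if (av_filename_number_test("img%03d.jpg") &&
        av_get_frame_filename(buf, sizeof(buf), "img%03d.jpg", 5) == 0)
        printf("%s\n", buf);   /* prints "img005.jpg" */
}
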
/* grab specific */
diff --git a/contrib/ffmpeg/libavformat/avidec.c b/contrib/ffmpeg/libavformat/avidec.c
index d1af79fa3..23c130ab7 100644
--- a/contrib/ffmpeg/libavformat/avidec.c
+++ b/contrib/ffmpeg/libavformat/avidec.c
@@ -82,6 +82,9 @@ static int get_riff(AVIContext *avi, ByteIOContext *pb)
avi->riff_end = get_le32(pb); /* RIFF chunk size */
avi->riff_end += url_ftell(pb); /* RIFF chunk end */
tag = get_le32(pb);
+ if(tag == MKTAG('A', 'V', 'I', 0x19))
+ av_log(NULL, AV_LOG_INFO, "file has been generated with a totally broken muxer\n");
+ else
if (tag != MKTAG('A', 'V', 'I', ' ') && tag != MKTAG('A', 'V', 'I', 'X'))
return -1;
@@ -213,7 +216,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
uint32_t tag, tag1, handler;
int codec_type, stream_index, frame_period, bit_rate;
unsigned int size, nb_frames;
- int i, n;
+ int i;
AVStream *st;
AVIStream *ast = NULL;
char str_track[4];
@@ -266,10 +269,22 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
avi->non_interleaved |= get_le32(pb) & AVIF_MUSTUSEINDEX;
url_fskip(pb, 2 * 4);
- n = get_le32(pb);
- for(i=0;i<n;i++) {
- AVIStream *ast;
- st = av_new_stream(s, i);
+ get_le32(pb);
+
+ url_fskip(pb, size - 7 * 4);
+ break;
+ case MKTAG('s', 't', 'r', 'h'):
+ /* stream header */
+
+ tag1 = get_le32(pb);
+ handler = get_le32(pb); /* codec tag */
+
+ if(tag1 == MKTAG('p', 'a', 'd', 's')){
+ url_fskip(pb, size - 8);
+ break;
+ }else{
+ stream_index++;
+ st = av_new_stream(s, stream_index);
if (!st)
goto fail;
@@ -278,17 +293,13 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
goto fail;
st->priv_data = ast;
}
- url_fskip(pb, size - 7 * 4);
- break;
- case MKTAG('s', 't', 'r', 'h'):
- /* stream header */
- stream_index++;
- tag1 = get_le32(pb);
- handler = get_le32(pb); /* codec tag */
+
#ifdef DEBUG
print_tag("strh", tag1, -1);
#endif
if(tag1 == MKTAG('i', 'a', 'v', 's') || tag1 == MKTAG('i', 'v', 'a', 's')){
+ int64_t dv_dur;
+
/*
* After some consideration -- I don't think we
* have to support anything but DV in a type1 AVIs.
@@ -314,20 +325,24 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
url_fskip(pb, 3 * 4);
ast->scale = get_le32(pb);
ast->rate = get_le32(pb);
+ url_fskip(pb, 4); /* start time */
+
+ dv_dur = get_le32(pb);
+ if (ast->scale > 0 && ast->rate > 0 && dv_dur > 0) {
+ dv_dur *= AV_TIME_BASE;
+ s->duration = av_rescale(dv_dur, ast->scale, ast->rate);
+ }
+ /*
+ * else, leave duration alone; timing estimation in utils.c
+ * will make a guess based on bit rate.
+ */
+
stream_index = s->nb_streams - 1;
- url_fskip(pb, size - 7*4);
+ url_fskip(pb, size - 9*4);
break;
}
- if (stream_index >= s->nb_streams) {
- url_fskip(pb, size - 8);
- /* ignore padding stream */
- if (tag1 == MKTAG('p', 'a', 'd', 's'))
- stream_index--;
- break;
- }
- st = s->streams[stream_index];
- ast = st->priv_data;
+ assert(stream_index < s->nb_streams);
st->codec->stream_codec_tag= handler;
get_le32(pb); /* flags */
@@ -370,10 +385,6 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
//FIXME
codec_type = CODEC_TYPE_DATA; //CODEC_TYPE_SUB ? FIXME
break;
- case MKTAG('p', 'a', 'd', 's'):
- codec_type = CODEC_TYPE_UNKNOWN;
- stream_index--;
- break;
default:
av_log(s, AV_LOG_ERROR, "unknown stream type %X\n", tag1);
goto fail;
@@ -383,7 +394,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
break;
case MKTAG('s', 't', 'r', 'f'):
/* stream header */
- if (stream_index >= s->nb_streams || avi->dv_demux) {
+ if (stream_index >= (unsigned)s->nb_streams || avi->dv_demux) {
url_fskip(pb, size);
} else {
st = s->streams[stream_index];
@@ -492,6 +503,13 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
sscanf(str_track, "%d", &s->track);
break;
default:
+ if(size > 1000000){
+ av_log(s, AV_LOG_ERROR, "well something went wrong during header parsing, "
+ "ill ignore it and try to continue anyway\n");
+ avi->movi_list = url_ftell(pb) - 4;
+ avi->movi_end = url_fsize(pb);
+ goto end_of_header;
+ }
/* skip tag */
size += (size & 1);
url_fskip(pb, size);
@@ -612,21 +630,16 @@ resync:
pkt->stream_index = avi->stream_index;
if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
- if(st->index_entries){
- AVIndexEntry *e;
- int index;
+ AVIndexEntry *e;
+ int index;
+ assert(st->index_entries);
- index= av_index_search_timestamp(st, pkt->dts, 0);
- e= &st->index_entries[index];
+ index= av_index_search_timestamp(st, pkt->dts, 0);
+ e= &st->index_entries[index];
- if(index >= 0 && e->timestamp == ast->frame_offset){
- if (e->flags & AVINDEX_KEYFRAME)
- pkt->flags |= PKT_FLAG_KEY;
- }
- } else {
- /* if no index, better to say that all frames
- are key frames */
- pkt->flags |= PKT_FLAG_KEY;
+ if(index >= 0 && e->timestamp == ast->frame_offset){
+ if (e->flags & AVINDEX_KEYFRAME)
+ pkt->flags |= PKT_FLAG_KEY;
}
} else {
pkt->flags |= PKT_FLAG_KEY;
@@ -640,10 +653,6 @@ resync:
if(!ast->remaining){
avi->stream_index= -1;
ast->packet_size= 0;
- if (size & 1) {
- get_byte(pb);
- size++;
- }
}
return size;
@@ -725,6 +734,13 @@ resync:
avi->stream_index= n;
ast->packet_size= size + 8;
ast->remaining= size;
+
+ {
+ uint64_t pos= url_ftell(pb) - 8;
+ if(!st->index_entries || !st->nb_index_entries || st->index_entries[st->nb_index_entries - 1].pos < pos){
+ av_add_index_entry(st, pos, ast->frame_offset / FFMAX(1, ast->sample_size), size, 0, AVINDEX_KEYFRAME);
+ }
+ }
goto resync;
}
}
@@ -903,6 +919,21 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
// av_log(NULL, AV_LOG_DEBUG, "XX %"PRId64" %d %"PRId64"\n", timestamp, index, st->index_entries[index].timestamp);
+ if (ENABLE_DV_DEMUXER && avi->dv_demux) {
+ /* One and only one real stream for DV in AVI, and it has video */
+ /* offsets. Calling with other stream indices should have failed */
+ /* the av_index_search_timestamp call above. */
+ assert(stream_index == 0);
+
+ /* Feed the DV video stream version of the timestamp to the */
+ /* DV demux so it can synth correct timestamps */
+ dv_offset_reset(avi->dv_demux, timestamp);
+
+ url_fseek(&s->pb, pos, SEEK_SET);
+ avi->stream_index= -1;
+ return 0;
+ }
+
for(i = 0; i < s->nb_streams; i++) {
AVStream *st2 = s->streams[i];
AVIStream *ast2 = st2->priv_data;
@@ -937,8 +968,6 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
ast2->frame_offset *=ast2->sample_size;
}
- if (ENABLE_DV_DEMUXER && avi->dv_demux)
- dv_flush_audio_packets(avi->dv_demux);
/* do the seek */
url_fseek(&s->pb, pos, SEEK_SET);
avi->stream_index= -1;
@@ -971,7 +1000,7 @@ static int avi_probe(AVProbeData *p)
if (p->buf[0] == 'R' && p->buf[1] == 'I' &&
p->buf[2] == 'F' && p->buf[3] == 'F' &&
p->buf[8] == 'A' && p->buf[9] == 'V' &&
- p->buf[10] == 'I' && p->buf[11] == ' ')
+ p->buf[10] == 'I' && (p->buf[11] == ' ' || p->buf[11] == 0x19))
return AVPROBE_SCORE_MAX;
else
return 0;
diff --git a/contrib/ffmpeg/libavformat/avienc.c b/contrib/ffmpeg/libavformat/avienc.c
index 296608704..ac8b2670d 100644
--- a/contrib/ffmpeg/libavformat/avienc.c
+++ b/contrib/ffmpeg/libavformat/avienc.c
@@ -163,7 +163,7 @@ static int avi_write_header(AVFormatContext *s)
nb_frames = 0;
if(video_enc){
- put_le32(pb, (uint32_t)(int64_t_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
+ put_le32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
} else {
put_le32(pb, 0);
}
@@ -519,38 +519,37 @@ static int avi_write_trailer(AVFormatContext *s)
int i, j, n, nb_frames;
offset_t file_size;
- if (!url_is_streamed(pb))
- {
- if (avi->riff_id == 1) {
- end_tag(pb, avi->movi_list);
- res = avi_write_idx1(s);
- end_tag(pb, avi->riff_start);
- } else {
- avi_write_ix(s);
- end_tag(pb, avi->movi_list);
- end_tag(pb, avi->riff_start);
-
- file_size = url_ftell(pb);
- url_fseek(pb, avi->odml_list - 8, SEEK_SET);
- put_tag(pb, "LIST"); /* Making this AVI OpenDML one */
- url_fskip(pb, 16);
-
- for (n=nb_frames=0;n<s->nb_streams;n++) {
- AVCodecContext *stream = s->streams[n]->codec;
- if (stream->codec_type == CODEC_TYPE_VIDEO) {
- if (nb_frames < avi->packet_count[n])
- nb_frames = avi->packet_count[n];
- } else {
- if (stream->codec_id == CODEC_ID_MP2 || stream->codec_id == CODEC_ID_MP3) {
- nb_frames += avi->packet_count[n];
+ if (!url_is_streamed(pb)){
+ if (avi->riff_id == 1) {
+ end_tag(pb, avi->movi_list);
+ res = avi_write_idx1(s);
+ end_tag(pb, avi->riff_start);
+ } else {
+ avi_write_ix(s);
+ end_tag(pb, avi->movi_list);
+ end_tag(pb, avi->riff_start);
+
+ file_size = url_ftell(pb);
+ url_fseek(pb, avi->odml_list - 8, SEEK_SET);
+ put_tag(pb, "LIST"); /* Making this AVI OpenDML one */
+ url_fskip(pb, 16);
+
+ for (n=nb_frames=0;n<s->nb_streams;n++) {
+ AVCodecContext *stream = s->streams[n]->codec;
+ if (stream->codec_type == CODEC_TYPE_VIDEO) {
+ if (nb_frames < avi->packet_count[n])
+ nb_frames = avi->packet_count[n];
+ } else {
+ if (stream->codec_id == CODEC_ID_MP2 || stream->codec_id == CODEC_ID_MP3) {
+ nb_frames += avi->packet_count[n];
+ }
}
}
- }
- put_le32(pb, nb_frames);
- url_fseek(pb, file_size, SEEK_SET);
+ put_le32(pb, nb_frames);
+ url_fseek(pb, file_size, SEEK_SET);
- avi_write_counters(s, avi->riff_id);
- }
+ avi_write_counters(s, avi->riff_id);
+ }
}
put_flush_packet(pb);
@@ -576,5 +575,6 @@ AVOutputFormat avi_muxer = {
avi_write_header,
avi_write_packet,
avi_write_trailer,
+ .codec_tag= (const AVCodecTag*[]){codec_bmp_tags, codec_wav_tags, 0},
};
#endif //CONFIG_AVI_MUXER
diff --git a/contrib/ffmpeg/libavformat/avio.c b/contrib/ffmpeg/libavformat/avio.c
index a2b8a8325..a22bd22f3 100644
--- a/contrib/ffmpeg/libavformat/avio.c
+++ b/contrib/ffmpeg/libavformat/avio.c
@@ -67,14 +67,17 @@ int url_open(URLContext **puc, const char *filename, int flags)
goto found;
up = up->next;
}
- err = -ENOENT;
+ err = AVERROR(ENOENT);
goto fail;
found:
- uc = av_malloc(sizeof(URLContext) + strlen(filename));
+ uc = av_malloc(sizeof(URLContext) + strlen(filename) + 1);
if (!uc) {
- err = -ENOMEM;
+ err = AVERROR(ENOMEM);
goto fail;
}
+#if LIBAVFORMAT_VERSION_INT >= (52<<16)
+ uc->filename = (char *) &uc[1];
+#endif
strcpy(uc->filename, filename);
uc->prot = up;
uc->flags = flags;
@@ -121,7 +124,7 @@ offset_t url_seek(URLContext *h, offset_t pos, int whence)
offset_t ret;
if (!h->prot->url_seek)
- return -EPIPE;
+ return AVERROR(EPIPE);
ret = h->prot->url_seek(h, pos, whence);
return ret;
}
@@ -148,20 +151,17 @@ offset_t url_filesize(URLContext *h)
{
offset_t pos, size;
- pos = url_seek(h, 0, SEEK_CUR);
- size = url_seek(h, -1, SEEK_END)+1;
- url_seek(h, pos, SEEK_SET);
+ size= url_seek(h, 0, AVSEEK_SIZE);
+ if(size<0){
+ pos = url_seek(h, 0, SEEK_CUR);
+ if ((size = url_seek(h, -1, SEEK_END)) < 0)
+ return size;
+ size++;
+ url_seek(h, pos, SEEK_SET);
+ }
return size;
}
-/*
- * Return the maximum packet size associated to packetized file
- * handle. If the file is not packetized (stream like http or file on
- * disk), then 0 is returned.
- *
- * @param h file handle
- * @return maximum packet size in bytes
- */
int url_get_max_packet_size(URLContext *h)
{
return h->max_packet_size;
@@ -178,12 +178,6 @@ static int default_interrupt_cb(void)
return 0;
}
-/**
- * The callback is called in blocking functions to test regulary if
- * asynchronous interruption is needed. -EINTR is returned in this
- * case by the interrupted function. 'NULL' means no interrupt
- * callback is given.
- */
void url_set_interrupt_cb(URLInterruptCB *interrupt_cb)
{
if (!interrupt_cb)
diff --git a/contrib/ffmpeg/libavformat/avio.h b/contrib/ffmpeg/libavformat/avio.h
index f0fd1a85c..be78c9a7b 100644
--- a/contrib/ffmpeg/libavformat/avio.h
+++ b/contrib/ffmpeg/libavformat/avio.h
@@ -30,10 +30,14 @@ typedef int64_t offset_t;
struct URLContext {
struct URLProtocol *prot;
int flags;
- int is_streamed; /* true if streamed (no seek possible), default = false */
- int max_packet_size; /* if non zero, the stream is packetized with this max packet size */
+ int is_streamed; /**< true if streamed (no seek possible), default = false */
+ int max_packet_size; /**< if non zero, the stream is packetized with this max packet size */
void *priv_data;
- char filename[1]; /* specified filename */
+#if LIBAVFORMAT_VERSION_INT >= (52<<16)
+ char *filename; /**< specified filename */
+#else
+ char filename[1]; /**< specified filename */
+#endif
};
typedef struct URLContext URLContext;
@@ -57,18 +61,36 @@ offset_t url_seek(URLContext *h, offset_t pos, int whence);
int url_close(URLContext *h);
int url_exist(const char *filename);
offset_t url_filesize(URLContext *h);
+
+/**
+ * Return the maximum packet size associated to packetized file
+ * handle. If the file is not packetized (stream like http or file on
+ * disk), then 0 is returned.
+ *
+ * @param h file handle
+ * @return maximum packet size in bytes
+ */
int url_get_max_packet_size(URLContext *h);
void url_get_filename(URLContext *h, char *buf, int buf_size);
-/* the callback is called in blocking functions to test regulary if
- asynchronous interruption is needed. -EINTR is returned in this
- case by the interrupted function. 'NULL' means no interrupt
- callback is given. */
+/**
+ * The callback is called in blocking functions to test regularly if
+ * asynchronous interruption is needed. AVERROR(EINTR) is returned
+ * in this case by the interrupted function. 'NULL' means no interrupt
+ * callback is given.
+ */
void url_set_interrupt_cb(URLInterruptCB *interrupt_cb);
/* not implemented */
int url_poll(URLPollEntry *poll_table, int n, int timeout);
+/**
+ * Passing this as the "whence" parameter to a seek function causes it to
+ * return the filesize without seeking anywhere; supporting this is optional.
+ * If it is not supported then the seek function will return <0.
+ */
+#define AVSEEK_SIZE 0x10000
+
typedef struct URLProtocol {
const char *name;
int (*url_open)(URLContext *h, const char *filename, int flags);
@@ -92,10 +114,10 @@ typedef struct {
int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);
int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
offset_t (*seek)(void *opaque, offset_t offset, int whence);
- offset_t pos; /* position in the file of the current buffer */
- int must_flush; /* true if the next seek should flush */
- int eof_reached; /* true if eof reached */
- int write_flag; /* true if open for writing */
+ offset_t pos; /**< position in the file of the current buffer */
+ int must_flush; /**< true if the next seek should flush */
+ int eof_reached; /**< true if eof reached */
+ int write_flag; /**< true if open for writing */
int is_streamed;
int max_packet_size;
unsigned long checksum;
@@ -135,18 +157,27 @@ int url_feof(ByteIOContext *s);
int url_ferror(ByteIOContext *s);
#define URL_EOF (-1)
+/** @note return URL_EOF (-1) if EOF */
int url_fgetc(ByteIOContext *s);
+
+/** @warning currently size is limited */
#ifdef __GNUC__
int url_fprintf(ByteIOContext *s, const char *fmt, ...) __attribute__ ((__format__ (__printf__, 2, 3)));
#else
int url_fprintf(ByteIOContext *s, const char *fmt, ...);
#endif
+
+/** @note unlike fgets, the EOL character is not returned and a whole
+    line is parsed. Returns NULL if the first character read was EOF */
char *url_fgets(ByteIOContext *s, char *buf, int buf_size);
void put_flush_packet(ByteIOContext *s);
int get_buffer(ByteIOContext *s, unsigned char *buf, int size);
int get_partial_buffer(ByteIOContext *s, unsigned char *buf, int size);
+
+/** @note return 0 if EOF, so you cannot use it if EOF handling is
+ necessary */
int get_byte(ByteIOContext *s);
unsigned int get_le24(ByteIOContext *s);
unsigned int get_le32(ByteIOContext *s);
@@ -165,17 +196,57 @@ static inline int url_is_streamed(ByteIOContext *s)
}
int url_fdopen(ByteIOContext *s, URLContext *h);
+
+/** @warning must be called before any I/O */
int url_setbufsize(ByteIOContext *s, int buf_size);
+
+/** @note when opened as read/write, the buffers are only used for
+ reading */
int url_fopen(ByteIOContext *s, const char *filename, int flags);
int url_fclose(ByteIOContext *s);
URLContext *url_fileno(ByteIOContext *s);
+
+/**
+ * Return the maximum packet size associated with a packetized buffered
+ * file handle. If the file is not packetized (e.g. streamed over http or
+ * stored on disk), then 0 is returned.
+ *
+ * @param h buffered file handle
+ * @return maximum packet size in bytes
+ */
int url_fget_max_packet_size(ByteIOContext *s);
int url_open_buf(ByteIOContext *s, uint8_t *buf, int buf_size, int flags);
+
+/** return the written or read size */
int url_close_buf(ByteIOContext *s);
+/**
+ * Open a write only memory stream.
+ *
+ * @param s new IO context
+ * @return zero if no error.
+ */
int url_open_dyn_buf(ByteIOContext *s);
+
+/**
+ * Open a write only packetized memory stream with a maximum packet
+ * size of 'max_packet_size'. The stream is stored in a memory buffer
+ * with a big endian 4 byte header giving the packet size in bytes.
+ *
+ * @param s new IO context
+ * @param max_packet_size maximum packet size (must be > 0)
+ * @return zero if no error.
+ */
int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size);
+
+/**
+ * Return the written size and a pointer to the buffer. The buffer
+ * must be freed with av_free().
+ * @param s IO context
+ * @param pbuffer pointer to a byte buffer
+ * @return the length of the byte buffer
+ */
int url_close_dyn_buf(ByteIOContext *s, uint8_t **pbuffer);
unsigned long get_checksum(ByteIOContext *s);
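
The new AVSEEK_SIZE whence value documented above lets url_filesize() ask a protocol for its size without disturbing the read position, falling back to the old seek-to-end trick only when the protocol returns a negative value. A minimal sketch of a seek callback honouring the query, using a made-up in-memory "file" type:

    #include <stdint.h>
    #include <stdio.h>

    #define AVSEEK_SIZE 0x10000          /* same value as in avio.h above */

    typedef int64_t offset_t;

    /* Made-up protocol state: a fixed-size in-memory "file". */
    typedef struct { offset_t pos, size; } MemFile;

    /* Seek callback supporting the optional AVSEEK_SIZE query: report the
     * total size without moving the position, so url_filesize() can skip
     * the seek-to-end/seek-back fallback. */
    static offset_t mem_seek(MemFile *f, offset_t pos, int whence)
    {
        if (whence == AVSEEK_SIZE)
            return f->size;
        if (whence == SEEK_SET)
            f->pos = pos;
        else if (whence == SEEK_CUR)
            f->pos += pos;
        return f->pos;
    }

    int main(void)
    {
        MemFile f = { 0, 1234 };
        printf("size=%lld pos=%lld\n",
               (long long)mem_seek(&f, 0, AVSEEK_SIZE), (long long)f.pos);
        return 0;
    }
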
diff --git a/contrib/ffmpeg/libavformat/aviobuf.c b/contrib/ffmpeg/libavformat/aviobuf.c
index 866641ad0..2cc247b62 100644
--- a/contrib/ffmpeg/libavformat/aviobuf.c
+++ b/contrib/ffmpeg/libavformat/aviobuf.c
@@ -117,7 +117,7 @@ offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
offset_t pos= s->pos - (s->write_flag ? 0 : (s->buf_end - s->buffer));
if (whence != SEEK_CUR && whence != SEEK_SET)
- return -EINVAL;
+ return AVERROR(EINVAL);
if (whence == SEEK_CUR) {
offset1 = pos + (s->buf_ptr - s->buffer);
@@ -136,6 +136,8 @@ offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
fill_buffer(s);
s->buf_ptr = s->buf_end + offset - s->pos;
} else {
+ offset_t res = AVERROR(EPIPE);
+
#if defined(CONFIG_MUXERS) || defined(CONFIG_NETWORK)
if (s->write_flag) {
flush_buffer(s);
@@ -146,8 +148,8 @@ offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
s->buf_end = s->buffer;
}
s->buf_ptr = s->buffer;
- if (!s->seek || s->seek(s->opaque, offset, SEEK_SET) == (offset_t)-EPIPE)
- return -EPIPE;
+ if (!s->seek || (res = s->seek(s->opaque, offset, SEEK_SET)) < 0)
+ return res;
s->pos = offset;
}
s->eof_reached = 0;
@@ -169,9 +171,14 @@ offset_t url_fsize(ByteIOContext *s)
offset_t size;
if (!s->seek)
- return -EPIPE;
- size = s->seek(s->opaque, -1, SEEK_END) + 1;
- s->seek(s->opaque, s->pos, SEEK_SET);
+ return AVERROR(EPIPE);
+ size = s->seek(s->opaque, 0, AVSEEK_SIZE);
+ if(size<0){
+ if ((size = s->seek(s->opaque, -1, SEEK_END)) < 0)
+ return size;
+ size++;
+ s->seek(s->opaque, s->pos, SEEK_SET);
+ }
return size;
}
@@ -185,7 +192,6 @@ int url_ferror(ByteIOContext *s)
return s->error;
}
-#if defined(CONFIG_MUXERS) || defined(CONFIG_PROTOCOLS)
void put_le32(ByteIOContext *s, unsigned int val)
{
put_byte(s, val);
@@ -252,7 +258,6 @@ void put_tag(ByteIOContext *s, const char *tag)
put_byte(s, *tag++);
}
}
-#endif //CONFIG_MUXERS || CONFIG_PROTOCOLS
/* Input stream */
@@ -298,8 +303,6 @@ void init_checksum(ByteIOContext *s, unsigned long (*update_checksum)(unsigned l
}
}
-/* NOTE: return 0 if EOF, so you cannot use it if EOF handling is
- necessary */
/* XXX: put an inline version */
int get_byte(ByteIOContext *s)
{
@@ -314,7 +317,6 @@ int get_byte(ByteIOContext *s)
}
}
-/* NOTE: return URL_EOF (-1) if EOF */
int url_fgetc(ByteIOContext *s)
{
if (s->buf_ptr < s->buf_end) {
@@ -506,7 +508,7 @@ int url_fdopen(ByteIOContext *s, URLContext *h)
}
buffer = av_malloc(buffer_size);
if (!buffer)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
if (init_put_byte(s, buffer, buffer_size,
(h->flags & URL_WRONLY || h->flags & URL_RDWR), h,
@@ -519,13 +521,12 @@ int url_fdopen(ByteIOContext *s, URLContext *h)
return 0;
}
-/* XXX: must be called before any I/O */
int url_setbufsize(ByteIOContext *s, int buf_size)
{
uint8_t *buffer;
buffer = av_malloc(buf_size);
if (!buffer)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
av_free(s->buffer);
s->buffer = buffer;
@@ -538,8 +539,6 @@ int url_setbufsize(ByteIOContext *s, int buf_size)
return 0;
}
-/* NOTE: when opened as read/write, the buffers are only used for
- reading */
int url_fopen(ByteIOContext *s, const char *filename, int flags)
{
URLContext *h;
@@ -571,7 +570,6 @@ URLContext *url_fileno(ByteIOContext *s)
}
#ifdef CONFIG_MUXERS
-/* XXX: currently size is limited */
int url_fprintf(ByteIOContext *s, const char *fmt, ...)
{
va_list ap;
@@ -586,8 +584,6 @@ int url_fprintf(ByteIOContext *s, const char *fmt, ...)
}
#endif //CONFIG_MUXERS
-/* note: unlike fgets, the EOL character is not returned and a whole
- line is parsed. return NULL if first char read was EOF */
char *url_fgets(ByteIOContext *s, char *buf, int buf_size)
{
int c;
@@ -609,14 +605,6 @@ char *url_fgets(ByteIOContext *s, char *buf, int buf_size)
return buf;
}
-/*
- * Return the maximum packet size associated to packetized buffered file
- * handle. If the file is not packetized (stream like http or file on
- * disk), then 0 is returned.
- *
- * @param h buffered file handle
- * @return maximum packet size in bytes
- */
int url_fget_max_packet_size(ByteIOContext *s)
{
return s->max_packet_size;
@@ -633,7 +621,6 @@ int url_open_buf(ByteIOContext *s, uint8_t *buf, int buf_size, int flags)
NULL, NULL, NULL, NULL);
}
-/* return the written or read size */
int url_close_buf(ByteIOContext *s)
{
put_flush_packet(s);
@@ -741,26 +728,11 @@ static int url_open_dyn_buf_internal(ByteIOContext *s, int max_packet_size)
return ret;
}
-/*
- * Open a write only memory stream.
- *
- * @param s new IO context
- * @return zero if no error.
- */
int url_open_dyn_buf(ByteIOContext *s)
{
return url_open_dyn_buf_internal(s, 0);
}
-/*
- * Open a write only packetized memory stream with a maximum packet
- * size of 'max_packet_size'. The stream is stored in a memory buffer
- * with a big endian 4 byte header giving the packet size in bytes.
- *
- * @param s new IO context
- * @param max_packet_size maximum packet size (must be > 0)
- * @return zero if no error.
- */
int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size)
{
if (max_packet_size <= 0)
@@ -768,13 +740,6 @@ int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size)
return url_open_dyn_buf_internal(s, max_packet_size);
}
-/*
- * Return the written size and a pointer to the buffer. The buffer
- * must be freed with av_free().
- * @param s IO context
- * @param pointer to a byte buffer
- * @return the length of the byte buffer
- */
int url_close_dyn_buf(ByteIOContext *s, uint8_t **pbuffer)
{
DynBuffer *d = s->opaque;
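
The dynamic-buffer helpers whose documentation moved into avio.h above (url_open_dyn_buf(), url_close_dyn_buf()) follow a simple open / write / close-and-take-buffer pattern. A short usage sketch against that documented API, with error handling trimmed; the returned buffer is the caller's to av_free():

    #include <stdint.h>
    #include "avformat.h"   /* ByteIOContext, url_open_dyn_buf(), put_byte(),
                             * url_close_dyn_buf() as documented above */

    /* Write a few bytes into a growable memory stream, then take ownership
     * of the buffer. Returns the written size; caller must av_free(*out). */
    static int dyn_buf_demo(uint8_t **out)
    {
        ByteIOContext pb;
        int i;

        if (url_open_dyn_buf(&pb) < 0)
            return -1;
        for (i = 0; i < 16; i++)
            put_byte(&pb, i);                 /* buffered writes */
        return url_close_dyn_buf(&pb, out);   /* flushes and hands back buffer */
    }
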
diff --git a/contrib/ffmpeg/libavformat/barpainet.h b/contrib/ffmpeg/libavformat/barpainet.h
deleted file mode 100644
index b50bf82b6..000000000
--- a/contrib/ffmpeg/libavformat/barpainet.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * copyright (c) 2002 Francois Revol
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef BARPA_INET_H
-#define BARPA_INET_H
-
-#include "config.h"
-
-#ifdef CONFIG_BEOS_NETSERVER
-
-# include <socket.h>
-int inet_aton (const char * str, struct in_addr * add);
-# define PF_INET AF_INET
-# define SO_SNDBUF 0x40000001
-
-/* fake */
-struct ip_mreq {
- struct in_addr imr_multiaddr; /* IP multicast address of group */
- struct in_addr imr_interface; /* local IP address of interface */
-};
-
-#include <netdb.h>
-
-#else
-# include <arpa/inet.h>
-#endif
-
-#endif /* BARPA_INET_H */
diff --git a/contrib/ffmpeg/libavformat/base64.c b/contrib/ffmpeg/libavformat/base64.c
deleted file mode 100644
index 6279244d3..000000000
--- a/contrib/ffmpeg/libavformat/base64.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Base64.c
- * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
-* @file base64.c
- * @brief Base64 Encode/Decode
- * @author Ryan Martell <rdm4@martellventures.com> (with lots of Michael)
- */
-
-#include "common.h"
-#include "base64.h"
-
-/* ---------------- private code */
-static uint8_t map2[] =
-{
- 0x3e, 0xff, 0xff, 0xff, 0x3f, 0x34, 0x35, 0x36,
- 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
- 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
- 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1a, 0x1b,
- 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
- 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
- 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33
-};
-
-int av_base64_decode(uint8_t * out, const char *in, int out_length)
-{
- int i, v;
- uint8_t *dst = out;
-
- v = 0;
- for (i = 0; in[i] && in[i] != '='; i++) {
- unsigned int index= in[i]-43;
- if (index>=(sizeof(map2)/sizeof(map2[0])) || map2[index] == 0xff)
- return -1;
- v = (v << 6) + map2[index];
- if (i & 3) {
- if (dst - out < out_length) {
- *dst++ = v >> (6 - 2 * (i & 3));
- }
- }
- }
-
- return (dst - out);
-}
-
-/*****************************************************************************
-* b64_encode: stolen from VLC's http.c
-* simplified by michael
-* fixed edge cases and made it work from data (vs. strings) by ryan.
-*****************************************************************************/
-
-char *av_base64_encode(uint8_t * src, int len)
-{
- static const char b64[] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
- char *ret, *dst;
- unsigned i_bits = 0;
- int i_shift = 0;
- int bytes_remaining = len;
-
- if (len < UINT_MAX / 4) {
- ret = dst = av_malloc(len * 4 / 3 + 12);
- } else
- return NULL;
-
- if (len) { // special edge case, what should we really do here?
- while (bytes_remaining) {
- i_bits = (i_bits << 8) + *src++;
- bytes_remaining--;
- i_shift += 8;
-
- do {
- *dst++ = b64[(i_bits << 6 >> i_shift) & 0x3f];
- i_shift -= 6;
- } while (i_shift > 6 || (bytes_remaining == 0 && i_shift > 0));
- }
- while ((dst - ret) & 3)
- *dst++ = '=';
- }
- *dst = '\0';
-
- return ret;
-}
-
-// #define TEST_BASE64
-
-#ifdef TEST_BASE64
-#include "avutil.h"
-
-int b64test()
-{
- int numerr = 0;
- int len;
- int numtest = 1;
- uint8_t decode[1000];
- struct test {
- void *data;
- int len;
- const char *result;
- } *t, tests[] = {
- {
- "", 0, ""}, {
- "1", 1, "MQ=="}, {
- "22", 2, "MjI="}, {
- "333", 3, "MzMz"}, {
- "4444", 4, "NDQ0NA=="}, {
- "55555", 5, "NTU1NTU="}, {
- "abc:def", 7, "YWJjOmRlZg=="}, {
- NULL}
- };
- for (t = tests; t->data; t++) {
- char *str;
-
- av_log(NULL, AV_LOG_ERROR, "Encoding %s...\n", (char *) t->data);
- str = av_base64_encode(t->data, t->len);
- if (str) {
- av_log(NULL, AV_LOG_ERROR, "Encoded to %s...\n", str);
- if (strcmp(str, t->result) != 0) {
- av_log(NULL, AV_LOG_ERROR, "failed test %d: %s != %s\n",
- numtest, str, t->result);
- numerr++;
- }
- av_free(str);
- }
-
- av_log(NULL, AV_LOG_ERROR, "Done encoding, about to decode...\n");
- len = av_base64_decode(decode, t->result, sizeof(decode));
- if (len != t->len) {
- av_log(NULL, AV_LOG_ERROR, "failed test %d: len %d != %d\n",
- numtest, len, t->len);
- numerr++;
- } else if (memcmp(decode, t->data, t->len) != 0) {
- av_log(NULL, AV_LOG_ERROR, "failed test %d: data\n", numtest);
- numerr++;
- } else {
- av_log(NULL, AV_LOG_ERROR, "Decoded to %s\n",
- (char *) t->data);
- }
- numtest++;
- }
-
-#undef srand
-#undef rand
-
- {
- int test_count;
- srand(123141); // time(NULL));
- for (test_count = 0; test_count < 100; test_count++) {
- int size = rand() % 1024;
- int ii;
- uint8_t *data;
- char *encoded_result;
-
- av_log(NULL, AV_LOG_ERROR, "Test %d: Size %d bytes...",
- test_count, size);
- data = (uint8_t *) av_malloc(size);
- for (ii = 0; ii < size; ii++) {
- data[ii] = rand() % 255;
- }
-
- encoded_result = av_base64_encode(data, size);
- if (encoded_result) {
- int decode_buffer_size = size + 10; // try without 10 as well
- uint8_t *decode_buffer = av_malloc(decode_buffer_size);
- if (decode_buffer) {
- int decoded_size =
- av_base64_decode(decode_buffer, encoded_result,
- decode_buffer_size);
-
- if (decoded_size != size) {
- av_log(NULL, AV_LOG_ERROR,
- "Decoded/Encoded size mismatch (%d != %d)\n",
- decoded_size, size);
- } else {
- if (memcmp(decode_buffer, data, decoded_size) == 0) {
- av_log(NULL, AV_LOG_ERROR, "Passed!\n");
- } else {
- av_log(NULL, AV_LOG_ERROR,
- "Failed (Data differs)!\n");
- }
- }
- av_free(decode_buffer);
- }
-
- av_free(encoded_result);
- }
- }
- }
-
- // these are invalid strings, that it currently decodes (which it probably shouldn't?)
- {
- uint8_t str[32];
- if (av_base64_decode(str, "M=M=", sizeof(str)) != -1) {
- av_log(NULL, AV_LOG_ERROR,
- "failed test %d: successful decode of `M=M='\n",
- numtest++);
- numerr++;
- }
- if (av_base64_decode(str, "MQ===", sizeof(str)) != -1) {
- av_log(NULL, AV_LOG_ERROR,
- "failed test %d: successful decode of `MQ==='\n",
- numtest++);
- numerr++;
- }
- }
-
- return numerr;
-}
-#endif
-
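
base64.c is removed from libavformat here, but the entry points keep the signatures visible in the deleted file. A round-trip sketch against those signatures; the include paths are assumptions, use whichever headers declare av_base64_encode()/av_base64_decode() and av_free() in your tree:

    #include <stdint.h>
    #include <string.h>
    #include "avutil.h"     /* av_free() */
    #include "base64.h"     /* assumed header for the av_base64_* prototypes */

    /* Round trip using the signatures from the deleted file: encode returns a
     * malloc'd, NUL-terminated string, decode writes at most out_length bytes
     * and returns the number of bytes produced. */
    static int base64_roundtrip(void)
    {
        uint8_t in[] = "abc:def", out[16];
        char *b64 = av_base64_encode(in, 7);   /* "YWJjOmRlZg==" per the old test */
        int n;

        if (!b64)
            return -1;
        n = av_base64_decode(out, b64, sizeof(out));
        av_free(b64);
        return (n == 7 && !memcmp(in, out, 7)) ? 0 : -1;
    }
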
diff --git a/contrib/ffmpeg/libavformat/beosaudio.cpp b/contrib/ffmpeg/libavformat/beosaudio.cpp
index 6ac45ebb2..6c16f0048 100644
--- a/contrib/ffmpeg/libavformat/beosaudio.cpp
+++ b/contrib/ffmpeg/libavformat/beosaudio.cpp
@@ -194,15 +194,15 @@ static int audio_open(AudioData *s, int is_output, const char *audio_device)
#ifndef HAVE_BSOUNDRECORDER
if (!is_output)
- return -EIO; /* not for now */
+ return AVERROR(EIO); /* not for now */
#endif
s->input_sem = create_sem(AUDIO_BUFFER_SIZE, "ffmpeg_ringbuffer_input");
if (s->input_sem < B_OK)
- return -EIO;
+ return AVERROR(EIO);
s->output_sem = create_sem(0, "ffmpeg_ringbuffer_output");
if (s->output_sem < B_OK) {
delete_sem(s->input_sem);
- return -EIO;
+ return AVERROR(EIO);
}
s->input_index = 0;
s->output_index = 0;
@@ -226,7 +226,7 @@ static int audio_open(AudioData *s, int is_output, const char *audio_device)
delete_sem(s->input_sem);
if (s->output_sem)
delete_sem(s->output_sem);
- return -EIO;
+ return AVERROR(EIO);
}
s->codec_id = (iformat.byte_order == B_MEDIA_LITTLE_ENDIAN)?CODEC_ID_PCM_S16LE:CODEC_ID_PCM_S16BE;
s->channels = iformat.channel_count;
@@ -252,7 +252,7 @@ static int audio_open(AudioData *s, int is_output, const char *audio_device)
delete_sem(s->input_sem);
if (s->output_sem)
delete_sem(s->output_sem);
- return -EIO;
+ return AVERROR(EIO);
}
s->player->SetCookie(s);
s->player->SetVolume(1.0);
@@ -293,7 +293,7 @@ static int audio_write_header(AVFormatContext *s1)
s->channels = st->codec->channels;
ret = audio_open(s, 1, NULL);
if (ret < 0)
- return -EIO;
+ return AVERROR(EIO);
return 0;
}
@@ -315,7 +315,7 @@ lat1 = s->player->Latency();
int amount;
len = MIN(size, AUDIO_BLOCK_SIZE);
if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK)
- return -EIO;
+ return AVERROR(EIO);
amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
memcpy(&s->buffer[s->input_index], buf, amount);
s->input_index += amount;
@@ -356,15 +356,15 @@ static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
st = av_new_stream(s1, 0);
if (!st) {
- return -ENOMEM;
+ return AVERROR(ENOMEM);
}
s->sample_rate = ap->sample_rate;
s->channels = ap->channels;
- ret = audio_open(s, 0, ap->device);
+ ret = audio_open(s, 0, s1->filename);
if (ret < 0) {
av_free(st);
- return -EIO;
+ return AVERROR(EIO);
}
/* take real parameters */
st->codec->codec_type = CODEC_TYPE_AUDIO;
@@ -384,7 +384,7 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
status_t err;
if (av_new_packet(pkt, s->frame_size) < 0)
- return -EIO;
+ return AVERROR(EIO);
buf = (unsigned char *)pkt->data;
size = pkt->size;
while (size > 0) {
@@ -393,7 +393,7 @@ static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
while ((err=acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL)) == B_INTERRUPTED);
if (err < B_OK) {
av_free_packet(pkt);
- return -EIO;
+ return AVERROR(EIO);
}
amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
memcpy(buf, &s->buffer[s->output_index], amount);
diff --git a/contrib/ffmpeg/libavformat/dc1394.c b/contrib/ffmpeg/libavformat/dc1394.c
index 5098c0fdf..bf140c466 100644
--- a/contrib/ffmpeg/libavformat/dc1394.c
+++ b/contrib/ffmpeg/libavformat/dc1394.c
@@ -42,7 +42,7 @@ struct dc1394_frame_format {
int frame_size_id;
} dc1394_frame_formats[] = {
{ 320, 240, PIX_FMT_UYVY422, MODE_320x240_YUV422 },
- { 640, 480, PIX_FMT_UYVY411, MODE_640x480_YUV411 },
+ { 640, 480, PIX_FMT_UYYVYY411, MODE_640x480_YUV411 },
{ 640, 480, PIX_FMT_UYVY422, MODE_640x480_YUV422 },
{ 0, 0, 0, MODE_320x240_YUV422 } /* default -- gotta be the last one */
};
@@ -118,7 +118,7 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
fmt->frame_size_id,
SPEED_400,
fps->frame_rate_id, 8, 1,
- ap->device,
+ c->filename,
&dc1394->camera);
dc1394_free_camera_nodes(camera_nodes);
if (res != DC1394_SUCCESS) {
diff --git a/contrib/ffmpeg/libavformat/dsicin.c b/contrib/ffmpeg/libavformat/dsicin.c
index f274eadf8..fb9cb50df 100644
--- a/contrib/ffmpeg/libavformat/dsicin.c
+++ b/contrib/ffmpeg/libavformat/dsicin.c
@@ -62,11 +62,11 @@ static int cin_probe(AVProbeData *p)
return 0;
/* header starts with this special marker */
- if (LE_32(&p->buf[0]) != 0x55AA0000)
+ if (AV_RL32(&p->buf[0]) != 0x55AA0000)
return 0;
/* for accuracy, check some header field values */
- if (LE_32(&p->buf[12]) != 22050 || p->buf[16] != 16 || p->buf[17] != 0)
+ if (AV_RL32(&p->buf[12]) != 22050 || p->buf[16] != 16 || p->buf[17] != 0)
return 0;
return AVPROBE_SCORE_MAX;
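
The LE_16/LE_32 reads in the probe functions are switched to the AV_RL16/AV_RL32 accessors. A self-contained sketch of what the 32-bit little-endian read computes, using a hypothetical rl32() stand-in and the 0x55AA0000 marker checked by cin_probe():

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical equivalent of the AV_RL32() accessor: read four bytes as
     * an unsigned little-endian value, independent of host byte order or
     * alignment. */
    static uint32_t rl32(const uint8_t *p)
    {
        return (uint32_t)p[0]       | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        const uint8_t hdr[4] = { 0x00, 0x00, 0xAA, 0x55 };
        printf("%#x\n", rl32(hdr));   /* 0x55aa0000, the marker cin_probe() checks */
        return 0;
    }
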
diff --git a/contrib/ffmpeg/libavformat/dv.c b/contrib/ffmpeg/libavformat/dv.c
index 3ff8a3fe2..10a306260 100644
--- a/contrib/ffmpeg/libavformat/dv.c
+++ b/contrib/ffmpeg/libavformat/dv.c
@@ -358,8 +358,13 @@ static int64_t dv_frame_offset(AVFormatContext *s, DVDemuxContext *c,
return offset;
}
-void dv_flush_audio_packets(DVDemuxContext *c)
+void dv_offset_reset(DVDemuxContext *c, int64_t frame_offset)
{
+ c->frames= frame_offset;
+ if (c->ach)
+ c->abytes= av_rescale(c->frames,
+ c->ast[0]->codec->bit_rate * (int64_t)c->sys->frame_rate_base,
+ 8*c->sys->frame_rate);
c->audio_pkt[0].size = c->audio_pkt[1].size = 0;
}
@@ -419,13 +424,8 @@ static int dv_read_seek(AVFormatContext *s, int stream_index,
DVDemuxContext *c = r->dv_demux;
int64_t offset= dv_frame_offset(s, c, timestamp, flags);
- c->frames= offset / c->sys->frame_size;
- if (c->ach)
- c->abytes= av_rescale(c->frames,
- c->ast[0]->codec->bit_rate * (int64_t)c->sys->frame_rate_base,
- 8*c->sys->frame_rate);
+ dv_offset_reset(c, offset / c->sys->frame_size);
- dv_flush_audio_packets(c);
return url_fseek(&s->pb, offset, SEEK_SET);
}
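
dv_offset_reset() recomputes the audio byte counter from the frame counter with av_rescale(frames, bit_rate * frame_rate_base, 8 * frame_rate). A worked example with assumed stream parameters (48 kHz 16-bit stereo PCM at 25 fps; none of these numbers come from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed parameters: 48 kHz 16-bit stereo PCM (1 536 000 bit/s)
         * at 25 fps, 250 frames = 10 s of material. */
        int64_t frames          = 250;
        int64_t bit_rate        = 48000 * 16 * 2;
        int64_t frame_rate      = 25;
        int64_t frame_rate_base = 1;

        /* Same expression av_rescale() evaluates in dv_offset_reset(). */
        int64_t abytes = frames * bit_rate * frame_rate_base / (8 * frame_rate);

        printf("abytes=%lld\n", (long long)abytes);  /* 1920000 = 10 s * 192000 B/s */
        return 0;
    }
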
diff --git a/contrib/ffmpeg/libavformat/dv.h b/contrib/ffmpeg/libavformat/dv.h
index f39d22c9f..2fa30036c 100644
--- a/contrib/ffmpeg/libavformat/dv.h
+++ b/contrib/ffmpeg/libavformat/dv.h
@@ -29,7 +29,7 @@ typedef struct DVDemuxContext DVDemuxContext;
DVDemuxContext* dv_init_demux(AVFormatContext* s);
int dv_get_packet(DVDemuxContext*, AVPacket *);
int dv_produce_packet(DVDemuxContext*, AVPacket*, uint8_t*, int);
-void dv_flush_audio_packets(DVDemuxContext*);
+void dv_offset_reset(DVDemuxContext *c, int64_t frame_offset);
typedef struct DVMuxContext DVMuxContext;
DVMuxContext* dv_init_mux(AVFormatContext* s);
diff --git a/contrib/ffmpeg/libavformat/dv1394.c b/contrib/ffmpeg/libavformat/dv1394.c
index f00d47435..3a5f479c8 100644
--- a/contrib/ffmpeg/libavformat/dv1394.c
+++ b/contrib/ffmpeg/libavformat/dv1394.c
@@ -40,7 +40,7 @@ struct dv1394_data {
int channel;
int format;
- void *ring; /* Ring buffer */
+ uint8_t *ring; /* Ring buffer */
int index; /* Current frame index */
int avail; /* Number of frames available for reading */
int done; /* Number of completed frames */
@@ -83,7 +83,6 @@ static int dv1394_start(struct dv1394_data *dv)
static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap)
{
struct dv1394_data *dv = context->priv_data;
- const char *video_device;
dv->dv_demux = dv_init_demux(context);
if (!dv->dv_demux)
@@ -100,10 +99,7 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap
dv->channel = DV1394_DEFAULT_CHANNEL;
/* Open and initialize DV1394 device */
- video_device = ap->device;
- if (!video_device)
- video_device = "/dev/dv1394/0";
- dv->fd = open(video_device, O_RDONLY);
+ dv->fd = open(context->filename, O_RDONLY);
if (dv->fd < 0) {
perror("Failed to open DV interface");
goto failed;
diff --git a/contrib/ffmpeg/libavformat/dvenc.c b/contrib/ffmpeg/libavformat/dvenc.c
index 79cee7af6..bdac43784 100644
--- a/contrib/ffmpeg/libavformat/dvenc.c
+++ b/contrib/ffmpeg/libavformat/dvenc.c
@@ -28,6 +28,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <time.h>
+#include <stdarg.h>
#include "avformat.h"
#include "dvdata.h"
#include "dv.h"
@@ -66,11 +67,12 @@ static int dv_audio_frame_size(const DVprofile* sys, int frame)
sizeof(sys->audio_samples_dist[0]))];
}
-static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* buf)
+static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* buf, ...)
{
struct tm tc;
time_t ct;
int ltc_frame;
+ va_list ap;
buf[0] = (uint8_t)pack_id;
switch (pack_id) {
@@ -99,7 +101,8 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
(tc.tm_hour % 10); /* Units of hours */
break;
case dv_audio_source: /* AAUX source pack */
- buf[1] = (0 << 7) | /* locked mode */
+ va_start(ap, buf);
+ buf[1] = (1 << 7) | /* locked mode -- SMPTE only supports locked mode */
(1 << 6) | /* reserved -- always 1 */
(dv_audio_frame_size(c->sys, c->frames) -
c->sys->audio_min_samples[0]);
@@ -107,7 +110,7 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
buf[2] = (0 << 7) | /* multi-stereo */
(0 << 5) | /* #of audio channels per block: 0 -- 1 channel */
(0 << 4) | /* pair bit: 0 -- one pair of channels */
- 0; /* audio mode */
+ !!va_arg(ap, int); /* audio mode */
buf[3] = (1 << 7) | /* res */
(1 << 6) | /* multi-language flag */
(c->sys->dsf << 5) | /* system: 60fields/50fields */
@@ -116,6 +119,7 @@ static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* bu
(0 << 6) | /* emphasis time constant: 0 -- reserved */
(0 << 3) | /* frequency: 0 -- 48Khz, 1 -- 44,1Khz, 2 -- 32Khz */
0; /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */
+ va_end(ap);
break;
case dv_audio_control:
buf[1] = (0 << 6) | /* copy protection: 0 -- unrestricted */
@@ -179,7 +183,7 @@ static void dv_inject_audio(DVMuxContext *c, int channel, uint8_t* frame_ptr)
for (i = 0; i < c->sys->difseg_size; i++) {
frame_ptr += 6 * 80; /* skip DIF segment header */
for (j = 0; j < 9; j++) {
- dv_write_pack(dv_aaux_packs_dist[i][j], c, &frame_ptr[3]);
+ dv_write_pack(dv_aaux_packs_dist[i][j], c, &frame_ptr[3], i >= c->sys->difseg_size/2);
for (d = 8; d < 80; d+=2) {
of = c->sys->audio_shuffle[i][j] + (d - 8)/2 * c->sys->audio_stride;
if (of*2 >= size)
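
dv_write_pack() becomes variadic so callers can pass the AAUX audio-mode bit per DIF segment half; the value is fetched with va_arg() and squashed to 0/1 with !!. A tiny stand-alone sketch of that pattern (the helper name here is made up):

    #include <stdarg.h>
    #include <stdio.h>

    /* Made-up helper mirroring the new calling convention of dv_write_pack():
     * one optional int is pulled with va_arg() and squashed to 0/1 with !!,
     * so any truthy expression can be passed for the audio-mode bit. */
    static int pack_mode_bit(int pack_id, ...)
    {
        va_list ap;
        int bit;

        va_start(ap, pack_id);
        bit = !!va_arg(ap, int);
        va_end(ap);
        return bit;
    }

    int main(void)
    {
        printf("%d %d\n", pack_mode_bit(0, 0), pack_mode_bit(0, 7));   /* 0 1 */
        return 0;
    }
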
diff --git a/contrib/ffmpeg/libavformat/dxa.c b/contrib/ffmpeg/libavformat/dxa.c
new file mode 100644
index 000000000..f49d3d4ac
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dxa.c
@@ -0,0 +1,214 @@
+/*
+ * DXA demuxer
+ * Copyright (c) 2007 Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "riff.h"
+
+#define DXA_EXTRA_SIZE 9
+
+typedef struct{
+ int frames;
+ int has_sound;
+ int bpc;
+ uint32_t bytes_left;
+ int64_t wavpos, vidpos;
+ int readvid;
+}DXAContext;
+
+static int dxa_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 4)
+ return 0;
+ if (p->buf[0] == 'D' && p->buf[1] == 'E' &&
+ p->buf[2] == 'X' && p->buf[3] == 'A')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int dxa_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ DXAContext *c = s->priv_data;
+ AVStream *st, *ast;
+ uint32_t tag;
+ int32_t fps;
+ int w, h;
+ int num, den;
+ int flags;
+
+ tag = get_le32(pb);
+ if (tag != MKTAG('D', 'E', 'X', 'A'))
+ return -1;
+ flags = get_byte(pb);
+ c->frames = get_be16(pb);
+ if(!c->frames){
+ av_log(s, AV_LOG_ERROR, "File contains no frames ???\n");
+ return -1;
+ }
+
+ fps = get_be32(pb);
+ if(fps > 0){
+ den = 1000;
+ num = fps;
+ }else if (fps < 0){
+ den = 100000;
+ num = -fps;
+ }else{
+ den = 10;
+ num = 1;
+ }
+ w = get_be16(pb);
+ h = get_be16(pb);
+ c->has_sound = 0;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+
+ // Parse WAV data header
+ if(get_le32(pb) == MKTAG('W', 'A', 'V', 'E')){
+ uint32_t size, fsize;
+ c->has_sound = 1;
+ size = get_be32(pb);
+ c->vidpos = url_ftell(pb) + size;
+ url_fskip(pb, 16);
+ fsize = get_le32(pb);
+
+ ast = av_new_stream(s, 0);
+ if (!ast)
+ return -1;
+ get_wav_header(pb, ast->codec, fsize);
+ // find 'data' chunk
+ while(url_ftell(pb) < c->vidpos && !url_feof(pb)){
+ tag = get_le32(pb);
+ fsize = get_le32(pb);
+ if(tag == MKTAG('d', 'a', 't', 'a')) break;
+ url_fskip(pb, fsize);
+ }
+ c->bpc = (fsize + c->frames - 1) / c->frames;
+ if(ast->codec->block_align)
+ c->bpc = ((c->bpc + ast->codec->block_align - 1) / ast->codec->block_align) * ast->codec->block_align;
+ c->bytes_left = fsize;
+ c->wavpos = url_ftell(pb);
+ url_fseek(pb, c->vidpos, SEEK_SET);
+ }
+
+ /* now we are ready: build format streams */
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_DXA;
+ st->codec->width = w;
+ st->codec->height = h;
+ av_reduce(&den, &num, den, num, (1UL<<31)-1);
+ av_set_pts_info(st, 33, num, den);
+ /* flags & 0x80 means that image is interlaced,
+ * flags & 0x40 means that image has double height
+ * either way set true height
+ */
+ if(flags & 0xC0){
+ st->codec->height >>= 1;
+ }
+ c->readvid = !c->has_sound;
+ c->vidpos = url_ftell(pb);
+ s->start_time = 0;
+ s->duration = (int64_t)c->frames * AV_TIME_BASE * num / den;
+ av_log(s, AV_LOG_DEBUG, "%d frame(s)\n",c->frames);
+
+ return 0;
+}
+
+static int dxa_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ DXAContext *c = s->priv_data;
+ int ret;
+ uint32_t size;
+ uint8_t buf[DXA_EXTRA_SIZE], pal[768+4];
+ int pal_size = 0;
+
+ if(!c->readvid && c->has_sound && c->bytes_left){
+ c->readvid = 1;
+ url_fseek(&s->pb, c->wavpos, SEEK_SET);
+ size = FFMIN(c->bytes_left, c->bpc);
+ ret = av_get_packet(&s->pb, pkt, size);
+ pkt->stream_index = 1;
+ if(ret != size)
+ return AVERROR_IO;
+ c->bytes_left -= size;
+ c->wavpos = url_ftell(&s->pb);
+ return 0;
+ }
+ url_fseek(&s->pb, c->vidpos, SEEK_SET);
+ while(!url_feof(&s->pb) && c->frames){
+ get_buffer(&s->pb, buf, 4);
+ switch(AV_RL32(buf)){
+ case MKTAG('N', 'U', 'L', 'L'):
+ if(av_new_packet(pkt, 4 + pal_size) < 0)
+ return AVERROR_NOMEM;
+ pkt->stream_index = 0;
+ if(pal_size) memcpy(pkt->data, pal, pal_size);
+ memcpy(pkt->data + pal_size, buf, 4);
+ c->frames--;
+ c->vidpos = url_ftell(&s->pb);
+ c->readvid = 0;
+ return 0;
+ case MKTAG('C', 'M', 'A', 'P'):
+ pal_size = 768+4;
+ memcpy(pal, buf, 4);
+ get_buffer(&s->pb, pal + 4, 768);
+ break;
+ case MKTAG('F', 'R', 'A', 'M'):
+ get_buffer(&s->pb, buf + 4, DXA_EXTRA_SIZE - 4);
+ size = AV_RB32(buf + 5);
+ if(size > 0xFFFFFF){
+ av_log(s, AV_LOG_ERROR, "Frame size is too big: %d\n", size);
+ return -1;
+ }
+ if(av_new_packet(pkt, size + DXA_EXTRA_SIZE + pal_size) < 0)
+ return AVERROR_NOMEM;
+ memcpy(pkt->data + pal_size, buf, DXA_EXTRA_SIZE);
+ ret = get_buffer(&s->pb, pkt->data + DXA_EXTRA_SIZE + pal_size, size);
+ if(ret != size){
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ if(pal_size) memcpy(pkt->data, pal, pal_size);
+ pkt->stream_index = 0;
+ c->frames--;
+ c->vidpos = url_ftell(&s->pb);
+ c->readvid = 0;
+ return 0;
+ default:
+ av_log(s, AV_LOG_ERROR, "Unknown tag %c%c%c%c\n", buf[0], buf[1], buf[2], buf[3]);
+ return -1;
+ }
+ }
+ return AVERROR(EIO);
+}
+
+AVInputFormat dxa_demuxer = {
+ "dxa",
+ "dxa",
+ sizeof(DXAContext),
+ dxa_probe,
+ dxa_read_header,
+ dxa_read_packet,
+};
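
dxa_read_header() splits the WAV data evenly across frames: bytes-per-chunk is the data size divided by the frame count rounded up, then rounded up again to the codec's block_align so audio packets stay block-aligned. A worked example with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        /* Made-up numbers: 100000 bytes of WAV data, 300 frames,
         * 1024-byte block_align. */
        unsigned fsize = 100000, frames = 300, block_align = 1024;

        unsigned bpc = (fsize + frames - 1) / frames;               /* ceil -> 334 */
        bpc = (bpc + block_align - 1) / block_align * block_align;  /* align -> 1024 */

        printf("bpc=%u\n", bpc);
        return 0;
    }
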
diff --git a/contrib/ffmpeg/libavformat/electronicarts.c b/contrib/ffmpeg/libavformat/electronicarts.c
index 943f75b42..762d658ab 100644
--- a/contrib/ffmpeg/libavformat/electronicarts.c
+++ b/contrib/ffmpeg/libavformat/electronicarts.c
@@ -168,7 +168,7 @@ static int ea_probe(AVProbeData *p)
if (p->buf_size < 4)
return 0;
- if (LE_32(&p->buf[0]) != SCHl_TAG)
+ if (AV_RL32(&p->buf[0]) != SCHl_TAG)
return 0;
return AVPROBE_SCORE_MAX;
@@ -230,8 +230,8 @@ static int ea_read_packet(AVFormatContext *s,
if (get_buffer(pb, preamble, EA_PREAMBLE_SIZE) != EA_PREAMBLE_SIZE)
return AVERROR_IO;
- chunk_type = LE_32(&preamble[0]);
- chunk_size = LE_32(&preamble[4]) - EA_PREAMBLE_SIZE;
+ chunk_type = AV_RL32(&preamble[0]);
+ chunk_size = AV_RL32(&preamble[4]) - EA_PREAMBLE_SIZE;
switch (chunk_type) {
/* audio data */
diff --git a/contrib/ffmpeg/libavformat/ffm.c b/contrib/ffmpeg/libavformat/ffm.c
index 539b45d5f..a2970ae42 100644
--- a/contrib/ffmpeg/libavformat/ffm.c
+++ b/contrib/ffmpeg/libavformat/ffm.c
@@ -201,6 +201,7 @@ static int ffm_write_header(AVFormatContext *s)
put_be32(pb, codec->nsse_weight);
put_be32(pb, codec->frame_skip_cmp);
put_be64(pb, av_dbl2int(codec->rc_buffer_aggressivity));
+ put_be32(pb, codec->codec_tag);
break;
case CODEC_TYPE_AUDIO:
put_be32(pb, codec->sample_rate);
@@ -469,7 +470,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
ffm->file_size = url_fsize(pb);
adjust_write_index(s);
} else {
- ffm->file_size = (uint64_t_C(1) << 63) - 1;
+ ffm->file_size = (UINT64_C(1) << 63) - 1;
}
nb_streams = get_be32(pb);
@@ -534,6 +535,7 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
codec->nsse_weight = get_be32(pb);
codec->frame_skip_cmp = get_be32(pb);
codec->rc_buffer_aggressivity = av_int2dbl(get_be64(pb));
+ codec->codec_tag = get_be32(pb);
break;
case CODEC_TYPE_AUDIO:
codec->sample_rate = get_be32(pb);
@@ -579,7 +581,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
switch(ffm->read_state) {
case READ_HEADER:
if (!ffm_is_avail_data(s, FRAME_HEADER_SIZE)) {
- return -EAGAIN;
+ return AVERROR(EAGAIN);
}
#if 0
printf("pos=%08"PRIx64" spos=%"PRIx64", write_index=%"PRIx64" size=%"PRIx64"\n",
@@ -587,7 +589,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
#endif
if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) !=
FRAME_HEADER_SIZE)
- return -EAGAIN;
+ return AVERROR(EAGAIN);
#if 0
{
int i;
@@ -601,7 +603,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
case READ_DATA:
size = (ffm->header[2] << 16) | (ffm->header[3] << 8) | ffm->header[4];
if (!ffm_is_avail_data(s, size)) {
- return -EAGAIN;
+ return AVERROR(EAGAIN);
}
duration = (ffm->header[5] << 16) | (ffm->header[6] << 8) | ffm->header[7];
@@ -616,7 +618,7 @@ static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
if (ffm_read_data(s, pkt->data, size, 0) != size) {
/* bad case: desynchronized packet. we cancel all the packet loading */
av_free_packet(pkt);
- return -EAGAIN;
+ return AVERROR(EAGAIN);
}
if (ffm->first_frame_in_packet)
{
diff --git a/contrib/ffmpeg/libavformat/file.c b/contrib/ffmpeg/libavformat/file.c
index db671698f..3caf80a61 100644
--- a/contrib/ffmpeg/libavformat/file.c
+++ b/contrib/ffmpeg/libavformat/file.c
@@ -20,14 +20,8 @@
*/
#include "avformat.h"
#include <fcntl.h>
-#ifndef __MINGW32__
#include <unistd.h>
-#include <sys/ioctl.h>
#include <sys/time.h>
-#else
-#include <io.h>
-#define open(fname,oflag,pmode) _open(fname,oflag,pmode)
-#endif /* __MINGW32__ */
/* standard file protocol */
@@ -51,7 +45,7 @@ static int file_open(URLContext *h, const char *filename, int flags)
#endif
fd = open(filename, access, 0666);
if (fd < 0)
- return -ENOENT;
+ return AVERROR(ENOENT);
h->priv_data = (void *)(size_t)fd;
return 0;
}
@@ -72,11 +66,7 @@ static int file_write(URLContext *h, unsigned char *buf, int size)
static offset_t file_seek(URLContext *h, offset_t pos, int whence)
{
int fd = (size_t)h->priv_data;
-#if defined(__MINGW32__)
- return _lseeki64(fd, pos, whence);
-#else
return lseek(fd, pos, whence);
-#endif
}
static int file_close(URLContext *h)
diff --git a/contrib/ffmpeg/libavformat/flic.c b/contrib/ffmpeg/libavformat/flic.c
index ac32e7392..0c3a7f01f 100644
--- a/contrib/ffmpeg/libavformat/flic.c
+++ b/contrib/ffmpeg/libavformat/flic.c
@@ -58,7 +58,7 @@ static int flic_probe(AVProbeData *p)
if (p->buf_size < 6)
return 0;
- magic_number = LE_16(&p->buf[4]);
+ magic_number = AV_RL16(&p->buf[4]);
if ((magic_number != FLIC_FILE_MAGIC_1) &&
(magic_number != FLIC_FILE_MAGIC_2) &&
(magic_number != FLIC_FILE_MAGIC_3))
@@ -83,8 +83,8 @@ static int flic_read_header(AVFormatContext *s,
if (get_buffer(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
return AVERROR_IO;
- magic_number = LE_16(&header[4]);
- speed = LE_32(&header[0x10]);
+ magic_number = AV_RL16(&header[4]);
+ speed = AV_RL32(&header[0x10]);
/* initialize the decoder streams */
st = av_new_stream(s, 0);
@@ -94,8 +94,8 @@ static int flic_read_header(AVFormatContext *s,
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_FLIC;
st->codec->codec_tag = 0; /* no fourcc */
- st->codec->width = LE_16(&header[0x08]);
- st->codec->height = LE_16(&header[0x0A]);
+ st->codec->width = AV_RL16(&header[0x08]);
+ st->codec->height = AV_RL16(&header[0x0A]);
if (!st->codec->width || !st->codec->height)
return AVERROR_INVALIDDATA;
@@ -110,7 +110,7 @@ static int flic_read_header(AVFormatContext *s,
/* Time to figure out the framerate: If there is a FLIC chunk magic
* number at offset 0x10, assume this is from the Bullfrog game,
* Magic Carpet. */
- if (LE_16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
+ if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
flic->frame_pts_inc = FLIC_MC_PTS_INC;
@@ -146,8 +146,10 @@ static int flic_read_header(AVFormatContext *s,
* therefore, the frame pts increment = n * 90
*/
flic->frame_pts_inc = speed * 90;
- } else
+ } else {
+ av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
return AVERROR_INVALIDDATA;
+ }
if (flic->frame_pts_inc == 0)
flic->frame_pts_inc = FLIC_DEFAULT_PTS_INC;
@@ -174,8 +176,8 @@ static int flic_read_packet(AVFormatContext *s,
break;
}
- size = LE_32(&preamble[0]);
- magic = LE_16(&preamble[4]);
+ size = AV_RL32(&preamble[0]);
+ magic = AV_RL16(&preamble[4]);
if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
if (av_new_packet(pkt, size)) {
diff --git a/contrib/ffmpeg/libavformat/flv.h b/contrib/ffmpeg/libavformat/flv.h
new file mode 100644
index 000000000..1484ac15d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/flv.h
@@ -0,0 +1,110 @@
+/**
+ * @file flv.h
+ * FLV common header
+ *
+ * Copyright (c) 2006 The FFmpeg Project.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FLV_H
+#define FLV_H
+
+/* offsets for packed values */
+#define FLV_AUDIO_SAMPLESSIZE_OFFSET 1
+#define FLV_AUDIO_SAMPLERATE_OFFSET 2
+#define FLV_AUDIO_CODECID_OFFSET 4
+
+#define FLV_VIDEO_FRAMETYPE_OFFSET 4
+
+/* bitmasks to isolate specific values */
+#define FLV_AUDIO_CHANNEL_MASK 0x01
+#define FLV_AUDIO_SAMPLESIZE_MASK 0x02
+#define FLV_AUDIO_SAMPLERATE_MASK 0x0c
+#define FLV_AUDIO_CODECID_MASK 0xf0
+
+#define FLV_VIDEO_CODECID_MASK 0x0f
+#define FLV_VIDEO_FRAMETYPE_MASK 0xf0
+
+#define AMF_END_OF_OBJECT 0x09
+
+enum {
+ FLV_HEADER_FLAG_HASVIDEO = 1,
+ FLV_HEADER_FLAG_HASAUDIO = 4,
+};
+
+enum {
+ FLV_TAG_TYPE_AUDIO = 0x08,
+ FLV_TAG_TYPE_VIDEO = 0x09,
+ FLV_TAG_TYPE_META = 0x12,
+};
+
+enum {
+ FLV_MONO = 0,
+ FLV_STEREO = 1,
+};
+
+enum {
+ FLV_SAMPLESSIZE_8BIT = 0,
+ FLV_SAMPLESSIZE_16BIT = 1 << FLV_AUDIO_SAMPLESSIZE_OFFSET,
+};
+
+enum {
+ FLV_SAMPLERATE_SPECIAL = 0, /**< signifies 5512Hz and 8000Hz in the case of NELLYMOSER */
+ FLV_SAMPLERATE_11025HZ = 1 << FLV_AUDIO_SAMPLERATE_OFFSET,
+ FLV_SAMPLERATE_22050HZ = 2 << FLV_AUDIO_SAMPLERATE_OFFSET,
+ FLV_SAMPLERATE_44100HZ = 3 << FLV_AUDIO_SAMPLERATE_OFFSET,
+};
+
+enum {
+ FLV_CODECID_PCM_BE = 0,
+ FLV_CODECID_ADPCM = 1 << FLV_AUDIO_CODECID_OFFSET,
+ FLV_CODECID_MP3 = 2 << FLV_AUDIO_CODECID_OFFSET,
+ FLV_CODECID_PCM_LE = 3 << FLV_AUDIO_CODECID_OFFSET,
+ FLV_CODECID_NELLYMOSER_8HZ_MONO = 5 << FLV_AUDIO_CODECID_OFFSET,
+ FLV_CODECID_NELLYMOSER = 6 << FLV_AUDIO_CODECID_OFFSET,
+};
+
+enum {
+ FLV_CODECID_H263 = 2,
+ FLV_CODECID_SCREEN = 3,
+ FLV_CODECID_VP6 = 4,
+ FLV_CODECID_VP6A = 5,
+ FLV_CODECID_SCREEN2 = 6,
+};
+
+enum {
+ FLV_FRAME_KEY = 1 << FLV_VIDEO_FRAMETYPE_OFFSET,
+ FLV_FRAME_INTER = 2 << FLV_VIDEO_FRAMETYPE_OFFSET,
+ FLV_FRAME_DISP_INTER = 3 << FLV_VIDEO_FRAMETYPE_OFFSET,
+};
+
+typedef enum {
+ AMF_DATA_TYPE_NUMBER = 0x00,
+ AMF_DATA_TYPE_BOOL = 0x01,
+ AMF_DATA_TYPE_STRING = 0x02,
+ AMF_DATA_TYPE_OBJECT = 0x03,
+ AMF_DATA_TYPE_NULL = 0x05,
+ AMF_DATA_TYPE_UNDEFINED = 0x06,
+ AMF_DATA_TYPE_REFERENCE = 0x07,
+ AMF_DATA_TYPE_MIXEDARRAY = 0x08,
+ AMF_DATA_TYPE_ARRAY = 0x0a,
+ AMF_DATA_TYPE_DATE = 0x0b,
+ AMF_DATA_TYPE_UNSUPPORTED = 0x0d,
+} AMFDataType;
+
+#endif /* FLV_H */
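
The masks and offsets defined above decompose the single flags byte of an FLV audio tag. A stand-alone sketch (the defines are repeated from flv.h so it compiles on its own; the sample byte 0x2E is invented and corresponds to MP3, 44100 Hz, 16-bit, mono):

    #include <stdio.h>

    /* Repeated from flv.h above so the sketch compiles stand-alone. */
    #define FLV_AUDIO_SAMPLESSIZE_OFFSET 1
    #define FLV_AUDIO_SAMPLERATE_OFFSET  2
    #define FLV_AUDIO_CODECID_OFFSET     4
    #define FLV_AUDIO_CHANNEL_MASK    0x01
    #define FLV_AUDIO_SAMPLESIZE_MASK 0x02
    #define FLV_AUDIO_SAMPLERATE_MASK 0x0c
    #define FLV_AUDIO_CODECID_MASK    0xf0

    int main(void)
    {
        unsigned flags = 0x2E;   /* invented sample byte */

        printf("codecid=%u rate_index=%u bits=%u channels=%u\n",
               (flags & FLV_AUDIO_CODECID_MASK)    >> FLV_AUDIO_CODECID_OFFSET,
               (flags & FLV_AUDIO_SAMPLERATE_MASK) >> FLV_AUDIO_SAMPLERATE_OFFSET,
               (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8,
               (flags & FLV_AUDIO_CHANNEL_MASK) + 1);
        /* -> codecid=2 (MP3), rate_index=3 (44100 Hz), bits=16, channels=1 */
        return 0;
    }
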
diff --git a/contrib/ffmpeg/libavformat/flvdec.c b/contrib/ffmpeg/libavformat/flvdec.c
index a1c2aa4eb..bf91fbbc7 100644
--- a/contrib/ffmpeg/libavformat/flvdec.c
+++ b/contrib/ffmpeg/libavformat/flvdec.c
@@ -25,6 +25,7 @@
* - lower 4bits: difference between encoded height and visible height
*/
#include "avformat.h"
+#include "flv.h"
static int flv_probe(AVProbeData *p)
{
@@ -33,34 +34,225 @@ static int flv_probe(AVProbeData *p)
if (p->buf_size < 6)
return 0;
d = p->buf;
- if (d[0] == 'F' && d[1] == 'L' && d[2] == 'V') {
- return 50;
+ if (d[0] == 'F' && d[1] == 'L' && d[2] == 'V' && d[3] < 5 && d[5]==0) {
+ return AVPROBE_SCORE_MAX;
}
return 0;
}
+static void flv_set_audio_codec(AVFormatContext *s, AVStream *astream, int flv_codecid) {
+ AVCodecContext *acodec = astream->codec;
+ switch(flv_codecid) {
+ //no distinction between S16 and S8 PCM codec flags
+ case FLV_CODECID_PCM_BE:
+ acodec->codec_id = acodec->bits_per_sample == 8 ? CODEC_ID_PCM_S8 : CODEC_ID_PCM_S16BE; break;
+ case FLV_CODECID_PCM_LE:
+ acodec->codec_id = acodec->bits_per_sample == 8 ? CODEC_ID_PCM_S8 : CODEC_ID_PCM_S16LE; break;
+ case FLV_CODECID_ADPCM: acodec->codec_id = CODEC_ID_ADPCM_SWF; break;
+ case FLV_CODECID_MP3 : acodec->codec_id = CODEC_ID_MP3 ; astream->need_parsing = 1 ; break;
+ case FLV_CODECID_NELLYMOSER_8HZ_MONO:
+ acodec->sample_rate = 8000; //in case metadata does not otherwise declare samplerate
+ case FLV_CODECID_NELLYMOSER:
+ default:
+ av_log(s, AV_LOG_INFO, "Unsupported audio codec (%x)\n", flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
+ acodec->codec_tag = flv_codecid >> FLV_AUDIO_CODECID_OFFSET;
+ }
+}
+
+static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream, int flv_codecid) {
+ AVCodecContext *vcodec = vstream->codec;
+ switch(flv_codecid) {
+ case FLV_CODECID_H263 : vcodec->codec_id = CODEC_ID_FLV1 ; break;
+ case FLV_CODECID_SCREEN: vcodec->codec_id = CODEC_ID_FLASHSV; break;
+ case FLV_CODECID_VP6 : vcodec->codec_id = CODEC_ID_VP6F ;
+ if(vcodec->extradata_size != 1) {
+ vcodec->extradata_size = 1;
+ vcodec->extradata = av_malloc(1);
+ }
+ vcodec->extradata[0] = get_byte(&s->pb);
+ return 1; // 1 byte body size adjustment for flv_read_packet()
+ default:
+ av_log(s, AV_LOG_INFO, "Unsupported video codec (%x)\n", flv_codecid);
+ vcodec->codec_tag = flv_codecid;
+ }
+
+ return 0;
+}
+
+static int amf_get_string(ByteIOContext *ioc, char *buffer, int buffsize) {
+ int length = get_be16(ioc);
+ if(length >= buffsize) {
+ url_fskip(ioc, length);
+ return -1;
+ }
+
+ get_buffer(ioc, buffer, length);
+
+ buffer[length] = '\0';
+
+ return length;
+}
+
+static int amf_parse_object(AVFormatContext *s, AVStream *astream, AVStream *vstream, const char *key, unsigned int max_pos, int depth) {
+ AVCodecContext *acodec, *vcodec;
+ ByteIOContext *ioc;
+ AMFDataType amf_type;
+ char str_val[256];
+ double num_val;
+
+ num_val = 0;
+ ioc = &s->pb;
+
+ amf_type = get_byte(ioc);
+
+ switch(amf_type) {
+ case AMF_DATA_TYPE_NUMBER:
+ num_val = av_int2dbl(get_be64(ioc)); break;
+ case AMF_DATA_TYPE_BOOL:
+ num_val = get_byte(ioc); break;
+ case AMF_DATA_TYPE_STRING:
+ if(amf_get_string(ioc, str_val, sizeof(str_val)) < 0)
+ return -1;
+ break;
+ case AMF_DATA_TYPE_OBJECT: {
+ unsigned int keylen;
+
+ while(url_ftell(ioc) < max_pos - 2 && (keylen = get_be16(ioc))) {
+ url_fskip(ioc, keylen); //skip key string
+ if(amf_parse_object(s, NULL, NULL, NULL, max_pos, depth + 1) < 0)
+ return -1; //if we couldn't skip, bomb out.
+ }
+ if(get_byte(ioc) != AMF_END_OF_OBJECT)
+ return -1;
+ }
+ break;
+ case AMF_DATA_TYPE_NULL:
+ case AMF_DATA_TYPE_UNDEFINED:
+ case AMF_DATA_TYPE_UNSUPPORTED:
+ break; //these take up no additional space
+ case AMF_DATA_TYPE_MIXEDARRAY:
+ url_fskip(ioc, 4); //skip 32-bit max array index
+ while(url_ftell(ioc) < max_pos - 2 && amf_get_string(ioc, str_val, sizeof(str_val)) > 0) {
+ //this is the only case in which we would want a nested parse to not skip over the object
+ if(amf_parse_object(s, astream, vstream, str_val, max_pos, depth + 1) < 0)
+ return -1;
+ }
+ if(get_byte(ioc) != AMF_END_OF_OBJECT)
+ return -1;
+ break;
+ case AMF_DATA_TYPE_ARRAY: {
+ unsigned int arraylen, i;
+
+ arraylen = get_be32(ioc);
+ for(i = 0; i < arraylen && url_ftell(ioc) < max_pos - 1; i++) {
+ if(amf_parse_object(s, NULL, NULL, NULL, max_pos, depth + 1) < 0)
+ return -1; //if we couldn't skip, bomb out.
+ }
+ }
+ break;
+ case AMF_DATA_TYPE_DATE:
+ url_fskip(ioc, 8 + 2); //timestamp (double) and UTC offset (int16)
+ break;
+ default: //unsupported type, we couldn't skip
+ return -1;
+ }
+
+ if(depth == 1 && key) { //only look for metadata values when we are not nested and key != NULL
+ acodec = astream ? astream->codec : NULL;
+ vcodec = vstream ? vstream->codec : NULL;
+
+ if(amf_type == AMF_DATA_TYPE_BOOL) {
+ if(!strcmp(key, "stereo") && acodec) acodec->channels = num_val > 0 ? 2 : 1;
+ } else if(amf_type == AMF_DATA_TYPE_NUMBER) {
+ if(!strcmp(key, "duration")) s->duration = num_val * AV_TIME_BASE;
+// else if(!strcmp(key, "width") && vcodec && num_val > 0) vcodec->width = num_val;
+// else if(!strcmp(key, "height") && vcodec && num_val > 0) vcodec->height = num_val;
+ else if(!strcmp(key, "audiocodecid") && acodec) flv_set_audio_codec(s, astream, (int)num_val << FLV_AUDIO_CODECID_OFFSET);
+ else if(!strcmp(key, "videocodecid") && vcodec) flv_set_video_codec(s, vstream, (int)num_val);
+ else if(!strcmp(key, "audiosamplesize") && acodec && num_val >= 0) {
+ acodec->bits_per_sample = num_val;
+ //we may have to rewrite a previously read codecid because FLV only marks PCM endianness.
+ if(num_val == 8 && (acodec->codec_id == CODEC_ID_PCM_S16BE || acodec->codec_id == CODEC_ID_PCM_S16LE))
+ acodec->codec_id = CODEC_ID_PCM_S8;
+ }
+ else if(!strcmp(key, "audiosamplerate") && acodec && num_val >= 0) {
+ //some tools, like FLVTool2, write consistently approximate metadata sample rates
+ switch((int)num_val) {
+ case 44000: acodec->sample_rate = 44100 ; break;
+ case 22000: acodec->sample_rate = 22050 ; break;
+ case 11000: acodec->sample_rate = 11025 ; break;
+ case 5000 : acodec->sample_rate = 5512 ; break;
+ default : acodec->sample_rate = num_val;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int flv_read_metabody(AVFormatContext *s, unsigned int next_pos) {
+ AMFDataType type;
+ AVStream *stream, *astream, *vstream;
+ ByteIOContext *ioc;
+ int i, keylen;
+ char buffer[11]; //only needs to hold the string "onMetaData". Anything longer is something we don't want.
+
+ astream = NULL;
+ vstream = NULL;
+ keylen = 0;
+ ioc = &s->pb;
+
+ //first object needs to be "onMetaData" string
+ type = get_byte(ioc);
+ if(type != AMF_DATA_TYPE_STRING || amf_get_string(ioc, buffer, sizeof(buffer)) < 0 || strcmp(buffer, "onMetaData"))
+ return -1;
+
+ //find the streams now so that amf_parse_object doesn't need to do the lookup every time it is called.
+ for(i = 0; i < s->nb_streams; i++) {
+ stream = s->streams[i];
+ if (stream->codec->codec_type == CODEC_TYPE_AUDIO) astream = stream;
+ else if(stream->codec->codec_type == CODEC_TYPE_VIDEO) vstream = stream;
+ }
+
+ //parse the second object (we want a mixed array)
+ if(amf_parse_object(s, astream, vstream, buffer, next_pos, 0) < 0)
+ return -1;
+
+ return 0;
+}
+
static int flv_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
- int offset, flags, size;
-
- s->ctx_flags |= AVFMTCTX_NOHEADER; //ok we have a header but theres no fps, codec type, sample_rate, ...
+ int offset, flags;
+ AVStream *st;
url_fskip(&s->pb, 4);
flags = get_byte(&s->pb);
+ /* old flvtool cleared this field */
+ /* FIXME: better fix needed */
+ if (!flags) {
+ flags = FLV_HEADER_FLAG_HASVIDEO | FLV_HEADER_FLAG_HASAUDIO;
+ av_log(s, AV_LOG_WARNING, "Broken FLV file, which says no streams present, this might fail\n");
+ }
- offset = get_be32(&s->pb);
-
- if(!url_is_streamed(&s->pb)){
- const int fsize= url_fsize(&s->pb);
- url_fseek(&s->pb, fsize-4, SEEK_SET);
- size= get_be32(&s->pb);
- url_fseek(&s->pb, fsize-3-size, SEEK_SET);
- if(size == get_be24(&s->pb) + 11){
- s->duration= get_be24(&s->pb) * (int64_t)AV_TIME_BASE / 1000;
- }
+ if(flags & FLV_HEADER_FLAG_HASVIDEO){
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ av_set_pts_info(st, 24, 1, 1000); /* 24 bit pts in ms */
+ }
+ if(flags & FLV_HEADER_FLAG_HASAUDIO){
+ st = av_new_stream(s, 1);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ av_set_pts_info(st, 24, 1, 1000); /* 24 bit pts in ms */
}
+ offset = get_be32(&s->pb);
url_fseek(&s->pb, offset, SEEK_SET);
s->start_time = 0;
@@ -90,55 +282,17 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
next= size + url_ftell(&s->pb);
- if (type == 8) {
+ if (type == FLV_TAG_TYPE_AUDIO) {
is_audio=1;
flags = get_byte(&s->pb);
- } else if (type == 9) {
+ } else if (type == FLV_TAG_TYPE_VIDEO) {
is_audio=0;
flags = get_byte(&s->pb);
- } else if (type == 18 && size > 13+1+4) {
- url_fskip(&s->pb, 13); //onMetaData blah
- if(get_byte(&s->pb) == 8){
- url_fskip(&s->pb, 4);
- }
- while(url_ftell(&s->pb) + 5 < next){
- char tmp[128];
- int type, len;
- double d= 0;
-
- len= get_be16(&s->pb);
- if(len >= sizeof(tmp) || !len)
- break;
- get_buffer(&s->pb, tmp, len);
- tmp[len]=0;
-
- type= get_byte(&s->pb);
- if(type==0){
- d= av_int2dbl(get_be64(&s->pb));
- }else if(type==2){
- len= get_be16(&s->pb);
- if(len >= sizeof(tmp))
- break;
- url_fskip(&s->pb, len);
- }else if(type==8){
- //array
- break;
- }else if(type==11){
- d= av_int2dbl(get_be64(&s->pb));
- get_be16(&s->pb);
- }
-
- if(!strcmp(tmp, "duration")){
- s->duration = d*AV_TIME_BASE;
- }else if(!strcmp(tmp, "videodatarate")){
- }else if(!strcmp(tmp, "audiodatarate")){
- }
- }
- url_fseek(&s->pb, next, SEEK_SET);
- continue;
} else {
- /* skip packet */
- av_log(s, AV_LOG_ERROR, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
+ if (type == FLV_TAG_TYPE_META && size > 13+1+4)
+ flv_read_metabody(s, next);
+ else /* skip packet */
+ av_log(s, AV_LOG_ERROR, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
url_fseek(&s->pb, next, SEEK_SET);
continue;
}
@@ -150,67 +304,49 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
break;
}
if(i == s->nb_streams){
- st = av_new_stream(s, is_audio);
- if (!st)
- return AVERROR_NOMEM;
-
- av_set_pts_info(st, 24, 1, 1000); /* 24 bit pts in ms */
- st->codec->time_base= (AVRational){1,1000};
+ av_log(NULL, AV_LOG_ERROR, "invalid stream\n");
+ url_fseek(&s->pb, next, SEEK_SET);
+ continue;
}
// av_log(NULL, AV_LOG_DEBUG, "%d %X %d \n", is_audio, flags, st->discard);
- if( (st->discard >= AVDISCARD_NONKEY && !((flags >> 4)==1 || is_audio))
- ||(st->discard >= AVDISCARD_BIDIR && ((flags >> 4)==3 && !is_audio))
+ if( (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || is_audio))
+ ||(st->discard >= AVDISCARD_BIDIR && ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && !is_audio))
|| st->discard >= AVDISCARD_ALL
){
url_fseek(&s->pb, next, SEEK_SET);
continue;
}
- if ((flags >> 4)==1)
+ if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY)
av_add_index_entry(st, pos, pts, size, 0, AVINDEX_KEYFRAME);
break;
}
+ // if not streamed and no duration from metadata then seek to end to find the duration from the timestamps
+ if(!url_is_streamed(&s->pb) && s->duration==AV_NOPTS_VALUE){
+ int size;
+ const int pos= url_ftell(&s->pb);
+ const int fsize= url_fsize(&s->pb);
+ url_fseek(&s->pb, fsize-4, SEEK_SET);
+ size= get_be32(&s->pb);
+ url_fseek(&s->pb, fsize-3-size, SEEK_SET);
+ if(size == get_be24(&s->pb) + 11){
+ s->duration= get_be24(&s->pb) * (int64_t)AV_TIME_BASE / 1000;
+ }
+ url_fseek(&s->pb, pos, SEEK_SET);
+ }
+
if(is_audio){
- if(st->codec->sample_rate == 0){
- st->codec->codec_type = CODEC_TYPE_AUDIO;
- st->codec->channels = (flags&1)+1;
- if((flags >> 4) == 5)
+ if(!st->codec->sample_rate || !st->codec->bits_per_sample || (!st->codec->codec_id && !st->codec->codec_tag)) {
+ st->codec->channels = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1;
+ if((flags & FLV_AUDIO_CODECID_MASK) == FLV_CODECID_NELLYMOSER_8HZ_MONO)
st->codec->sample_rate= 8000;
else
- st->codec->sample_rate = (44100<<((flags>>2)&3))>>3;
- switch(flags >> 4){/* 0: uncompressed 1: ADPCM 2: mp3 5: Nellymoser 8kHz mono 6: Nellymoser*/
- case 0: if (flags&2) st->codec->codec_id = CODEC_ID_PCM_S16BE;
- else st->codec->codec_id = CODEC_ID_PCM_S8; break;
- case 1: st->codec->codec_id = CODEC_ID_ADPCM_SWF; break;
- case 2: st->codec->codec_id = CODEC_ID_MP3; st->need_parsing = 1; break;
- // this is not listed at FLV but at SWF, strange...
- case 3: if (flags&2) st->codec->codec_id = CODEC_ID_PCM_S16LE;
- else st->codec->codec_id = CODEC_ID_PCM_S8; break;
- default:
- av_log(s, AV_LOG_INFO, "Unsupported audio codec (%x)\n", flags >> 4);
- st->codec->codec_tag= (flags >> 4);
- }
- st->codec->bits_per_sample = (flags & 2) ? 16 : 8;
+ st->codec->sample_rate = (44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >> FLV_AUDIO_SAMPLERATE_OFFSET) >> 3);
+ st->codec->bits_per_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
+ flv_set_audio_codec(s, st, flags & FLV_AUDIO_CODECID_MASK);
}
}else{
- st->codec->codec_type = CODEC_TYPE_VIDEO;
- switch(flags & 0xF){
- case 2: st->codec->codec_id = CODEC_ID_FLV1; break;
- case 3: st->codec->codec_id = CODEC_ID_FLASHSV; break;
- case 4:
- st->codec->codec_id = CODEC_ID_VP6F;
- if (st->codec->extradata_size != 1) {
- st->codec->extradata_size = 1;
- st->codec->extradata = av_malloc(1);
- }
- /* width and height adjustment */
- st->codec->extradata[0] = get_byte(&s->pb);
- size--;
- break;
- default:
- av_log(s, AV_LOG_INFO, "Unsupported video codec (%x)\n", flags & 0xf);
- st->codec->codec_tag= flags & 0xF;
- }
+ size -= flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK);
}
ret= av_get_packet(&s->pb, pkt, size - 1);
@@ -223,7 +359,7 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
pkt->pts = pts;
pkt->stream_index = st->index;
- if (is_audio || ((flags >> 4)==1))
+ if (is_audio || ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY))
pkt->flags |= PKT_FLAG_KEY;
return ret;
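
The hunk above derives the stream duration from the last FLV tag: read the trailing 32-bit previous-tag-size, seek back to that tag, sanity-check its 24-bit data size, and take its 24-bit timestamp. A minimal standalone sketch of the same arithmetic, assuming the standard FLV tag layout and using plain stdio instead of the patch's ByteIOContext helpers:

#include <stdio.h>
#include <stdint.h>

static uint32_t read_be(FILE *f, int n)          /* big-endian n-byte read */
{
    uint32_t v = 0;
    while (n--)
        v = (v << 8) | (uint32_t)(fgetc(f) & 0xff);
    return v;
}

/* returns the last tag's timestamp in milliseconds, or -1 if unknown */
static long flv_last_tag_timestamp_ms(FILE *f)
{
    long fsize, prev_size;

    if (fseek(f, 0, SEEK_END) != 0)
        return -1;
    fsize = ftell(f);
    if (fsize < 9 + 4 + 11 + 4)                  /* header + prev-size + one tag */
        return -1;

    fseek(f, fsize - 4, SEEK_SET);
    prev_size = (long)read_be(f, 4);             /* 11 + data size of the last tag */

    fseek(f, fsize - 3 - prev_size, SEEK_SET);   /* skip the 1-byte tag type */
    if (prev_size != (long)read_be(f, 3) + 11)   /* sanity-check the data size */
        return -1;

    return (long)read_be(f, 3);                  /* 24-bit timestamp in ms */
}

int main(int argc, char **argv)
{
    FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;
    if (f) {
        printf("last timestamp: %ld ms\n", flv_last_tag_timestamp_ms(f));
        fclose(f);
    }
    return 0;
}
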
diff --git a/contrib/ffmpeg/libavformat/flvenc.c b/contrib/ffmpeg/libavformat/flvenc.c
index 0b09d9830..ece585d77 100644
--- a/contrib/ffmpeg/libavformat/flvenc.c
+++ b/contrib/ffmpeg/libavformat/flvenc.c
@@ -19,10 +19,29 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
+#include "flv.h"
+#include "riff.h"
#undef NDEBUG
#include <assert.h>
+static const AVCodecTag flv_video_codec_ids[] = {
+ {CODEC_ID_FLV1, FLV_CODECID_H263 },
+ {CODEC_ID_FLASHSV, FLV_CODECID_SCREEN},
+ {CODEC_ID_VP6F, FLV_CODECID_VP6 },
+ {CODEC_ID_VP6, FLV_CODECID_VP6 },
+ {CODEC_ID_NONE, 0}
+};
+
+static const AVCodecTag flv_audio_codec_ids[] = {
+ {CODEC_ID_MP3, FLV_CODECID_MP3 >> FLV_AUDIO_CODECID_OFFSET},
+ {CODEC_ID_PCM_S8, FLV_CODECID_PCM_BE >> FLV_AUDIO_CODECID_OFFSET},
+ {CODEC_ID_PCM_S16BE, FLV_CODECID_PCM_BE >> FLV_AUDIO_CODECID_OFFSET},
+ {CODEC_ID_PCM_S16LE, FLV_CODECID_PCM_LE >> FLV_AUDIO_CODECID_OFFSET},
+ {CODEC_ID_ADPCM_SWF, FLV_CODECID_ADPCM >> FLV_AUDIO_CODECID_OFFSET},
+ {CODEC_ID_NONE, 0}
+};
+
typedef struct FLVContext {
int hasAudio;
int hasVideo;
@@ -33,21 +52,21 @@ typedef struct FLVContext {
} FLVContext;
static int get_audio_flags(AVCodecContext *enc){
- int flags = (enc->bits_per_sample == 16) ? 0x2 : 0x0;
+ int flags = (enc->bits_per_sample == 16) ? FLV_SAMPLESSIZE_16BIT : FLV_SAMPLESSIZE_8BIT;
switch (enc->sample_rate) {
case 44100:
- flags |= 0x0C;
+ flags |= FLV_SAMPLERATE_44100HZ;
break;
case 22050:
- flags |= 0x08;
+ flags |= FLV_SAMPLERATE_22050HZ;
break;
case 11025:
- flags |= 0x04;
+ flags |= FLV_SAMPLERATE_11025HZ;
break;
case 8000: //nellymoser only
case 5512: //not mp3
- flags |= 0x00;
+ flags |= FLV_SAMPLERATE_SPECIAL;
break;
default:
av_log(enc, AV_LOG_ERROR, "flv doesnt support that sample rate, choose from (44100, 22050, 11025)\n");
@@ -55,23 +74,24 @@ static int get_audio_flags(AVCodecContext *enc){
}
if (enc->channels > 1) {
- flags |= 0x01;
+ flags |= FLV_STEREO;
}
switch(enc->codec_id){
case CODEC_ID_MP3:
- flags |= 0x20 | 0x2;
+ flags |= FLV_CODECID_MP3 | FLV_SAMPLESSIZE_16BIT;
break;
case CODEC_ID_PCM_S8:
+ flags |= FLV_CODECID_PCM_BE | FLV_SAMPLESSIZE_8BIT;
break;
case CODEC_ID_PCM_S16BE:
- flags |= 0x2;
+ flags |= FLV_CODECID_PCM_BE | FLV_SAMPLESSIZE_16BIT;
break;
case CODEC_ID_PCM_S16LE:
- flags |= 0x30 | 0x2;
+ flags |= FLV_CODECID_PCM_LE | FLV_SAMPLESSIZE_16BIT;
break;
case CODEC_ID_ADPCM_SWF:
- flags |= 0x10;
+ flags |= FLV_CODECID_ADPCM | FLV_SAMPLESSIZE_16BIT;
break;
case 0:
flags |= enc->codec_tag<<4;
@@ -84,14 +104,6 @@ static int get_audio_flags(AVCodecContext *enc){
return flags;
}
-#define AMF_DOUBLE 0
-#define AMF_BOOLEAN 1
-#define AMF_STRING 2
-#define AMF_OBJECT 3
-#define AMF_MIXED_ARRAY 8
-#define AMF_ARRAY 10
-#define AMF_DATE 11
-
static void put_amf_string(ByteIOContext *pb, const char *str)
{
size_t len = strlen(str);
@@ -101,27 +113,26 @@ static void put_amf_string(ByteIOContext *pb, const char *str)
static void put_amf_double(ByteIOContext *pb, double d)
{
- put_byte(pb, AMF_DOUBLE);
+ put_byte(pb, AMF_DATA_TYPE_NUMBER);
put_be64(pb, av_dbl2int(d));
}
+static void put_amf_bool(ByteIOContext *pb, int b) {
+ put_byte(pb, AMF_DATA_TYPE_BOOL);
+ put_byte(pb, !!b);
+}
+
static int flv_write_header(AVFormatContext *s)
{
ByteIOContext *pb = &s->pb;
FLVContext *flv = s->priv_data;
- int i, width, height, samplerate;
+ int i, width, height, samplerate, samplesize, channels, audiocodecid, videocodecid;
double framerate = 0.0;
int metadata_size_pos, data_size;
flv->hasAudio = 0;
flv->hasVideo = 0;
- put_tag(pb,"FLV");
- put_byte(pb,1);
- put_byte(pb,0); // delayed write
- put_be32(pb,9);
- put_be32(pb,0);
-
for(i=0; i<s->nb_streams; i++){
AVCodecContext *enc = s->streams[i]->codec;
if (enc->codec_type == CODEC_TYPE_VIDEO) {
@@ -133,12 +144,34 @@ static int flv_write_header(AVFormatContext *s)
framerate = 1/av_q2d(s->streams[i]->codec->time_base);
}
flv->hasVideo=1;
+
+ videocodecid = enc->codec_tag;
+ if(videocodecid == 0) {
+ av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n");
+ return -1;
+ }
} else {
flv->hasAudio=1;
samplerate = enc->sample_rate;
+ channels = enc->channels;
+
+ audiocodecid = enc->codec_tag;
+ samplesize = (enc->codec_id == CODEC_ID_PCM_S8) ? 8 : 16;
+
+ if(get_audio_flags(enc)<0)
+ return -1;
}
av_set_pts_info(s->streams[i], 24, 1, 1000); /* 24 bit pts in ms */
- if(enc->codec_tag == 5){
+ }
+ put_tag(pb,"FLV");
+ put_byte(pb,1);
+ put_byte(pb, FLV_HEADER_FLAG_HASAUDIO * flv->hasAudio
+ + FLV_HEADER_FLAG_HASVIDEO * flv->hasVideo);
+ put_be32(pb,9);
+ put_be32(pb,0);
+
+ for(i=0; i<s->nb_streams; i++){
+ if(s->streams[i]->codec->codec_tag == 5){
put_byte(pb,8); // message type
put_be24(pb,0); // include flags
put_be24(pb,0); // time stamp
@@ -146,8 +179,6 @@ static int flv_write_header(AVFormatContext *s)
put_be32(pb,11); // size
flv->reserved=5;
}
- if(enc->codec_type == CODEC_TYPE_AUDIO && get_audio_flags(enc)<0)
- return -1;
}
/* write meta_tag */
@@ -160,12 +191,12 @@ static int flv_write_header(AVFormatContext *s)
/* now data of data_size size */
/* first event name as a string */
- put_byte(pb, AMF_STRING); // 1 byte
+ put_byte(pb, AMF_DATA_TYPE_STRING);
put_amf_string(pb, "onMetaData"); // 12 bytes
/* mixed array (hash) with size and string/type/data tuples */
- put_byte(pb, AMF_MIXED_ARRAY);
- put_be32(pb, 4*flv->hasVideo + flv->hasAudio + 2); // +2 for duration and file size
+ put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY);
+ put_be32(pb, 5*flv->hasVideo + 4*flv->hasAudio + 2); // +2 for duration and file size
put_amf_string(pb, "duration");
flv->duration_offset= url_ftell(pb);
@@ -183,11 +214,23 @@ static int flv_write_header(AVFormatContext *s)
put_amf_string(pb, "framerate");
put_amf_double(pb, framerate);
+
+ put_amf_string(pb, "videocodecid");
+ put_amf_double(pb, videocodecid);
}
if(flv->hasAudio){
put_amf_string(pb, "audiosamplerate");
put_amf_double(pb, samplerate);
+
+ put_amf_string(pb, "audiosamplesize");
+ put_amf_double(pb, samplesize);
+
+ put_amf_string(pb, "stereo");
+ put_amf_bool(pb, (channels == 2));
+
+ put_amf_string(pb, "audiocodecid");
+ put_amf_double(pb, audiocodecid);
}
put_amf_string(pb, "filesize");
@@ -195,7 +238,7 @@ static int flv_write_header(AVFormatContext *s)
put_amf_double(pb, 0); // delayed write
put_amf_string(pb, "");
- put_byte(pb, 9); // end marker 1 byte
+ put_byte(pb, AMF_END_OF_OBJECT);
/* write total size of tag */
data_size= url_ftell(pb) - metadata_size_pos - 10;
@@ -210,16 +253,11 @@ static int flv_write_header(AVFormatContext *s)
static int flv_write_trailer(AVFormatContext *s)
{
int64_t file_size;
- int flags = 0;
ByteIOContext *pb = &s->pb;
FLVContext *flv = s->priv_data;
file_size = url_ftell(pb);
- flags |= flv->hasAudio ? 4 : 0;
- flags |= flv->hasVideo ? 1 : 0;
- url_fseek(pb, 4, SEEK_SET);
- put_byte(pb,flags);
/* update informations */
url_fseek(pb, flv->duration_offset, SEEK_SET);
@@ -242,22 +280,35 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
// av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);
if (enc->codec_type == CODEC_TYPE_VIDEO) {
- put_byte(pb, 9);
- flags = 2; // choose h263
- flags |= pkt->flags & PKT_FLAG_KEY ? 0x10 : 0x20; // add keyframe indicator
+ put_byte(pb, FLV_TAG_TYPE_VIDEO);
+
+ flags = enc->codec_tag;
+ if(flags == 0) {
+ av_log(enc, AV_LOG_ERROR, "video codec %X not compatible with flv\n",enc->codec_id);
+ return -1;
+ }
+
+ flags |= pkt->flags & PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
} else {
assert(enc->codec_type == CODEC_TYPE_AUDIO);
flags = get_audio_flags(enc);
assert(size);
- put_byte(pb, 8);
+ put_byte(pb, FLV_TAG_TYPE_AUDIO);
}
- put_be24(pb,size+1); // include flags
+ if ((enc->codec_id == CODEC_ID_VP6) || (enc->codec_id == CODEC_ID_VP6F))
+ put_be24(pb,size+2); // data size plus the flags byte and the extra VP6 adjustment byte
+ else
+ put_be24(pb,size+1); // include flags
put_be24(pb,pkt->pts);
put_be32(pb,flv->reserved);
put_byte(pb,flags);
+ if (enc->codec_id == CODEC_ID_VP6)
+ put_byte(pb,0);
+ if (enc->codec_id == CODEC_ID_VP6F)
+ put_byte(pb, enc->extradata_size ? enc->extradata[0] : 0);
put_buffer(pb, pkt->data, size);
put_be32(pb,size+1+11); // previous tag size
flv->duration = pkt->pts + pkt->duration;
@@ -272,13 +323,14 @@ AVOutputFormat flv_muxer = {
"video/x-flv",
"flv",
sizeof(FLVContext),
-#ifdef CONFIG_MP3LAME
+#ifdef CONFIG_LIBMP3LAME
CODEC_ID_MP3,
-#else // CONFIG_MP3LAME
+#else // CONFIG_LIBMP3LAME
CODEC_ID_NONE,
-#endif // CONFIG_MP3LAME
+#endif // CONFIG_LIBMP3LAME
CODEC_ID_FLV1,
flv_write_header,
flv_write_packet,
flv_write_trailer,
+ .codec_tag= (const AVCodecTag*[]){flv_video_codec_ids, flv_audio_codec_ids, 0},
};
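
The onMetaData changes above lean on AMF0 scalar encoding: a number is the 0x00 marker followed by the IEEE-754 double in big-endian order, a boolean is 0x01 plus one byte, and object keys are bare 16-bit-length-prefixed strings with no marker (the AMF_DATA_TYPE_STRING marker is written only once, before "onMetaData"). A rough sketch of those encoders, assuming IEEE-754 host doubles as av_dbl2int does; the buffer-pointer helpers here are hypothetical stand-ins for the muxer's put_amf_* functions:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint8_t *amf_put_double(uint8_t *p, double d)
{
    uint64_t v;
    int i;
    memcpy(&v, &d, 8);                  /* reinterpret the IEEE-754 bits (cf. av_dbl2int) */
    *p++ = 0x00;                        /* AMF_DATA_TYPE_NUMBER */
    for (i = 7; i >= 0; i--)
        *p++ = (uint8_t)(v >> (8 * i)); /* 8-byte big-endian payload */
    return p;
}

static uint8_t *amf_put_bool(uint8_t *p, int b)
{
    *p++ = 0x01;                        /* AMF_DATA_TYPE_BOOL */
    *p++ = (uint8_t)!!b;
    return p;
}

static uint8_t *amf_put_key(uint8_t *p, const char *s)
{
    size_t len = strlen(s);
    *p++ = (uint8_t)(len >> 8);         /* 16-bit big-endian length, no type marker */
    *p++ = (uint8_t)len;
    memcpy(p, s, len);
    return p + len;
}

int main(void)
{
    uint8_t buf[64], *p = buf;
    p = amf_put_key(p, "duration");     /* object keys are bare length-prefixed strings */
    p = amf_put_double(p, 12.5);
    p = amf_put_key(p, "stereo");
    p = amf_put_bool(p, 1);
    printf("encoded %d bytes\n", (int)(p - buf));
    return 0;
}
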
diff --git a/contrib/ffmpeg/libavformat/framehook.c b/contrib/ffmpeg/libavformat/framehook.c
index 03bbc95f6..8738f8030 100644
--- a/contrib/ffmpeg/libavformat/framehook.c
+++ b/contrib/ffmpeg/libavformat/framehook.c
@@ -57,7 +57,7 @@ int frame_hook_add(int argc, char *argv[])
fhe = av_mallocz(sizeof(*fhe));
if (!fhe) {
- return errno;
+ return AVERROR(ENOMEM);
}
fhe->Configure = dlsym(loaded, "Configure");
@@ -66,18 +66,18 @@ int frame_hook_add(int argc, char *argv[])
if (!fhe->Process) {
av_log(NULL, AV_LOG_ERROR, "Failed to find Process entrypoint in %s\n", argv[0]);
- return -1;
+ return AVERROR(ENOENT);
}
if (!fhe->Configure && argc > 1) {
av_log(NULL, AV_LOG_ERROR, "Failed to find Configure entrypoint in %s\n", argv[0]);
- return -1;
+ return AVERROR(ENOENT);
}
if (argc > 1 || fhe->Configure) {
if (fhe->Configure(&fhe->ctx, argc, argv)) {
av_log(NULL, AV_LOG_ERROR, "Failed to Configure %s\n", argv[0]);
- return -1;
+ return AVERROR(EINVAL);
}
}
@@ -93,11 +93,10 @@ int frame_hook_add(int argc, char *argv[])
#endif
}
-void frame_hook_process(AVPicture *pict, enum PixelFormat pix_fmt, int width, int height)
+void frame_hook_process(AVPicture *pict, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
{
if (first_hook) {
FrameHookEntry *fhe;
- int64_t pts = av_gettime();
for (fhe = first_hook; fhe; fhe = fhe->next) {
fhe->Process(fhe->ctx, pict, pix_fmt, width, height, pts);
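
This hunk, and the gifdec.c/grab.c hunks below, replace ad-hoc -1 and raw errno returns with AVERROR(...) codes. A tiny sketch of the convention, assuming the era's simple negate-the-errno definition of the macro:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifndef AVERROR
#define AVERROR(e) (-(e))   /* assumption: matches the definition of this era */
#endif

static int allocate_buffer(void **p, size_t size)
{
    *p = malloc(size);
    if (!*p)
        return AVERROR(ENOMEM);        /* negative, so "ret < 0" means failure */
    return 0;
}

int main(void)
{
    void *p = NULL;
    int ret = allocate_buffer(&p, 16);
    if (ret < 0)
        fprintf(stderr, "error: %s\n", strerror(-ret));
    free(p);
    return 0;
}
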
diff --git a/contrib/ffmpeg/libavformat/framehook.h b/contrib/ffmpeg/libavformat/framehook.h
index d843ddb85..06ed4f889 100644
--- a/contrib/ffmpeg/libavformat/framehook.h
+++ b/contrib/ffmpeg/libavformat/framehook.h
@@ -22,6 +22,8 @@
#ifndef _FRAMEHOOK_H
#define _FRAMEHOOK_H
+#warning VHOOK is deprecated, please help port libmpcodecs or a better filter system to FFmpeg instead of wasting your time writing new filters for this crappy one
+
/*
* Prototypes for interface to .so that implement a video processing hook
*/
@@ -44,7 +46,7 @@ typedef FrameHookRelease *FrameHookReleaseFn;
extern FrameHookRelease Release;
extern int frame_hook_add(int argc, char *argv[]);
-extern void frame_hook_process(struct AVPicture *pict, enum PixelFormat pix_fmt, int width, int height);
+extern void frame_hook_process(struct AVPicture *pict, enum PixelFormat pix_fmt, int width, int height, int64_t pts);
extern void frame_hook_release(void);
#endif
diff --git a/contrib/ffmpeg/libavformat/gifdec.c b/contrib/ffmpeg/libavformat/gifdec.c
index 692ca6466..1d31211f6 100644
--- a/contrib/ffmpeg/libavformat/gifdec.c
+++ b/contrib/ffmpeg/libavformat/gifdec.c
@@ -305,13 +305,13 @@ static int gif_read_image(GifState *s)
/* verify that all the image is inside the screen dimensions */
if (left + width > s->screen_width ||
top + height > s->screen_height)
- return -EINVAL;
+ return AVERROR(EINVAL);
/* build the palette */
if (s->pix_fmt == PIX_FMT_RGB24) {
line = av_malloc(width);
if (!line)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
} else {
n = (1 << bits_per_pixel);
spal = palette;
@@ -537,7 +537,7 @@ static int gif_read_header(AVFormatContext * s1,
s->image_linesize = s->screen_width * 3;
s->image_buf = av_malloc(s->screen_height * s->image_linesize);
if (!s->image_buf)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
s->pix_fmt = PIX_FMT_RGB24;
/* now we are ready: build format streams */
st = av_new_stream(s1, 0);
diff --git a/contrib/ffmpeg/libavformat/grab.c b/contrib/ffmpeg/libavformat/grab.c
index 4e85772e5..5e778ecc0 100644
--- a/contrib/ffmpeg/libavformat/grab.c
+++ b/contrib/ffmpeg/libavformat/grab.c
@@ -68,7 +68,6 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
struct video_tuner tuner;
struct video_audio audio;
struct video_picture pict;
- const char *video_device;
int j;
if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
@@ -92,7 +91,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
st = av_new_stream(s1, 0);
if (!st)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
s->width = width;
@@ -100,12 +99,9 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
s->frame_rate = frame_rate;
s->frame_rate_base = frame_rate_base;
- video_device = ap->device;
- if (!video_device)
- video_device = "/dev/video";
- video_fd = open(video_device, O_RDWR);
+ video_fd = open(s1->filename, O_RDWR);
if (video_fd < 0) {
- perror(video_device);
+ perror(s1->filename);
goto fail;
}
@@ -124,7 +120,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
if (ap->pix_fmt == PIX_FMT_YUV420P) {
desired_palette = VIDEO_PALETTE_YUV420P;
desired_depth = 12;
- } else if (ap->pix_fmt == PIX_FMT_YUV422) {
+ } else if (ap->pix_fmt == PIX_FMT_YUYV422) {
desired_palette = VIDEO_PALETTE_YUV422;
desired_depth = 16;
} else if (ap->pix_fmt == PIX_FMT_BGR24) {
@@ -174,12 +170,13 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
pict.palette=VIDEO_PALETTE_RGB24;
pict.depth=24;
ret = ioctl(video_fd, VIDIOCSPICT, &pict);
- if (ret < 0)
+ if (ret < 0) {
pict.palette=VIDEO_PALETTE_GREY;
pict.depth=8;
ret = ioctl(video_fd, VIDIOCSPICT, &pict);
if (ret < 0)
goto fail1;
+ }
}
}
}
@@ -219,8 +216,11 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
} else {
s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,0);
if ((unsigned char*)-1 == s->video_buf) {
- perror("mmap");
- goto fail;
+ s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_PRIVATE,video_fd,0);
+ if ((unsigned char*)-1 == s->video_buf) {
+ perror("mmap");
+ goto fail;
+ }
}
s->gb_frame = 0;
s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
@@ -256,7 +256,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
break;
case VIDEO_PALETTE_YUV422:
frame_size = width * height * 2;
- st->codec->pix_fmt = PIX_FMT_YUV422;
+ st->codec->pix_fmt = PIX_FMT_YUYV422;
break;
case VIDEO_PALETTE_RGB24:
frame_size = width * height * 3;
@@ -321,16 +321,16 @@ static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
struct timespec ts;
/* Calculate the time of the next frame */
- s->time_frame += int64_t_C(1000000);
+ s->time_frame += INT64_C(1000000);
/* wait based on the frame rate */
for(;;) {
curtime = av_gettime();
delay = s->time_frame * s->frame_rate_base / s->frame_rate - curtime;
if (delay <= 0) {
- if (delay < int64_t_C(-1000000) * s->frame_rate_base / s->frame_rate) {
+ if (delay < INT64_C(-1000000) * s->frame_rate_base / s->frame_rate) {
/* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
- s->time_frame += int64_t_C(1000000);
+ s->time_frame += INT64_C(1000000);
}
break;
}
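
The pacing loop above keeps the target time scaled by frame_rate/frame_rate_base, so stepping to the next frame is a constant INT64_C(1000000) increment with no per-frame rounding drift, and a frame is dropped when the computed delay is more than one frame in the past. A small self-contained sketch of that arithmetic (the 29.97 fps numbers are just an example, and now_us stands in for av_gettime()):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int frame_rate = 30000, frame_rate_base = 1001;      /* e.g. 29.97 fps */
    int64_t now_us = 0;                                   /* stand-in for av_gettime() */
    int64_t time_frame = now_us * frame_rate / frame_rate_base;
    int i;

    for (i = 0; i < 5; i++) {
        int64_t target_us, delay;

        time_frame += INT64_C(1000000);                   /* advance exactly one frame */
        target_us = time_frame * frame_rate_base / frame_rate;
        delay = target_us - now_us;
        if (delay < INT64_C(-1000000) * frame_rate_base / frame_rate)
            time_frame += INT64_C(1000000);               /* over a frame late: drop one */
        printf("frame %d due at %" PRId64 " us (delay %" PRId64 " us)\n",
               i, target_us, delay);
    }
    return 0;
}
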
diff --git a/contrib/ffmpeg/libavformat/grab_bktr.c b/contrib/ffmpeg/libavformat/grab_bktr.c
index 214599490..100653db7 100644
--- a/contrib/ffmpeg/libavformat/grab_bktr.c
+++ b/contrib/ffmpeg/libavformat/grab_bktr.c
@@ -24,21 +24,16 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
-#if defined(__FreeBSD__)
-# if __FreeBSD__ >= 502100
-# include <dev/bktr/ioctl_meteor.h>
-# include <dev/bktr/ioctl_bt848.h>
-# else
-# include <machine/ioctl_meteor.h>
-# include <machine/ioctl_bt848.h>
-# endif
-#elif defined(__FreeBSD_kernel__)
+#if defined (HAVE_DEV_BKTR_IOCTL_METEOR_H) && defined (HAVE_DEV_BKTR_IOCTL_BT848_H)
# include <dev/bktr/ioctl_meteor.h>
# include <dev/bktr/ioctl_bt848.h>
-#elif defined(__DragonFly__)
+#elif defined (HAVE_MACHINE_IOCTL_METEOR_H) && defined (HAVE_MACHINE_IOCTL_BT848_H)
+# include <machine/ioctl_meteor.h>
+# include <machine/ioctl_bt848.h>
+#elif defined (HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H) && defined (HAVE_DEV_VIDEO_METEOR_IOCTL_BT848_H)
# include <dev/video/meteor/ioctl_meteor.h>
# include <dev/video/bktr/ioctl_bt848.h>
-#else
+#elif HAVE_DEV_IC_BT8XX_H
# include <dev/ic/bt8xx.h>
#endif
#include <unistd.h>
@@ -230,7 +225,7 @@ static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
VideoData *s = s1->priv_data;
if (av_new_packet(pkt, video_buf_size) < 0)
- return -EIO;
+ return AVERROR(EIO);
bktr_getframe(s->per_frame);
@@ -248,7 +243,6 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
int frame_rate;
int frame_rate_base;
int format = -1;
- const char *video_device;
if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0)
return -1;
@@ -258,13 +252,9 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
frame_rate = ap->time_base.den;
frame_rate_base = ap->time_base.num;
- video_device = ap->device;
- if (!video_device)
- video_device = "/dev/bktr0";
-
st = av_new_stream(s1, 0);
if (!st)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */
s->width = width;
@@ -290,9 +280,9 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
format = NTSC;
}
- if (bktr_init(video_device, width, height, format,
+ if (bktr_init(s1->filename, width, height, format,
&(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0)
- return -EIO;
+ return AVERROR(EIO);
nsignals = 0;
last_frame_time = 0;
diff --git a/contrib/ffmpeg/libavformat/gxf.c b/contrib/ffmpeg/libavformat/gxf.c
index 897cdade0..ba2463ead 100644
--- a/contrib/ffmpeg/libavformat/gxf.c
+++ b/contrib/ffmpeg/libavformat/gxf.c
@@ -360,10 +360,11 @@ static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) {
}
}
if (pkt_type == PKT_UMF) {
- if (len >= 9) {
+ if (len >= 0x39) {
AVRational fps;
- len -= 9;
- url_fskip(pb, 5);
+ len -= 0x39;
+ url_fskip(pb, 5); // preamble
+ url_fskip(pb, 0x30); // payload description
fps = fps_umf2avr(get_le32(pb));
if (!main_timebase.num || !main_timebase.den) {
// this may not always be correct, but simply the best we can get
@@ -375,13 +376,11 @@ static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) {
} else
av_log(s, AV_LOG_INFO, "GXF: UMF packet missing\n");
url_fskip(pb, len);
+ if (!main_timebase.num || !main_timebase.den)
+ main_timebase = (AVRational){1, 50}; // set some arbitrary fallback
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
- if (main_timebase.num && main_timebase.den)
- st->time_base = main_timebase;
- else {
- st->start_time = st->duration = AV_NOPTS_VALUE;
- }
+ av_set_pts_info(st, 32, main_timebase.num, main_timebase.den);
}
return 0;
}
diff --git a/contrib/ffmpeg/libavformat/gxfenc.c b/contrib/ffmpeg/libavformat/gxfenc.c
index fef5ec104..39b9ed3e1 100644
--- a/contrib/ffmpeg/libavformat/gxfenc.c
+++ b/contrib/ffmpeg/libavformat/gxfenc.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
+ * along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -85,7 +85,7 @@ static const GXF_Lines gxf_lines_tab[] = {
{ 720, 6 },
};
-static const CodecTag gxf_media_types[] = {
+static const AVCodecTag gxf_media_types[] = {
{ CODEC_ID_MJPEG , 3 }, /* NTSC */
{ CODEC_ID_MJPEG , 4 }, /* PAL */
{ CODEC_ID_PCM_S24LE , 9 },
@@ -572,6 +572,8 @@ static int gxf_write_umf_packet(ByteIOContext *pb, GXFContext *ctx)
return updatePacketSize(pb, pos);
}
+#define GXF_NODELAY -5000
+
static int gxf_write_header(AVFormatContext *s)
{
ByteIOContext *pb = &s->pb;
@@ -623,7 +625,8 @@ static int gxf_write_header(AVFormatContext *s)
gxf->flags |= 0x00000040;
}
gxf->sample_rate = sc->sample_rate;
- av_set_pts_info(st, 64, 1, sc->sample_rate);
+ av_set_pts_info(st, 64, 1, st->codec->time_base.den);
+ sc->dts_delay = GXF_NODELAY;
if (gxf_find_lines_index(sc) < 0)
sc->lines_index = -1;
sc->sample_size = st->codec->bit_rate;
@@ -707,7 +710,7 @@ static int gxf_parse_mpeg_frame(GXFStreamContext *sc, const uint8_t *buf, int si
static int gxf_write_media_preamble(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt, int size)
{
GXFStreamContext *sc = &ctx->streams[pkt->stream_index];
- int64_t dts = av_rescale(pkt->dts, ctx->sample_rate, sc->sample_rate);
+ int64_t dts = av_rescale(pkt->dts, ctx->sample_rate, sc->codec->time_base.den);
put_byte(pb, sc->media_type);
put_byte(pb, sc->index);
@@ -799,13 +802,9 @@ static int gxf_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *pk
break; /* add pkt right now into list */
}
}
- } else if (pkt) {
- /* adjust dts if negative */
- if (pkt->dts < 0 && !sc->dts_delay) {
- /* XXX: rescale if codec time base is different from stream time base */
- sc->dts_delay = av_rescale_q(pkt->dts, st->codec->time_base, st->time_base);
- pkt->dts = sc->dts_delay; /* set to 0 */
- }
+ } else if (pkt && pkt->stream_index == i) {
+ if (sc->dts_delay == GXF_NODELAY) /* adjust dts if needed */
+ sc->dts_delay = pkt->dts;
pkt->dts -= sc->dts_delay;
}
}
diff --git a/contrib/ffmpeg/libavformat/http.c b/contrib/ffmpeg/libavformat/http.c
index 34dd5031a..e057d6efe 100644
--- a/contrib/ffmpeg/libavformat/http.c
+++ b/contrib/ffmpeg/libavformat/http.c
@@ -20,15 +20,7 @@
*/
#include "avformat.h"
#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#ifndef __BEOS__
-# include <arpa/inet.h>
-#else
-# include "barpainet.h"
-#endif
-#include <netdb.h>
+#include "network.h"
#include "base64.h"
@@ -40,40 +32,34 @@
/* used for protocol handling */
#define BUFFER_SIZE 1024
#define URL_SIZE 4096
+#define MAX_REDIRECTS 8
typedef struct {
URLContext *hd;
unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end;
int line_count;
int http_code;
+ offset_t off, filesize;
char location[URL_SIZE];
} HTTPContext;
static int http_connect(URLContext *h, const char *path, const char *hoststr,
- const char *auth);
+ const char *auth, int *new_location);
static int http_write(URLContext *h, uint8_t *buf, int size);
/* return non zero if error */
-static int http_open(URLContext *h, const char *uri, int flags)
+static int http_open_cnx(URLContext *h)
{
const char *path, *proxy_path;
char hostname[1024], hoststr[1024];
char auth[1024];
char path1[1024];
char buf[1024];
- int port, use_proxy, err;
- HTTPContext *s;
+ int port, use_proxy, err, location_changed = 0, redirects = 0;
+ HTTPContext *s = h->priv_data;
URLContext *hd = NULL;
- h->is_streamed = 1;
-
- s = av_malloc(sizeof(HTTPContext));
- if (!s) {
- return -ENOMEM;
- }
- h->priv_data = s;
-
proxy_path = getenv("http_proxy");
use_proxy = (proxy_path != NULL) && !getenv("no_proxy") &&
strstart(proxy_path, "http://", NULL);
@@ -82,7 +68,7 @@ static int http_open(URLContext *h, const char *uri, int flags)
redo:
/* needed in any case to build the host string */
url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
- path1, sizeof(path1), uri);
+ path1, sizeof(path1), s->location);
if (port > 0) {
snprintf(hoststr, sizeof(hoststr), "%s:%d", hostname, port);
} else {
@@ -92,7 +78,7 @@ static int http_open(URLContext *h, const char *uri, int flags)
if (use_proxy) {
url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
NULL, 0, proxy_path);
- path = uri;
+ path = s->location;
} else {
if (path1[0] == '\0')
path = "/";
@@ -108,22 +94,44 @@ static int http_open(URLContext *h, const char *uri, int flags)
goto fail;
s->hd = hd;
- if (http_connect(h, path, hoststr, auth) < 0)
+ if (http_connect(h, path, hoststr, auth, &location_changed) < 0)
goto fail;
- if (s->http_code == 303 && s->location[0] != '\0') {
+ if ((s->http_code == 302 || s->http_code == 303) && location_changed == 1) {
/* url moved, get next */
- uri = s->location;
url_close(hd);
+ if (redirects++ >= MAX_REDIRECTS)
+ return AVERROR_IO;
+ location_changed = 0;
goto redo;
}
return 0;
fail:
if (hd)
url_close(hd);
- av_free(s);
return AVERROR_IO;
}
+static int http_open(URLContext *h, const char *uri, int flags)
+{
+ HTTPContext *s;
+ int ret;
+
+ h->is_streamed = 1;
+
+ s = av_malloc(sizeof(HTTPContext));
+ if (!s) {
+ return AVERROR(ENOMEM);
+ }
+ h->priv_data = s;
+ s->filesize = -1;
+ s->off = 0;
+ pstrcpy (s->location, URL_SIZE, uri);
+
+ ret = http_open_cnx(h);
+ if (ret != 0)
+ av_free (s);
+ return ret;
+}
static int http_getc(HTTPContext *s)
{
int len;
@@ -141,8 +149,10 @@ static int http_getc(HTTPContext *s)
return *s->buf_ptr++;
}
-static int process_line(HTTPContext *s, char *line, int line_count)
+static int process_line(URLContext *h, char *line, int line_count,
+ int *new_location)
{
+ HTTPContext *s = h->priv_data;
char *tag, *p;
/* end of header */
@@ -159,6 +169,9 @@ static int process_line(HTTPContext *s, char *line, int line_count)
#ifdef DEBUG
printf("http_code=%d\n", s->http_code);
#endif
+ /* error codes are 4xx and 5xx */
+ if (s->http_code >= 400 && s->http_code < 600)
+ return -1;
} else {
while (*p != '\0' && *p != ':')
p++;
@@ -172,34 +185,52 @@ static int process_line(HTTPContext *s, char *line, int line_count)
p++;
if (!strcmp(tag, "Location")) {
strcpy(s->location, p);
+ *new_location = 1;
+ } else if (!strcmp (tag, "Content-Length") && s->filesize == -1) {
+ s->filesize = atoll(p);
+ } else if (!strcmp (tag, "Content-Range")) {
+ /* "bytes $from-$to/$document_size" */
+ const char *slash;
+ if (!strncmp (p, "bytes ", 6)) {
+ p += 6;
+ s->off = atoll(p);
+ if ((slash = strchr(p, '/')) && strlen(slash) > 0)
+ s->filesize = atoll(slash+1);
+ }
+ h->is_streamed = 0; /* we _can_ in fact seek */
}
}
return 1;
}
static int http_connect(URLContext *h, const char *path, const char *hoststr,
- const char *auth)
+ const char *auth, int *new_location)
{
HTTPContext *s = h->priv_data;
int post, err, ch;
char line[1024], *q;
char *auth_b64;
+ int auth_b64_len = strlen(auth)* 4 / 3 + 12;
+ offset_t off = s->off;
/* send http header */
post = h->flags & URL_WRONLY;
-
- auth_b64 = av_base64_encode((uint8_t *)auth, strlen(auth));
+ auth_b64 = av_malloc(auth_b64_len);
+ av_base64_encode(auth_b64, auth_b64_len, (uint8_t *)auth, strlen(auth));
snprintf(s->buffer, sizeof(s->buffer),
- "%s %s HTTP/1.0\r\n"
+ "%s %s HTTP/1.1\r\n"
"User-Agent: %s\r\n"
"Accept: */*\r\n"
+ "Range: bytes=%"PRId64"-\r\n"
"Host: %s\r\n"
"Authorization: Basic %s\r\n"
+ "Connection: close\r\n"
"\r\n",
post ? "POST" : "GET",
path,
LIBAVFORMAT_IDENT,
+ s->off,
hoststr,
auth_b64);
@@ -211,7 +242,8 @@ static int http_connect(URLContext *h, const char *path, const char *hoststr,
s->buf_ptr = s->buffer;
s->buf_end = s->buffer;
s->line_count = 0;
- s->location[0] = '\0';
+ s->off = 0;
+ s->filesize = -1;
if (post) {
sleep(1);
return 0;
@@ -231,11 +263,11 @@ static int http_connect(URLContext *h, const char *path, const char *hoststr,
#ifdef DEBUG
printf("header='%s'\n", line);
#endif
- err = process_line(s, line, s->line_count);
+ err = process_line(h, line, s->line_count, new_location);
if (err < 0)
return err;
if (err == 0)
- return 0;
+ break;
s->line_count++;
q = line;
} else {
@@ -243,6 +275,8 @@ static int http_connect(URLContext *h, const char *path, const char *hoststr,
*q++ = ch;
}
}
+
+ return (off == s->off) ? 0 : -1;
}
@@ -261,6 +295,8 @@ static int http_read(URLContext *h, uint8_t *buf, int size)
} else {
len = url_read(s->hd, buf, size);
}
+ if (len > 0)
+ s->off += len;
return len;
}
@@ -279,11 +315,40 @@ static int http_close(URLContext *h)
return 0;
}
+static offset_t http_seek(URLContext *h, offset_t off, int whence)
+{
+ HTTPContext *s = h->priv_data;
+ URLContext *old_hd = s->hd;
+ offset_t old_off = s->off;
+
+ if (whence == AVSEEK_SIZE)
+ return s->filesize;
+ else if ((s->filesize == -1 && whence == SEEK_END) || h->is_streamed)
+ return -1;
+
+ /* we save the old context in case the seek fails */
+ s->hd = NULL;
+ if (whence == SEEK_CUR)
+ off += s->off;
+ else if (whence == SEEK_END)
+ off += s->filesize;
+ s->off = off;
+
+ /* if it fails, continue on old connection */
+ if (http_open_cnx(h) < 0) {
+ s->hd = old_hd;
+ s->off = old_off;
+ return -1;
+ }
+ url_close(old_hd);
+ return off;
+}
+
URLProtocol http_protocol = {
"http",
http_open,
http_read,
http_write,
- NULL, /* seek */
+ http_seek,
http_close,
};
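
Seeking over HTTP now works by reopening the connection with a Range: bytes=<off>- request and trusting the Content-Range reply ("bytes <from>-<to>/<total>") to confirm the offset and learn the file size. A sketch of that reply parsing, mirroring the logic added to process_line() above but as a standalone hypothetical helper:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_content_range(const char *p, long long *off, long long *filesize)
{
    const char *slash;

    if (strncmp(p, "bytes ", 6))
        return -1;
    p += 6;
    *off = atoll(p);                       /* start offset of the returned range */
    if ((slash = strchr(p, '/')) && slash[1])
        *filesize = atoll(slash + 1);      /* total document size, when given */
    return 0;
}

int main(void)
{
    long long off = 0, filesize = -1;
    if (!parse_content_range("bytes 1048576-2097151/8388608", &off, &filesize))
        printf("offset=%lld filesize=%lld\n", off, filesize);
    return 0;
}
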
diff --git a/contrib/ffmpeg/libavformat/idcin.c b/contrib/ffmpeg/libavformat/idcin.c
index 48d1e250d..e2c92f3b4 100644
--- a/contrib/ffmpeg/libavformat/idcin.c
+++ b/contrib/ffmpeg/libavformat/idcin.c
@@ -109,27 +109,27 @@ static int idcin_probe(AVProbeData *p)
return 0;
/* check the video width */
- number = LE_32(&p->buf[0]);
+ number = AV_RL32(&p->buf[0]);
if ((number == 0) || (number > 1024))
return 0;
/* check the video height */
- number = LE_32(&p->buf[4]);
+ number = AV_RL32(&p->buf[4]);
if ((number == 0) || (number > 1024))
return 0;
/* check the audio sample rate */
- number = LE_32(&p->buf[8]);
+ number = AV_RL32(&p->buf[8]);
if ((number != 0) && ((number < 8000) | (number > 48000)))
return 0;
/* check the audio bytes/sample */
- number = LE_32(&p->buf[12]);
+ number = AV_RL32(&p->buf[12]);
if (number > 2)
return 0;
/* check the audio channels */
- number = LE_32(&p->buf[16]);
+ number = AV_RL32(&p->buf[16]);
if (number > 2)
return 0;
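
This file (and idroq.c/ipmovie.c below) switches from the old LE_16/LE_32 macros to AV_RL16/AV_RL32. Functionally these are little-endian byte reads that are safe on any alignment and host endianness; a sketch of equivalent helpers, not the library's actual macro bodies:

#include <stdint.h>
#include <stdio.h>

static uint32_t rl32(const uint8_t *p)
{
    return (uint32_t)p[0]        | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t rl16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
    const uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };
    printf("0x%08X 0x%04X\n", (unsigned)rl32(buf), (unsigned)rl16(buf));  /* 0x12345678 0x5678 */
    return 0;
}
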
diff --git a/contrib/ffmpeg/libavformat/idroq.c b/contrib/ffmpeg/libavformat/idroq.c
index 419696c9a..b8ee176ab 100644
--- a/contrib/ffmpeg/libavformat/idroq.c
+++ b/contrib/ffmpeg/libavformat/idroq.c
@@ -61,8 +61,8 @@ static int roq_probe(AVProbeData *p)
if (p->buf_size < 6)
return 0;
- if ((LE_16(&p->buf[0]) != RoQ_MAGIC_NUMBER) ||
- (LE_32(&p->buf[2]) != 0xFFFFFFFF))
+ if ((AV_RL16(&p->buf[0]) != RoQ_MAGIC_NUMBER) ||
+ (AV_RL32(&p->buf[2]) != 0xFFFFFFFF))
return 0;
return AVPROBE_SCORE_MAX;
@@ -83,7 +83,7 @@ static int roq_read_header(AVFormatContext *s,
if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
- roq->framerate = LE_16(&preamble[6]);
+ roq->framerate = AV_RL16(&preamble[6]);
roq->frame_pts_inc = 90000 / roq->framerate;
/* init private context parameters */
@@ -96,8 +96,8 @@ static int roq_read_header(AVFormatContext *s,
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
- chunk_type = LE_16(&preamble[0]);
- chunk_size = LE_32(&preamble[2]);
+ chunk_type = AV_RL16(&preamble[0]);
+ chunk_size = AV_RL32(&preamble[2]);
switch (chunk_type) {
@@ -106,8 +106,8 @@ static int roq_read_header(AVFormatContext *s,
if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
- roq->width = LE_16(&preamble[0]);
- roq->height = LE_16(&preamble[2]);
+ roq->width = AV_RL16(&preamble[0]);
+ roq->height = AV_RL16(&preamble[2]);
break;
case RoQ_QUAD_CODEBOOK:
@@ -127,7 +127,7 @@ static int roq_read_header(AVFormatContext *s,
break;
default:
- av_log(s, AV_LOG_ERROR, " unknown RoQ chunk type (%04X)\n", LE_16(&preamble[0]));
+ av_log(s, AV_LOG_ERROR, " unknown RoQ chunk type (%04X)\n", AV_RL16(&preamble[0]));
return AVERROR_INVALIDDATA;
break;
}
@@ -196,8 +196,8 @@ static int roq_read_packet(AVFormatContext *s,
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
- chunk_type = LE_16(&preamble[0]);
- chunk_size = LE_32(&preamble[2]);
+ chunk_type = AV_RL16(&preamble[0]);
+ chunk_size = AV_RL32(&preamble[2]);
if(chunk_size > INT_MAX)
return AVERROR_INVALIDDATA;
@@ -216,7 +216,7 @@ static int roq_read_packet(AVFormatContext *s,
if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
- chunk_size = LE_32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
+ chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
codebook_size;
/* rewind */
diff --git a/contrib/ffmpeg/libavformat/img.c b/contrib/ffmpeg/libavformat/img.c
deleted file mode 100644
index 5223c691e..000000000
--- a/contrib/ffmpeg/libavformat/img.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Image format
- * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-
-typedef struct {
- int width;
- int height;
- int img_first;
- int img_last;
- int img_number;
- int img_count;
- int img_size;
- AVImageFormat *img_fmt;
- int pix_fmt;
- int is_pipe;
- char path[1024];
- /* temporary usage */
- void *ptr;
-} VideoData;
-
-
-/* return -1 if no image found */
-static int find_image_range(int *pfirst_index, int *plast_index,
- const char *path)
-{
- char buf[1024];
- int range, last_index, range1, first_index;
-
- /* find the first image */
- for(first_index = 0; first_index < 5; first_index++) {
- if (av_get_frame_filename(buf, sizeof(buf), path, first_index) < 0)
- goto fail;
- if (url_exist(buf))
- break;
- }
- if (first_index == 5)
- goto fail;
-
- /* find the last image */
- last_index = first_index;
- for(;;) {
- range = 0;
- for(;;) {
- if (!range)
- range1 = 1;
- else
- range1 = 2 * range;
- if (av_get_frame_filename(buf, sizeof(buf), path,
- last_index + range1) < 0)
- goto fail;
- if (!url_exist(buf))
- break;
- range = range1;
- /* just in case... */
- if (range >= (1 << 30))
- goto fail;
- }
- /* we are sure than image last_index + range exists */
- if (!range)
- break;
- last_index += range;
- }
- *pfirst_index = first_index;
- *plast_index = last_index;
- return 0;
- fail:
- return -1;
-}
-
-
-static int image_probe(AVProbeData *p)
-{
- if (av_filename_number_test(p->filename) && guess_image_format(p->filename))
- return AVPROBE_SCORE_MAX-1;
- else
- return 0;
-}
-
-static int read_header_alloc_cb(void *opaque, AVImageInfo *info)
-{
- VideoData *s = opaque;
-
- s->width = info->width;
- s->height = info->height;
- s->pix_fmt = info->pix_fmt;
- /* stop image reading but no error */
- return 1;
-}
-
-static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
-{
- VideoData *s = s1->priv_data;
- int ret, first_index, last_index;
- char buf[1024];
- ByteIOContext pb1, *f = &pb1;
- AVStream *st;
-
- st = av_new_stream(s1, 0);
- if (!st) {
- return -ENOMEM;
- }
-
- if (ap->image_format)
- s->img_fmt = ap->image_format;
-
- pstrcpy(s->path, sizeof(s->path), s1->filename);
- s->img_number = 0;
- s->img_count = 0;
-
- /* find format */
- if (s1->iformat->flags & AVFMT_NOFILE)
- s->is_pipe = 0;
- else
- s->is_pipe = 1;
-
- if (!ap->time_base.num) {
- st->codec->time_base= (AVRational){1,25};
- } else {
- st->codec->time_base= ap->time_base;
- }
-
- if (!s->is_pipe) {
- if (find_image_range(&first_index, &last_index, s->path) < 0)
- goto fail;
- s->img_first = first_index;
- s->img_last = last_index;
- s->img_number = first_index;
- /* compute duration */
- st->start_time = 0;
- st->duration = last_index - first_index + 1;
- if (av_get_frame_filename(buf, sizeof(buf), s->path, s->img_number) < 0)
- goto fail;
- if (url_fopen(f, buf, URL_RDONLY) < 0)
- goto fail;
- } else {
- f = &s1->pb;
- }
-
- ret = av_read_image(f, s1->filename, s->img_fmt, read_header_alloc_cb, s);
- if (ret < 0)
- goto fail1;
-
- if (!s->is_pipe) {
- url_fclose(f);
- } else {
- url_fseek(f, 0, SEEK_SET);
- }
-
- st->codec->codec_type = CODEC_TYPE_VIDEO;
- st->codec->codec_id = CODEC_ID_RAWVIDEO;
- st->codec->width = s->width;
- st->codec->height = s->height;
- st->codec->pix_fmt = s->pix_fmt;
- s->img_size = avpicture_get_size(s->pix_fmt, (s->width+15)&(~15), (s->height+15)&(~15));
-
- return 0;
- fail1:
- if (!s->is_pipe)
- url_fclose(f);
- fail:
- return AVERROR_IO;
-}
-
-static int read_packet_alloc_cb(void *opaque, AVImageInfo *info)
-{
- VideoData *s = opaque;
-
- if (info->width != s->width ||
- info->height != s->height)
- return -1;
- avpicture_fill(&info->pict, s->ptr, info->pix_fmt, (info->width+15)&(~15), (info->height+15)&(~15));
- return 0;
-}
-
-static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
-{
- VideoData *s = s1->priv_data;
- char filename[1024];
- int ret;
- ByteIOContext f1, *f;
-
- if (!s->is_pipe) {
- /* loop over input */
- if (s1->loop_input && s->img_number > s->img_last) {
- s->img_number = s->img_first;
- }
- if (av_get_frame_filename(filename, sizeof(filename),
- s->path, s->img_number) < 0)
- return AVERROR_IO;
- f = &f1;
- if (url_fopen(f, filename, URL_RDONLY) < 0)
- return AVERROR_IO;
- } else {
- f = &s1->pb;
- if (url_feof(f))
- return AVERROR_IO;
- }
-
- av_new_packet(pkt, s->img_size);
- pkt->stream_index = 0;
-
- s->ptr = pkt->data;
- ret = av_read_image(f, filename, s->img_fmt, read_packet_alloc_cb, s);
- if (!s->is_pipe) {
- url_fclose(f);
- }
-
- if (ret < 0) {
- av_free_packet(pkt);
- return AVERROR_IO; /* signal EOF */
- } else {
- /* XXX: computing this pts is not necessary as it is done in
- the generic code too */
- pkt->pts = av_rescale((int64_t)s->img_count * s1->streams[0]->codec->time_base.num, s1->streams[0]->time_base.den, s1->streams[0]->codec->time_base.den) / s1->streams[0]->time_base.num;
- s->img_count++;
- s->img_number++;
- return 0;
- }
-}
-
-static int img_read_close(AVFormatContext *s1)
-{
- return 0;
-}
-
-/******************************************************/
-/* image output */
-
-static int img_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
-{
- VideoData *img = s->priv_data;
- AVStream *st;
- AVImageFormat *img_fmt;
- int i;
-
- /* find output image format */
- if (ap->image_format) {
- img_fmt = ap->image_format;
- } else {
- img_fmt = guess_image_format(s->filename);
- }
- if (!img_fmt)
- return -1;
-
- if (s->nb_streams != 1)
- return -1;
-
- st = s->streams[0];
- /* we select the first matching format */
- for(i=0;i<PIX_FMT_NB;i++) {
- if (img_fmt->supported_pixel_formats & (1 << i))
- break;
- }
- if (i >= PIX_FMT_NB)
- return -1;
- img->img_fmt = img_fmt;
- img->pix_fmt = i;
- st->codec->pix_fmt = img->pix_fmt;
- return 0;
-}
-
-static int img_write_header(AVFormatContext *s)
-{
- VideoData *img = s->priv_data;
-
- img->img_number = 1;
- pstrcpy(img->path, sizeof(img->path), s->filename);
-
- /* find format */
- if (s->oformat->flags & AVFMT_NOFILE)
- img->is_pipe = 0;
- else
- img->is_pipe = 1;
-
- return 0;
-}
-
-static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
-{
- VideoData *img = s->priv_data;
- AVStream *st = s->streams[pkt->stream_index];
- ByteIOContext pb1, *pb;
- AVPicture *picture;
- int width, height, ret;
- char filename[1024];
- AVImageInfo info;
-
- width = st->codec->width;
- height = st->codec->height;
-
- picture = (AVPicture *)pkt->data;
-
- if (!img->is_pipe) {
- if (av_get_frame_filename(filename, sizeof(filename),
- img->path, img->img_number) < 0)
- return AVERROR_IO;
- pb = &pb1;
- if (url_fopen(pb, filename, URL_WRONLY) < 0)
- return AVERROR_IO;
- } else {
- pb = &s->pb;
- }
- info.width = width;
- info.height = height;
- info.pix_fmt = st->codec->pix_fmt;
- info.interleaved = 0; /* FIXME: there should be a way to set it right */
- info.pict = *picture;
- ret = av_write_image(pb, img->img_fmt, &info);
- if (!img->is_pipe) {
- url_fclose(pb);
- }
-
- img->img_number++;
- return 0;
-}
-
-static int img_write_trailer(AVFormatContext *s)
-{
- return 0;
-}
-
-/* input */
-#ifdef CONFIG_IMAGE_DEMUXER
-AVInputFormat image_demuxer = {
- "image",
- "image sequence",
- sizeof(VideoData),
- image_probe,
- img_read_header,
- img_read_packet,
- img_read_close,
- NULL,
- NULL,
- AVFMT_NOFILE | AVFMT_NEEDNUMBER,
-};
-#endif
-#ifdef CONFIG_IMAGEPIPE_DEMUXER
-AVInputFormat imagepipe_demuxer = {
- "imagepipe",
- "piped image sequence",
- sizeof(VideoData),
- NULL, /* no probe */
- img_read_header,
- img_read_packet,
- img_read_close,
- NULL,
-};
-#endif
-
-/* output */
-#ifdef CONFIG_IMAGE_MUXER
-AVOutputFormat image_muxer = {
- "image",
- "image sequence",
- "",
- "",
- sizeof(VideoData),
- CODEC_ID_NONE,
- CODEC_ID_RAWVIDEO,
- img_write_header,
- img_write_packet,
- img_write_trailer,
- AVFMT_NOFILE | AVFMT_NEEDNUMBER | AVFMT_RAWPICTURE,
- img_set_parameters,
-};
-#endif
-#ifdef CONFIG_IMAGEPIPE_MUXER
-AVOutputFormat imagepipe_muxer = {
- "imagepipe",
- "piped image sequence",
- "",
- "",
- sizeof(VideoData),
- CODEC_ID_NONE,
- CODEC_ID_RAWVIDEO,
- img_write_header,
- img_write_packet,
- img_write_trailer,
- AVFMT_RAWPICTURE,
- img_set_parameters,
-};
-#endif
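
img.c is removed here, but the find_image_range() technique it used (and which img2.c keeps) is worth noting: the last frame of a numbered sequence is found by a galloping search that doubles the probe distance while the file exists, commits the largest distance that worked, and repeats until not even +1 exists. A self-contained sketch, with a fake existence test standing in for url_exist():

#include <stdio.h>

/* stand-in for url_exist(): pretend frames 0..236 exist */
static int frame_exists(int index)
{
    return index >= 0 && index <= 236;
}

static int find_last_index(int first_index)
{
    int last_index = first_index;

    for (;;) {
        int range = 0, range1;
        for (;;) {
            range1 = range ? 2 * range : 1;    /* double the probe distance */
            if (!frame_exists(last_index + range1))
                break;
            range = range1;
            if (range >= (1 << 30))            /* just in case, as in the original */
                return -1;
        }
        if (!range)                            /* not even +1 exists: done */
            break;
        last_index += range;                   /* commit the largest distance that worked */
    }
    return last_index;
}

int main(void)
{
    printf("last index = %d\n", find_last_index(0));   /* prints 236 */
    return 0;
}
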
diff --git a/contrib/ffmpeg/libavformat/img2.c b/contrib/ffmpeg/libavformat/img2.c
index 303190ad2..fa67ee742 100644
--- a/contrib/ffmpeg/libavformat/img2.c
+++ b/contrib/ffmpeg/libavformat/img2.c
@@ -154,10 +154,13 @@ static int find_image_range(int *pfirst_index, int *plast_index,
static int image_probe(AVProbeData *p)
{
- if (av_filename_number_test(p->filename) && av_str2id(img_tags, p->filename))
- return AVPROBE_SCORE_MAX;
- else
- return 0;
+ if (p->filename && av_str2id(img_tags, p->filename)) {
+ if (av_filename_number_test(p->filename))
+ return AVPROBE_SCORE_MAX;
+ else
+ return AVPROBE_SCORE_MAX/2;
+ }
+ return 0;
}
enum CodecID av_guess_image2_codec(const char *filename){
@@ -174,7 +177,7 @@ static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
st = av_new_stream(s1, 0);
if (!st) {
- return -ENOMEM;
+ return AVERROR(ENOMEM);
}
pstrcpy(s->path, sizeof(s->path), s1->filename);
diff --git a/contrib/ffmpeg/libavformat/ipmovie.c b/contrib/ffmpeg/libavformat/ipmovie.c
index 3c0459938..975bfd36b 100644
--- a/contrib/ffmpeg/libavformat/ipmovie.c
+++ b/contrib/ffmpeg/libavformat/ipmovie.c
@@ -227,7 +227,7 @@ static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
/* see if there are any pending packets */
chunk_type = load_ipmovie_packet(s, pb, pkt);
- if ((chunk_type == CHUNK_VIDEO) && (chunk_type != CHUNK_DONE))
+ if (chunk_type != CHUNK_DONE)
return chunk_type;
/* read the next chunk, wherever the file happens to be pointing */
@@ -236,8 +236,8 @@ static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
CHUNK_PREAMBLE_SIZE)
return CHUNK_BAD;
- chunk_size = LE_16(&chunk_preamble[0]);
- chunk_type = LE_16(&chunk_preamble[2]);
+ chunk_size = AV_RL16(&chunk_preamble[0]);
+ chunk_type = AV_RL16(&chunk_preamble[2]);
debug_ipmovie("chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);
@@ -287,7 +287,7 @@ static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
break;
}
- opcode_size = LE_16(&opcode_preamble[0]);
+ opcode_size = AV_RL16(&opcode_preamble[0]);
opcode_type = opcode_preamble[2];
opcode_version = opcode_preamble[3];
@@ -325,10 +325,10 @@ static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
chunk_type = CHUNK_BAD;
break;
}
- s->fps = 1000000.0 / (LE_32(&scratch[0]) * LE_16(&scratch[4]));
+ s->fps = 1000000.0 / (AV_RL32(&scratch[0]) * AV_RL16(&scratch[4]));
s->frame_pts_inc = 90000 / s->fps;
debug_ipmovie(" %.2f frames/second (timer div = %d, subdiv = %d)\n",
- s->fps, LE_32(&scratch[0]), LE_16(&scratch[4]));
+ s->fps, AV_RL32(&scratch[0]), AV_RL16(&scratch[4]));
break;
case OPCODE_INIT_AUDIO_BUFFERS:
@@ -343,8 +343,8 @@ static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
chunk_type = CHUNK_BAD;
break;
}
- s->audio_sample_rate = LE_16(&scratch[4]);
- audio_flags = LE_16(&scratch[2]);
+ s->audio_sample_rate = AV_RL16(&scratch[4]);
+ audio_flags = AV_RL16(&scratch[2]);
/* bit 0 of the flags: 0 = mono, 1 = stereo */
s->audio_channels = (audio_flags & 1) + 1;
/* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
@@ -381,8 +381,8 @@ static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
chunk_type = CHUNK_BAD;
break;
}
- s->video_width = LE_16(&scratch[0]) * 8;
- s->video_height = LE_16(&scratch[2]) * 8;
+ s->video_width = AV_RL16(&scratch[0]) * 8;
+ s->video_height = AV_RL16(&scratch[2]) * 8;
debug_ipmovie("video resolution: %d x %d\n",
s->video_width, s->video_height);
break;
@@ -442,8 +442,8 @@ static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
}
/* load the palette into internal data structure */
- first_color = LE_16(&scratch[0]);
- last_color = first_color + LE_16(&scratch[2]) - 1;
+ first_color = AV_RL16(&scratch[0]);
+ last_color = first_color + AV_RL16(&scratch[2]) - 1;
/* sanity check (since they are 16 bit values) */
if ((first_color > 0xFF) || (last_color > 0xFF)) {
debug_ipmovie("demux_ipmovie: set_palette indices out of range (%d -> %d)\n",
@@ -542,7 +542,7 @@ static int ipmovie_read_header(AVFormatContext *s,
if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
CHUNK_PREAMBLE_SIZE)
return AVERROR_IO;
- chunk_type = LE_16(&chunk_preamble[2]);
+ chunk_type = AV_RL16(&chunk_preamble[2]);
url_fseek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);
if (chunk_type == CHUNK_VIDEO)
@@ -601,8 +601,10 @@ static int ipmovie_read_packet(AVFormatContext *s,
ret = AVERROR_IO;
else if (ret == CHUNK_NOMEM)
ret = AVERROR_NOMEM;
- else
+ else if (ret == CHUNK_VIDEO)
ret = 0;
+ else
+ ret = -1;
return ret;
}
diff --git a/contrib/ffmpeg/libavformat/isom.c b/contrib/ffmpeg/libavformat/isom.c
index d4e923853..f913bc0a9 100644
--- a/contrib/ffmpeg/libavformat/isom.c
+++ b/contrib/ffmpeg/libavformat/isom.c
@@ -25,8 +25,8 @@
#include "riff.h"
#include "isom.h"
-/* http://gpac.sourceforge.net/tutorial/mediatypes.htm */
-const CodecTag ff_mov_obj_type[] = {
+/* http://www.mp4ra.org */
+const AVCodecTag ff_mp4_obj_type[] = {
{ CODEC_ID_MPEG4 , 32 },
{ CODEC_ID_H264 , 33 },
{ CODEC_ID_AAC , 64 },
@@ -39,12 +39,15 @@ const CodecTag ff_mov_obj_type[] = {
{ CODEC_ID_AAC , 102 }, /* MPEG2 AAC Main */
{ CODEC_ID_AAC , 103 }, /* MPEG2 AAC Low */
{ CODEC_ID_AAC , 104 }, /* MPEG2 AAC SSR */
- { CODEC_ID_MP3 , 105 },
- { CODEC_ID_MPEG1VIDEO, 106 },
- { CODEC_ID_MP2 , 107 },
- { CODEC_ID_MJPEG , 108 },
- { CODEC_ID_PCM_S16LE , 224 },
+ { CODEC_ID_MP3 , 105 }, /* 13818-3 */
+ { CODEC_ID_MPEG1VIDEO, 106 }, /* 11172-2 */
+ { CODEC_ID_MP3 , 107 }, /* 11172-3 */
+ { CODEC_ID_MJPEG , 108 }, /* 10918-1 */
+ { CODEC_ID_PNG , 109 },
+ { CODEC_ID_JPEG2000 , 110 }, /* 15444-1 */
+ { CODEC_ID_VC1 , 163 },
{ CODEC_ID_VORBIS , 221 },
+ { CODEC_ID_PCM_S16LE , 224 },
{ CODEC_ID_QCELP , 225 },
{ CODEC_ID_AC3 , 226 },
{ CODEC_ID_PCM_ALAW , 227 },
@@ -55,6 +58,119 @@ const CodecTag ff_mov_obj_type[] = {
{ 0, 0 },
};
+const AVCodecTag codec_movvideo_tags[] = {
+/* { CODEC_ID_, MKTAG('I', 'V', '5', '0') }, *//* Indeo 5.0 */
+
+ { CODEC_ID_RAWVIDEO, MKTAG('r', 'a', 'w', ' ') }, /* Uncompressed RGB */
+/* { CODEC_ID_RAWVIDEO, MKTAG('Y', 'u', 'v', '2') }, *//* Uncompressed YUV422 */
+ { CODEC_ID_RAWVIDEO, MKTAG('A', 'V', 'U', 'I') }, /* YUV with alpha-channel (AVID Uncompressed) */
+ { CODEC_ID_RAWVIDEO, MKTAG('2', 'v', 'u', 'y') }, /* UNCOMPRESSED 8BIT 4:2:2 */
+
+ { CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') }, /* PhotoJPEG */
+ { CODEC_ID_MJPEG, MKTAG('m', 'j', 'p', 'a') }, /* Motion-JPEG (format A) */
+ { CODEC_ID_MJPEG, MKTAG('A', 'V', 'D', 'J') }, /* MJPEG with alpha-channel (AVID JFIF meridien compressed) */
+/* { CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') }, *//* MJPEG with alpha-channel (AVID ABVB/Truevision NuVista) */
+ { CODEC_ID_MJPEGB, MKTAG('m', 'j', 'p', 'b') }, /* Motion-JPEG (format B) */
+
+ { CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') }, /* Sorenson Video v1 */
+ { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') }, /* Sorenson Video v1 */
+ { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', 'i') }, /* Sorenson Video v1 (from QT specs)*/
+ { CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') }, /* Sorenson Video v3 */
+
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') }, /* OpenDiVX *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
+ { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
+ { CODEC_ID_MPEG4, MKTAG('3', 'I', 'V', '2') }, /* experimental: 3IVX files before ivx D4 4.5.1 */
+
+ { CODEC_ID_H263, MKTAG('h', '2', '6', '3') }, /* H263 */
+ { CODEC_ID_H263, MKTAG('s', '2', '6', '3') }, /* H263 ?? works */
+
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('A', 'V', 'd', 'v') }, /* AVID DV */
+ //{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '5') }, /* DVCPRO HD 50i produced by FCP */
+ //{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '6') }, /* DVCPRO HD 60i produced by FCP */
+
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, /* On2 VP3 */
+ { CODEC_ID_RPZA, MKTAG('r', 'p', 'z', 'a') }, /* Apple Video (RPZA) */
+ { CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') }, /* Cinepak */
+ { CODEC_ID_8BPS, MKTAG('8', 'B', 'P', 'S') }, /* Planar RGB (8BPS) */
+ { CODEC_ID_SMC, MKTAG('s', 'm', 'c', ' ') }, /* Apple Graphics (SMC) */
+ { CODEC_ID_QTRLE, MKTAG('r', 'l', 'e', ' ') }, /* Apple Animation (RLE) */
+ { CODEC_ID_MSRLE, MKTAG('W', 'R', 'L', 'E') },
+ { CODEC_ID_QDRAW, MKTAG('q', 'd', 'r', 'w') }, /* QuickDraw */
+
+ { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') }, /* AVC-1/H.264 */
+
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'e', 'g') }, /* MPEG */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '1') }, /* HDV 720p30 */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '2') }, /* MPEG2 produced by Sony HD camera */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '3') }, /* HDV produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'n') }, /* MPEG2 IMX NTSC 525/60 50mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'p') }, /* MPEG2 IMX PAL 625/50 50mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'n') }, /* MPEG2 IMX NTSC 525/60 30mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'p') }, /* MPEG2 IMX PAL 625/50 30mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('A', 'V', 'm', 'p') }, /* AVID IMX PAL */
+
+ //{ CODEC_ID_JPEG2000, MKTAG('m', 'j', 'p', '2') }, /* JPEG 2000 produced by FCP */
+
+ { CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') }, /* Truevision Targa */
+ { CODEC_ID_TIFF, MKTAG('t', 'i', 'f', 'f') }, /* TIFF embedded in MOV */
+ { CODEC_ID_GIF, MKTAG('g', 'i', 'f', ' ') }, /* embedded gif files as frames (usually one "click to play movie" frame) */
+ { CODEC_ID_PNG, MKTAG('p', 'n', 'g', ' ') },
+
+ { CODEC_ID_VC1, MKTAG('v', 'c', '-', '1') }, /* SMPTE RP 2025 */
+
+ { CODEC_ID_DNXHD, MKTAG('A', 'V', 'd', 'n') }, /* AVID DNxHD */
+
+ { CODEC_ID_NONE, 0 },
+};
+
+const AVCodecTag codec_movaudio_tags[] = {
+ { CODEC_ID_PCM_S32BE, MKTAG('i', 'n', '3', '2') },
+ { CODEC_ID_PCM_S32LE, MKTAG('i', 'n', '3', '2') },
+ { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
+ { CODEC_ID_PCM_S24LE, MKTAG('i', 'n', '2', '4') },
+ { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') }, /* 16 bits */
+ { CODEC_ID_PCM_S16BE, MKTAG('N', 'O', 'N', 'E') }, /* uncompressed */
+ { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') }, /* */
+ { CODEC_ID_PCM_S16LE, MKTAG('l', 'p', 'c', 'm') },
+ { CODEC_ID_PCM_S8, MKTAG('s', 'o', 'w', 't') },
+ { CODEC_ID_PCM_U8, MKTAG('r', 'a', 'w', ' ') }, /* 8 bits unsigned */
+ { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') }, /* */
+ { CODEC_ID_PCM_ALAW, MKTAG('a', 'l', 'a', 'w') }, /* */
+
+ { CODEC_ID_ADPCM_IMA_QT, MKTAG('i', 'm', 'a', '4') }, /* IMA-4 ADPCM */
+ { CODEC_ID_ADPCM_MS, MKTAG('m', 's', 0x00, 0x02) }, /* MS ADPCM */
+
+ { CODEC_ID_MACE3, MKTAG('M', 'A', 'C', '3') }, /* Macintosh Audio Compression and Expansion 3:1 */
+ { CODEC_ID_MACE6, MKTAG('M', 'A', 'C', '6') }, /* Macintosh Audio Compression and Expansion 6:1 */
+
+ { CODEC_ID_MP3, MKTAG('.', 'm', 'p', '3') }, /* MPEG layer 3 */ /* sample files at http://www.3ivx.com/showcase.html use this tag */
+ { CODEC_ID_MP3, 0x6D730055 }, /* MPEG layer 3 */
+ { CODEC_ID_MP3, MKTAG('m', 's', 0x00, 0x55) }, /* MPEG layer 3 *//* XXX: check endianness */
+
+/* { CODEC_ID_OGG_VORBIS, MKTAG('O', 'g', 'g', 'S') }, *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
+
+ { CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') }, /* MPEG-4 AAC */
+
+ { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') }, /* AMR-NB 3gp */
+ { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') }, /* AMR-WB 3gp */
+
+ { CODEC_ID_AC3, MKTAG('m', 's', 0x20, 0x00) }, /* Dolby AC-3 */
+
+ { CODEC_ID_ALAC, MKTAG('a', 'l', 'a', 'c') }, /* Apple Lossless */
+ { CODEC_ID_QDM2, MKTAG('Q', 'D', 'M', '2') }, /* QDM2 */
+
+ { CODEC_ID_DVAUDIO, MKTAG('v', 'd', 'v', 'a') },
+ { CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'c', 'a') },
+
+ { CODEC_ID_NONE, 0 },
+};
+
/* map numeric codes from mdhd atom to ISO 639 */
/* cf. QTFileFormat.pdf p253, qtff.pdf p205 */
/* http://developer.apple.com/documentation/mac/Text/Text-368.html */
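
The new codec_movvideo_tags/codec_movaudio_tags tables are {codec id, fourcc} pairs terminated by a CODEC_ID_NONE entry, consumed by linear lookups in the riff.c style. A hypothetical sketch of such a lookup; the struct and enum names here are illustrative stand-ins, not libavformat's real definitions:

#include <stdio.h>

#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))

typedef struct CodecTagSketch {        /* hypothetical stand-in for AVCodecTag */
    int      id;
    unsigned tag;
} CodecTagSketch;

enum { ID_NONE = 0, ID_MJPEG = 1, ID_SVQ3 = 2, ID_H264 = 3 };

static const CodecTagSketch mov_video_tags[] = {
    { ID_MJPEG, MKTAG('j','p','e','g') },
    { ID_SVQ3,  MKTAG('S','V','Q','3') },
    { ID_H264,  MKTAG('a','v','c','1') },
    { ID_NONE,  0 },                   /* terminator, like CODEC_ID_NONE above */
};

static unsigned tag_for_id(const CodecTagSketch *tags, int id)
{
    for (; tags->id != ID_NONE; tags++)
        if (tags->id == id)
            return tags->tag;
    return 0;
}

int main(void)
{
    unsigned tag = tag_for_id(mov_video_tags, ID_H264);
    printf("%c%c%c%c\n", (int)(tag & 0xff), (int)((tag >> 8) & 0xff),
           (int)((tag >> 16) & 0xff), (int)((tag >> 24) & 0xff));   /* prints avc1 */
    return 0;
}
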
diff --git a/contrib/ffmpeg/libavformat/isom.h b/contrib/ffmpeg/libavformat/isom.h
index 85cbbdc6c..efcb1fc42 100644
--- a/contrib/ffmpeg/libavformat/isom.h
+++ b/contrib/ffmpeg/libavformat/isom.h
@@ -25,7 +25,9 @@
#define FFMPEG_ISOM_H
/* isom.c */
-extern const CodecTag ff_mov_obj_type[];
+extern const AVCodecTag ff_mp4_obj_type[];
+extern const AVCodecTag codec_movvideo_tags[];
+extern const AVCodecTag codec_movaudio_tags[];
int ff_mov_iso639_to_lang(const char *lang, int mp4);
int ff_mov_lang_to_iso639(int code, char *to);
diff --git a/contrib/ffmpeg/libavformat/jpeg.c b/contrib/ffmpeg/libavformat/jpeg.c
deleted file mode 100644
index b5fc043c9..000000000
--- a/contrib/ffmpeg/libavformat/jpeg.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * JPEG image format
- * Copyright (c) 2003 Fabrice Bellard.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-
-static int jpeg_probe(AVProbeData *pd)
-{
- if (pd->buf_size >= 64 &&
- pd->buf[0] == 0xff && pd->buf[1] == 0xd8 && pd->buf[2] == 0xff)
- return AVPROBE_SCORE_MAX;
- else
- return 0;
-}
-
-typedef struct JpegOpaque {
- int (*alloc_cb)(void *opaque, AVImageInfo *info);
- void *opaque;
- int ret_code;
-} JpegOpaque;
-
-/* called by the codec to allocate the image */
-static int jpeg_get_buffer(AVCodecContext *c, AVFrame *picture)
-{
- JpegOpaque *jctx = c->opaque;
- AVImageInfo info1, *info = &info1;
- int ret, i;
-
- info->width = c->width;
- info->height = c->height;
- switch(c->pix_fmt) {
- case PIX_FMT_YUV420P:
- info->pix_fmt = PIX_FMT_YUVJ420P;
- break;
- case PIX_FMT_YUV422P:
- info->pix_fmt = PIX_FMT_YUVJ422P;
- break;
- case PIX_FMT_YUV444P:
- info->pix_fmt = PIX_FMT_YUVJ444P;
- break;
- default:
- return -1;
- }
- ret = jctx->alloc_cb(jctx->opaque, info);
- if (ret) {
- jctx->ret_code = ret;
- return -1;
- } else {
- for(i=0;i<3;i++) {
- picture->data[i] = info->pict.data[i];
- picture->linesize[i] = info->pict.linesize[i];
- }
- return 0;
- }
-}
-
-static void jpeg_img_copy(uint8_t *dst, int dst_wrap,
- uint8_t *src, int src_wrap,
- int width, int height)
-{
- for(;height > 0; height--) {
- memcpy(dst, src, width);
- dst += dst_wrap;
- src += src_wrap;
- }
-}
-
-/* XXX: libavcodec is broken for truncated jpegs! */
-#define IO_BUF_SIZE (1024*1024)
-
-static int jpeg_read(ByteIOContext *f,
- int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
-{
- AVCodecContext *c;
- AVFrame *picture, picture1;
- int len, size, got_picture, i;
- uint8_t *inbuf_ptr, inbuf[IO_BUF_SIZE];
- JpegOpaque jctx;
-
- jctx.alloc_cb = alloc_cb;
- jctx.opaque = opaque;
- jctx.ret_code = -1; /* default return code is error */
-
- c = avcodec_alloc_context();
- if (!c)
- return -1;
- picture= avcodec_alloc_frame();
- if (!picture) {
- av_free(c);
- return -1;
- }
- c->opaque = &jctx;
- c->get_buffer = jpeg_get_buffer;
- c->flags |= CODEC_FLAG_TRUNCATED; /* we dont send complete frames */
- if (avcodec_open(c, &mjpeg_decoder) < 0)
- goto fail1;
- for(;;) {
- size = get_buffer(f, inbuf, sizeof(inbuf));
- if (size == 0)
- break;
- inbuf_ptr = inbuf;
- while (size > 0) {
- len = avcodec_decode_video(c, &picture1, &got_picture,
- inbuf_ptr, size);
- if (len < 0)
- goto fail;
- if (got_picture)
- goto the_end;
- size -= len;
- inbuf_ptr += len;
- }
- }
- the_end:
- /* XXX: currently, the mjpeg decoder does not use AVFrame, so we
- must do it by hand */
- if (jpeg_get_buffer(c, picture) < 0)
- goto fail;
- for(i=0;i<3;i++) {
- int w, h;
- w = c->width;
- h = c->height;
- if (i >= 1) {
- switch(c->pix_fmt) {
- default:
- case PIX_FMT_YUV420P:
- w = (w + 1) >> 1;
- h = (h + 1) >> 1;
- break;
- case PIX_FMT_YUV422P:
- w = (w + 1) >> 1;
- break;
- case PIX_FMT_YUV444P:
- break;
- }
- }
- jpeg_img_copy(picture->data[i], picture->linesize[i],
- picture1.data[i], picture1.linesize[i],
- w, h);
- }
- jctx.ret_code = 0;
- fail:
- avcodec_close(c);
- fail1:
- av_free(picture);
- av_free(c);
- return jctx.ret_code;
-}
-
-#if defined(CONFIG_MUXERS) && defined(CONFIG_MJPEG_ENCODER)
-static int jpeg_write(ByteIOContext *pb, AVImageInfo *info)
-{
- AVCodecContext *c;
- uint8_t *outbuf = NULL;
- int outbuf_size, ret, size, i;
- AVFrame *picture;
-
- ret = -1;
- c = avcodec_alloc_context();
- if (!c)
- return -1;
- picture = avcodec_alloc_frame();
- if (!picture)
- goto fail2;
- c->width = info->width;
- c->height = info->height;
- /* XXX: currently move that to the codec ? */
- switch(info->pix_fmt) {
- case PIX_FMT_YUVJ420P:
- c->pix_fmt = PIX_FMT_YUV420P;
- break;
- case PIX_FMT_YUVJ422P:
- c->pix_fmt = PIX_FMT_YUV422P;
- break;
- case PIX_FMT_YUVJ444P:
- c->pix_fmt = PIX_FMT_YUV444P;
- break;
- default:
- goto fail1;
- }
- for(i=0;i<3;i++) {
- picture->data[i] = info->pict.data[i];
- picture->linesize[i] = info->pict.linesize[i];
- }
- /* set the quality */
- picture->quality = 3; /* XXX: a parameter should be used */
- c->flags |= CODEC_FLAG_QSCALE;
-
- if (avcodec_open(c, &mjpeg_encoder) < 0)
- goto fail1;
-
- /* XXX: needs to sort out that size problem */
- outbuf_size = 1000000;
- outbuf = av_malloc(outbuf_size);
-
- size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
- if (size < 0)
- goto fail;
- put_buffer(pb, outbuf, size);
- put_flush_packet(pb);
- ret = 0;
-
- fail:
- avcodec_close(c);
- av_free(outbuf);
- fail1:
- av_free(picture);
- fail2:
- av_free(c);
- return ret;
-}
-#endif //CONFIG_MUXERS
-
-AVImageFormat jpeg_image_format = {
- "jpeg",
- "jpg,jpeg",
- jpeg_probe,
- jpeg_read,
- (1 << PIX_FMT_YUVJ420P) | (1 << PIX_FMT_YUVJ422P) | (1 << PIX_FMT_YUVJ444P),
-#if defined(CONFIG_MUXERS) && defined(CONFIG_MJPEG_ENCODER)
- jpeg_write,
-#else
- NULL,
-#endif //CONFIG_MUXERS
-};
diff --git a/contrib/ffmpeg/libavformat/libnut.c b/contrib/ffmpeg/libavformat/libnut.c
index d4e7201ab..0f7b879a9 100644
--- a/contrib/ffmpeg/libavformat/libnut.c
+++ b/contrib/ffmpeg/libavformat/libnut.c
@@ -1,3 +1,30 @@
+/*
+ * NUT (de)muxing via libnut
+ * copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libnut.c
+ * NUT demuxing and muxing via libnut.
+ * @author Oded Shimon <ods15@ods15.dyndns.org>
+ */
+
#include "avformat.h"
#include "riff.h"
#include <libnut.h>
@@ -10,7 +37,7 @@ typedef struct {
nut_stream_header_t * s;
} NUTContext;
-static const CodecTag nut_tags[] = {
+static const AVCodecTag nut_tags[] = {
{ CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
{ CODEC_ID_MP3, MKTAG('m', 'p', '3', ' ') },
{ CODEC_ID_VORBIS, MKTAG('v', 'r', 'b', 's') },
@@ -48,7 +75,7 @@ static int nut_write_header(AVFormatContext * avf) {
AVCodecContext * codec = avf->streams[i]->codec;
int j;
int fourcc = 0;
- int nom, denom, ssize;
+ int num, denom, ssize;
s[i].type = codec->codec_type == CODEC_TYPE_VIDEO ? NUT_VIDEO_CLASS : NUT_AUDIO_CLASS;
@@ -56,19 +83,19 @@ static int nut_write_header(AVFormatContext * avf) {
else fourcc = codec_get_tag(nut_tags, codec->codec_id);
if (!fourcc) {
- if (codec->codec_type == CODEC_TYPE_VIDEO) fourcc = codec_get_bmp_tag(codec->codec_id);
- if (codec->codec_type == CODEC_TYPE_AUDIO) fourcc = codec_get_wav_tag(codec->codec_id);
+ if (codec->codec_type == CODEC_TYPE_VIDEO) fourcc = codec_get_tag(codec_bmp_tags, codec->codec_id);
+ if (codec->codec_type == CODEC_TYPE_AUDIO) fourcc = codec_get_tag(codec_wav_tags, codec->codec_id);
}
s[i].fourcc_len = 4;
s[i].fourcc = av_malloc(s[i].fourcc_len);
for (j = 0; j < s[i].fourcc_len; j++) s[i].fourcc[j] = (fourcc >> (j*8)) & 0xFF;
- ff_parse_specific_params(codec, &nom, &ssize, &denom);
- av_set_pts_info(avf->streams[i], 60, denom, nom);
+ ff_parse_specific_params(codec, &num, &ssize, &denom);
+ av_set_pts_info(avf->streams[i], 60, denom, num);
- s[i].time_base.nom = denom;
- s[i].time_base.den = nom;
+ s[i].time_base.num = denom;
+ s[i].time_base.den = num;
s[i].fixed_fps = 0;
s[i].decode_delay = codec->has_b_frames;
@@ -82,7 +109,7 @@ static int nut_write_header(AVFormatContext * avf) {
s[i].sample_height = 0;
s[i].colorspace_type = 0;
} else {
- s[i].samplerate_nom = codec->sample_rate;
+ s[i].samplerate_num = codec->sample_rate;
s[i].samplerate_denom = 1;
s[i].channel_count = codec->channels;
}
@@ -123,7 +150,7 @@ static int nut_write_trailer(AVFormatContext * avf) {
return 0;
}
-AVOutputFormat nut_muxer = {
+AVOutputFormat libnut_muxer = {
"nut",
"nut format",
"video/x-nut",
@@ -199,7 +226,7 @@ static int nut_read_header(AVFormatContext * avf, AVFormatParameters * ap) {
memcpy(st->codec->extradata, s[i].codec_specific, st->codec->extradata_size);
}
- av_set_pts_info(avf->streams[i], 60, s[i].time_base.nom, s[i].time_base.den);
+ av_set_pts_info(avf->streams[i], 60, s[i].time_base.num, s[i].time_base.den);
st->start_time = 0;
st->duration = s[i].max_pts;
@@ -208,14 +235,14 @@ static int nut_read_header(AVFormatContext * avf, AVFormatParameters * ap) {
switch(s[i].type) {
case NUT_AUDIO_CLASS:
st->codec->codec_type = CODEC_TYPE_AUDIO;
- if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_wav_id(st->codec->codec_tag);
+ if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_id(codec_wav_tags, st->codec->codec_tag);
st->codec->channels = s[i].channel_count;
- st->codec->sample_rate = s[i].samplerate_nom / s[i].samplerate_denom;
+ st->codec->sample_rate = s[i].samplerate_num / s[i].samplerate_denom;
break;
case NUT_VIDEO_CLASS:
st->codec->codec_type = CODEC_TYPE_VIDEO;
- if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_bmp_id(st->codec->codec_tag);
+ if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_id(codec_bmp_tags, st->codec->codec_tag);
st->codec->width = s[i].width;
st->codec->height = s[i].height;
@@ -255,7 +282,7 @@ static int nut_read_packet(AVFormatContext * avf, AVPacket * pkt) {
static int nut_read_seek(AVFormatContext * avf, int stream_index, int64_t target_ts, int flags) {
NUTContext * priv = avf->priv_data;
int active_streams[] = { stream_index, -1 };
- double time_pos = target_ts * priv->s[stream_index].time_base.nom / (double)priv->s[stream_index].time_base.den;
+ double time_pos = target_ts * priv->s[stream_index].time_base.num / (double)priv->s[stream_index].time_base.den;
if (nut_seek(priv->nut, time_pos, 2*!(flags & AVSEEK_FLAG_BACKWARD), active_streams)) return -1;
@@ -270,7 +297,7 @@ static int nut_read_close(AVFormatContext *s) {
return 0;
}
-AVInputFormat nut_demuxer = {
+AVInputFormat libnut_demuxer = {
"nut",
"nut format",
sizeof(NUTContext),
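The libnut changes above swap the old codec_get_bmp_*/codec_get_wav_* helpers for generic lookups over AVCodecTag tables such as codec_bmp_tags and codec_wav_tags. Conceptually each table is a zero-terminated list of {codec id, fourcc} pairs; the sketch below shows the kind of linear scan involved, using stand-in names (tag_entry, find_tag_for_id) rather than the exact riff.h types and signatures.

    #include <stdint.h>

    /* Stand-in for an AVCodecTag-style table entry. */
    struct tag_entry { int codec_id; uint32_t fourcc; };

    /* Sketch of a codec-id -> fourcc lookup; the tag -> id direction is the
     * same scan with the comparison flipped. */
    static uint32_t find_tag_for_id(const struct tag_entry *tab, int codec_id)
    {
        for (; tab->codec_id; tab++)     /* table ends at CODEC_ID_NONE (0) */
            if (tab->codec_id == codec_id)
                return tab->fourcc;      /* e.g. MKTAG('m','p','4','v') */
        return 0;                        /* caller may try another table */
    }

nut_write_header() then splits whatever fourcc it finds into bytes least-significant first, which is what the (fourcc >> (j*8)) & 0xFF loop above does.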
diff --git a/contrib/ffmpeg/libavformat/matroska.c b/contrib/ffmpeg/libavformat/matroska.c
index 0cd119e71..591530490 100644
--- a/contrib/ffmpeg/libavformat/matroska.c
+++ b/contrib/ffmpeg/libavformat/matroska.c
@@ -29,7 +29,7 @@
*/
#include "avformat.h"
-/* For codec_get_bmp_id and codec_get_wav_id. */
+/* For codec_get_id(). */
#include "riff.h"
#include "intfloat_readwrite.h"
@@ -139,6 +139,7 @@
/* IDs in the cluster master */
#define MATROSKA_ID_CLUSTERTIMECODE 0xE7
#define MATROSKA_ID_BLOCKGROUP 0xA0
+#define MATROSKA_ID_SIMPLEBLOCK 0xA3
/* IDs in the blockgroup master */
#define MATROSKA_ID_BLOCK 0xA1
@@ -177,6 +178,7 @@ typedef enum {
MATROSKA_TRACK_DEFAULT = (1<<1),
MATROSKA_TRACK_LACING = (1<<2),
MATROSKA_TRACK_REAL_V = (1<<4),
+ MATROSKA_TRACK_REORDER = (1<<8),
MATROSKA_TRACK_SHIFT = (1<<16)
} MatroskaTrackFlags;
@@ -211,6 +213,7 @@ static CodecTags codec_tags[]={
{"V_REAL/RV20" , CODEC_ID_RV20},
{"V_REAL/RV30" , CODEC_ID_RV30},
{"V_REAL/RV40" , CODEC_ID_RV40},
+ {"V_THEORA" , CODEC_ID_THEORA},
/* TODO: Real/Quicktime */
// {"A_MS/ACM" , CODEC_ID_NONE},
@@ -252,7 +255,7 @@ typedef struct Track {
unsigned char *codec_priv;
int codec_priv_size;
- int64_t default_duration;
+ uint64_t default_duration;
MatroskaTrackFlags flags;
} MatroskaTrack;
@@ -288,6 +291,10 @@ typedef struct MatroskaSubtitleTrack {
//..
} MatroskaSubtitleTrack;
+#define MAX_TRACK_SIZE (FFMAX(FFMAX(sizeof(MatroskaVideoTrack), \
+ sizeof(MatroskaAudioTrack)), \
+ sizeof(MatroskaSubtitleTrack)))
+
typedef struct MatroskaLevel {
uint64_t start, length;
} MatroskaLevel;
@@ -331,6 +338,10 @@ typedef struct MatroskaDemuxContext {
/* The packet queue. */
AVPacket **packets;
int num_packets;
+ /* Second packet queue used to reorder pts of some video track. */
+ AVPacket **packets_reorder;
+ int num_packets_reorder;
+ uint64_t reorder_max_pts;
/* have we already parsed metadata/cues/clusters? */
int metadata_parsed,
@@ -602,7 +613,6 @@ ebml_read_sint (MatroskaDemuxContext *matroska,
negative = 1;
*num &= ~0x80;
}
- *num = 0;
while (n++ < size)
*num = (*num << 8) | get_byte(pb);
@@ -1017,6 +1027,43 @@ matroska_queue_packet (MatroskaDemuxContext *matroska,
}
/*
+ * Put a packet into our internal reordering queue. Will be moved to the
+ * main packet queue when enough packets are available to reorder pts.
+ */
+
+static void
+matroska_queue_packet_reordered (MatroskaDemuxContext *matroska,
+ AVPacket *pkt,
+ int is_bframe)
+{
+ if (matroska->num_packets_reorder && !is_bframe
+ && pkt->pts > matroska->reorder_max_pts) {
+ /* reorder pts */
+ int i, j, k = 1;
+ for (j=matroska->num_packets_reorder-1; j && k; j--) {
+ k = 0;
+ for (i=0; i<j; i++) {
+ if (matroska->packets_reorder[i]->pts > matroska->packets_reorder[i+1]->pts) {
+ FFSWAP(uint64_t, matroska->packets_reorder[i]->pts, matroska->packets_reorder[i+1]->pts);
+ k = 1;
+ }
+ }
+ }
+ /* then really queue the packets */
+ for (i=0; i<matroska->num_packets_reorder; i++)
+ matroska_queue_packet (matroska, matroska->packets_reorder[i]);
+ matroska->num_packets_reorder = 0;
+ }
+ matroska->packets_reorder =
+ av_realloc(matroska->packets_reorder,
+ (matroska->num_packets_reorder + 1) * sizeof(AVPacket *));
+ matroska->packets_reorder[matroska->num_packets_reorder++] = pkt;
+ if (pkt->pts > matroska->reorder_max_pts)
+ matroska->reorder_max_pts = pkt->pts;
+}
+
+
+/*
* Autodetecting...
*/
@@ -1055,7 +1102,7 @@ matroska_probe (AVProbeData *p)
* we don't parse the whole header but simply check for the
* availability of that array of characters inside the header.
* Not fully fool-proof, but good enough. */
- for (n = 4 + size; n < 4 + size + total - sizeof(probe_data); n++)
+ for (n = 4 + size; n <= 4 + size + total - sizeof(probe_data); n++)
if (!memcmp (&p->buf[n], probe_data, sizeof(probe_data)))
return AVPROBE_SCORE_MAX;
@@ -1164,7 +1211,7 @@ matroska_add_stream (MatroskaDemuxContext *matroska)
av_log(matroska->ctx, AV_LOG_DEBUG, "parsing track, adding stream..,\n");
/* Allocate a generic track. As soon as we know its type we'll realloc. */
- track = av_mallocz(sizeof(MatroskaTrack));
+ track = av_mallocz(MAX_TRACK_SIZE);
matroska->num_tracks++;
/* start with the master */
@@ -1203,30 +1250,19 @@ matroska_add_stream (MatroskaDemuxContext *matroska)
/* track type (video, audio, combined, subtitle, etc.) */
case MATROSKA_ID_TRACKTYPE: {
uint64_t num;
- if (track->type != 0) {
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ if (track->type && track->type != num) {
av_log(matroska->ctx, AV_LOG_INFO,
"More than one tracktype in an entry - skip\n");
break;
}
- if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
- break;
track->type = num;
- /* ok, so we're actually going to reallocate this thing */
switch (track->type) {
case MATROSKA_TRACK_TYPE_VIDEO:
- track = (MatroskaTrack *)
- av_realloc(track, sizeof(MatroskaVideoTrack));
- break;
case MATROSKA_TRACK_TYPE_AUDIO:
- track = (MatroskaTrack *)
- av_realloc(track, sizeof(MatroskaAudioTrack));
- ((MatroskaAudioTrack *)track)->channels = 1;
- ((MatroskaAudioTrack *)track)->samplerate = 8000;
- break;
case MATROSKA_TRACK_TYPE_SUBTITLE:
- track = (MatroskaTrack *)
- av_realloc(track, sizeof(MatroskaSubtitleTrack));
break;
case MATROSKA_TRACK_TYPE_COMPLEX:
case MATROSKA_TRACK_TYPE_LOGO:
@@ -1245,6 +1281,8 @@ matroska_add_stream (MatroskaDemuxContext *matroska)
/* tracktype specific stuff for video */
case MATROSKA_ID_TRACKVIDEO: {
MatroskaVideoTrack *videotrack;
+ if (!track->type)
+ track->type = MATROSKA_TRACK_TYPE_VIDEO;
if (track->type != MATROSKA_TRACK_TYPE_VIDEO) {
av_log(matroska->ctx, AV_LOG_INFO,
"video data in non-video track - ignoring\n");
@@ -1270,7 +1308,7 @@ matroska_add_stream (MatroskaDemuxContext *matroska)
if ((res = ebml_read_uint (matroska, &id,
&num)) < 0)
break;
- track->default_duration = num;
+ track->default_duration = num/matroska->time_scale;
break;
}
@@ -1280,7 +1318,7 @@ matroska_add_stream (MatroskaDemuxContext *matroska)
if ((res = ebml_read_float(matroska, &id,
&num)) < 0)
break;
- track->default_duration = 1000000000 * (1. / num);
+ track->default_duration = 1000000000/(matroska->time_scale*num);
break;
}
@@ -1412,6 +1450,8 @@ matroska_add_stream (MatroskaDemuxContext *matroska)
/* tracktype specific stuff for audio */
case MATROSKA_ID_TRACKAUDIO: {
MatroskaAudioTrack *audiotrack;
+ if (!track->type)
+ track->type = MATROSKA_TRACK_TYPE_AUDIO;
if (track->type != MATROSKA_TRACK_TYPE_AUDIO) {
av_log(matroska->ctx, AV_LOG_INFO,
"audio data in non-audio track - ignoring\n");
@@ -1420,6 +1460,8 @@ matroska_add_stream (MatroskaDemuxContext *matroska)
} else if ((res = ebml_read_master(matroska, &id)) < 0)
break;
audiotrack = (MatroskaAudioTrack *)track;
+ audiotrack->channels = 1;
+ audiotrack->samplerate = 8000;
while (res == 0) {
if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
@@ -1579,7 +1621,7 @@ matroska_add_stream (MatroskaDemuxContext *matroska)
uint64_t num;
if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
break;
- track->default_duration = num;
+ track->default_duration = num / matroska->time_scale;
break;
}
@@ -2076,9 +2118,9 @@ matroska_read_header (AVFormatContext *s,
return AVERROR_NOFMT;
}
av_free(doctype);
- if (version != 1) {
+ if (version > 2) {
av_log(matroska->ctx, AV_LOG_ERROR,
- "Matroska demuxer version 1 too old for file version %d\n",
+ "Matroska demuxer version 2 too old for file version %d\n",
version);
return AVERROR_NOFMT;
}
@@ -2226,7 +2268,7 @@ matroska_read_header (AVFormatContext *s,
p = (unsigned char *)track->codec_priv + 16;
((MatroskaVideoTrack *)track)->fourcc = (p[3] << 24) |
(p[2] << 16) | (p[1] << 8) | p[0];
- codec_id = codec_get_bmp_id(((MatroskaVideoTrack *)track)->fourcc);
+ codec_id = codec_get_id(codec_bmp_tags, ((MatroskaVideoTrack *)track)->fourcc);
}
@@ -2242,7 +2284,7 @@ matroska_read_header (AVFormatContext *s,
/* Offset of wFormatTag. Stored in LE. */
p = (unsigned char *)track->codec_priv;
tag = (p[1] << 8) | p[0];
- codec_id = codec_get_wav_id(tag);
+ codec_id = codec_get_id(codec_wav_tags, tag);
}
@@ -2264,6 +2306,7 @@ matroska_read_header (AVFormatContext *s,
} else {
extradata_size = 2;
}
+ track->default_duration = 1024*1000 / audiotrack->internal_samplerate;
}
else if (codec_id == CODEC_ID_TTA) {
@@ -2308,7 +2351,7 @@ matroska_read_header (AVFormatContext *s,
if (track->default_duration)
av_reduce(&st->codec->time_base.num, &st->codec->time_base.den,
- track->default_duration, 1000000000, 30000);
+ track->default_duration, 1000, 30000);
if(extradata){
st->codec->extradata = extradata;
@@ -2338,6 +2381,7 @@ matroska_read_header (AVFormatContext *s,
st->codec->height * videotrack->display_width,
st->codec-> width * videotrack->display_height,
255);
+ st->need_parsing = 2;
} else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *)track;
@@ -2372,7 +2416,190 @@ matroska_find_track_by_num (MatroskaDemuxContext *matroska,
static inline int
rv_offset(uint8_t *data, int slice, int slices)
{
- return LE_32(data+8*slice+4) + 8*slices;
+ return AV_RL32(data+8*slice+4) + 8*slices;
+}
+
+static int
+matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data, int size,
+ int64_t pos, uint64_t cluster_time, uint64_t duration,
+ int is_keyframe, int is_bframe)
+{
+ int res = 0;
+ int track;
+ AVPacket *pkt;
+ uint8_t *origdata = data;
+ int16_t block_time;
+ uint32_t *lace_size = NULL;
+ int n, flags, laces = 0;
+ uint64_t num;
+
+ /* first byte(s): tracknum */
+ if ((n = matroska_ebmlnum_uint(data, size, &num)) < 0) {
+ av_log(matroska->ctx, AV_LOG_ERROR, "EBML block data error\n");
+ av_free(origdata);
+ return res;
+ }
+ data += n;
+ size -= n;
+
+ /* fetch track from num */
+ track = matroska_find_track_by_num(matroska, num);
+ if (size <= 3 || track < 0 || track >= matroska->num_tracks) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Invalid stream %d or size %u\n", track, size);
+ av_free(origdata);
+ return res;
+ }
+ if(matroska->ctx->streams[ matroska->tracks[track]->stream_index ]->discard >= AVDISCARD_ALL){
+ av_free(origdata);
+ return res;
+ }
+ if (duration == AV_NOPTS_VALUE)
+ duration = matroska->tracks[track]->default_duration;
+
+ /* block_time (relative to cluster time) */
+ block_time = (data[0] << 8) | data[1];
+ data += 2;
+ size -= 2;
+ flags = *data;
+ data += 1;
+ size -= 1;
+ if (is_keyframe == -1)
+ is_keyframe = flags & 1 ? PKT_FLAG_KEY : 0;
+ switch ((flags & 0x06) >> 1) {
+ case 0x0: /* no lacing */
+ laces = 1;
+ lace_size = av_mallocz(sizeof(int));
+ lace_size[0] = size;
+ break;
+
+ case 0x1: /* xiph lacing */
+ case 0x2: /* fixed-size lacing */
+ case 0x3: /* EBML lacing */
+ if (size == 0) {
+ res = -1;
+ break;
+ }
+ laces = (*data) + 1;
+ data += 1;
+ size -= 1;
+ lace_size = av_mallocz(laces * sizeof(int));
+
+ switch ((flags & 0x06) >> 1) {
+ case 0x1: /* xiph lacing */ {
+ uint8_t temp;
+ uint32_t total = 0;
+ for (n = 0; res == 0 && n < laces - 1; n++) {
+ while (1) {
+ if (size == 0) {
+ res = -1;
+ break;
+ }
+ temp = *data;
+ lace_size[n] += temp;
+ data += 1;
+ size -= 1;
+ if (temp != 0xff)
+ break;
+ }
+ total += lace_size[n];
+ }
+ lace_size[n] = size - total;
+ break;
+ }
+
+ case 0x2: /* fixed-size lacing */
+ for (n = 0; n < laces; n++)
+ lace_size[n] = size / laces;
+ break;
+
+ case 0x3: /* EBML lacing */ {
+ uint32_t total;
+ n = matroska_ebmlnum_uint(data, size, &num);
+ if (n < 0) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "EBML block data error\n");
+ break;
+ }
+ data += n;
+ size -= n;
+ total = lace_size[0] = num;
+ for (n = 1; res == 0 && n < laces - 1; n++) {
+ int64_t snum;
+ int r;
+ r = matroska_ebmlnum_sint (data, size, &snum);
+ if (r < 0) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "EBML block data error\n");
+ break;
+ }
+ data += r;
+ size -= r;
+ lace_size[n] = lace_size[n - 1] + snum;
+ total += lace_size[n];
+ }
+ lace_size[n] = size - total;
+ break;
+ }
+ }
+ break;
+ }
+
+ if (res == 0) {
+ int real_v = matroska->tracks[track]->flags & MATROSKA_TRACK_REAL_V;
+ uint64_t timecode = AV_NOPTS_VALUE;
+
+ if (cluster_time != (uint64_t)-1 && cluster_time + block_time >= 0)
+ timecode = cluster_time + block_time;
+
+ for (n = 0; n < laces; n++) {
+ int slice, slices = 1;
+
+ if (real_v) {
+ slices = *data++ + 1;
+ lace_size[n]--;
+ }
+
+ for (slice=0; slice<slices; slice++) {
+ int slice_size, slice_offset = 0;
+ if (real_v)
+ slice_offset = rv_offset(data, slice, slices);
+ if (slice+1 == slices)
+ slice_size = lace_size[n] - slice_offset;
+ else
+ slice_size = rv_offset(data, slice+1, slices) - slice_offset;
+ pkt = av_mallocz(sizeof(AVPacket));
+ /* XXX: prevent data copy... */
+ if (av_new_packet(pkt, slice_size) < 0) {
+ res = AVERROR_NOMEM;
+ n = laces-1;
+ break;
+ }
+ memcpy (pkt->data, data+slice_offset, slice_size);
+
+ if (n == 0)
+ pkt->flags = is_keyframe;
+ pkt->stream_index = matroska->tracks[track]->stream_index;
+
+ pkt->pts = timecode;
+ pkt->pos = pos;
+ pkt->duration = duration;
+
+ if (matroska->tracks[track]->flags & MATROSKA_TRACK_REORDER)
+ matroska_queue_packet_reordered(matroska, pkt, is_bframe);
+ else
+ matroska_queue_packet(matroska, pkt);
+
+ if (timecode != AV_NOPTS_VALUE)
+ timecode = duration ? timecode + duration : AV_NOPTS_VALUE;
+ }
+ data += lace_size[n];
+ }
+ }
+
+ av_free(lace_size);
+ av_free(origdata);
+ return res;
}
static int
@@ -2381,10 +2608,12 @@ matroska_parse_blockgroup (MatroskaDemuxContext *matroska,
{
int res = 0;
uint32_t id;
- AVPacket *pkt = NULL;
+ int is_bframe = 0;
int is_keyframe = PKT_FLAG_KEY, last_num_packets = matroska->num_packets;
uint64_t duration = AV_NOPTS_VALUE;
- int track = -1;
+ uint8_t *data;
+ int size = 0;
+ int64_t pos = 0;
av_log(matroska->ctx, AV_LOG_DEBUG, "parsing blockgroup...\n");
@@ -2402,194 +2631,31 @@ matroska_parse_blockgroup (MatroskaDemuxContext *matroska,
* of the harder things, so this code is a bit complicated.
* See http://www.matroska.org/ for documentation. */
case MATROSKA_ID_BLOCK: {
- uint8_t *data, *origdata;
- int size;
- int16_t block_time;
- uint32_t *lace_size = NULL;
- int n, flags, laces = 0;
- uint64_t num;
- int64_t pos= url_ftell(&matroska->ctx->pb);
-
- if ((res = ebml_read_binary(matroska, &id, &data, &size)) < 0)
- break;
- origdata = data;
-
- /* first byte(s): tracknum */
- if ((n = matroska_ebmlnum_uint(data, size, &num)) < 0) {
- av_log(matroska->ctx, AV_LOG_ERROR,
- "EBML block data error\n");
- av_free(origdata);
- break;
- }
- data += n;
- size -= n;
-
- /* fetch track from num */
- track = matroska_find_track_by_num(matroska, num);
- if (size <= 3 || track < 0 || track >= matroska->num_tracks) {
- av_log(matroska->ctx, AV_LOG_INFO,
- "Invalid stream %d or size %u\n", track, size);
- av_free(origdata);
- break;
- }
- if(matroska->ctx->streams[ matroska->tracks[track]->stream_index ]->discard >= AVDISCARD_ALL){
- av_free(origdata);
- break;
- }
-
- /* block_time (relative to cluster time) */
- block_time = (data[0] << 8) | data[1];
- data += 2;
- size -= 2;
- flags = *data;
- data += 1;
- size -= 1;
- switch ((flags & 0x06) >> 1) {
- case 0x0: /* no lacing */
- laces = 1;
- lace_size = av_mallocz(sizeof(int));
- lace_size[0] = size;
- break;
-
- case 0x1: /* xiph lacing */
- case 0x2: /* fixed-size lacing */
- case 0x3: /* EBML lacing */
- if (size == 0) {
- res = -1;
- break;
- }
- laces = (*data) + 1;
- data += 1;
- size -= 1;
- lace_size = av_mallocz(laces * sizeof(int));
-
- switch ((flags & 0x06) >> 1) {
- case 0x1: /* xiph lacing */ {
- uint8_t temp;
- uint32_t total = 0;
- for (n = 0; res == 0 && n < laces - 1; n++) {
- while (1) {
- if (size == 0) {
- res = -1;
- break;
- }
- temp = *data;
- lace_size[n] += temp;
- data += 1;
- size -= 1;
- if (temp != 0xff)
- break;
- }
- total += lace_size[n];
- }
- lace_size[n] = size - total;
- break;
- }
-
- case 0x2: /* fixed-size lacing */
- for (n = 0; n < laces; n++)
- lace_size[n] = size / laces;
- break;
-
- case 0x3: /* EBML lacing */ {
- uint32_t total;
- n = matroska_ebmlnum_uint(data, size, &num);
- if (n < 0) {
- av_log(matroska->ctx, AV_LOG_INFO,
- "EBML block data error\n");
- break;
- }
- data += n;
- size -= n;
- total = lace_size[0] = num;
- for (n = 1; res == 0 && n < laces - 1; n++) {
- int64_t snum;
- int r;
- r = matroska_ebmlnum_sint (data, size,
- &snum);
- if (r < 0) {
- av_log(matroska->ctx, AV_LOG_INFO,
- "EBML block data error\n");
- break;
- }
- data += r;
- size -= r;
- lace_size[n] = lace_size[n - 1] + snum;
- total += lace_size[n];
- }
- lace_size[n] = size - total;
- break;
- }
- }
- break;
- }
-
- if (res == 0) {
- int real_v = matroska->tracks[track]->flags & MATROSKA_TRACK_REAL_V;
- for (n = 0; n < laces; n++) {
- uint64_t timecode = AV_NOPTS_VALUE;
- int slice, slices = 1;
-
- if (real_v) {
- slices = *data++ + 1;
- lace_size[n]--;
- }
- if (cluster_time != (uint64_t)-1 && n == 0) {
- if (cluster_time + block_time >= 0)
- timecode = (cluster_time + block_time) * matroska->time_scale;
- }
- /* FIXME: duration */
-
- for (slice=0; slice<slices; slice++) {
- int slice_size, slice_offset = 0;
- if (real_v)
- slice_offset = rv_offset(data, slice, slices);
- if (slice+1 == slices)
- slice_size = lace_size[n] - slice_offset;
- else
- slice_size = rv_offset(data, slice+1, slices) - slice_offset;
- pkt = av_mallocz(sizeof(AVPacket));
- /* XXX: prevent data copy... */
- if (av_new_packet(pkt, slice_size) < 0) {
- res = AVERROR_NOMEM;
- n = laces-1;
- break;
- }
- memcpy (pkt->data, data+slice_offset, slice_size);
-
- if (n == 0)
- pkt->flags = is_keyframe;
- pkt->stream_index =
- matroska->tracks[track]->stream_index;
-
- pkt->pts = timecode;
- pkt->pos = pos;
-
- matroska_queue_packet(matroska, pkt);
- }
- data += lace_size[n];
- }
- }
-
- av_free(lace_size);
- av_free(origdata);
+ pos = url_ftell(&matroska->ctx->pb);
+ res = ebml_read_binary(matroska, &id, &data, &size);
break;
}
case MATROSKA_ID_BLOCKDURATION: {
if ((res = ebml_read_uint(matroska, &id, &duration)) < 0)
break;
+ duration /= matroska->time_scale;
break;
}
- case MATROSKA_ID_BLOCKREFERENCE:
+ case MATROSKA_ID_BLOCKREFERENCE: {
+ int64_t num;
/* We've found a reference, so not even the first frame in
* the lace is a key frame. */
is_keyframe = 0;
if (last_num_packets != matroska->num_packets)
matroska->packets[last_num_packets]->flags = 0;
- res = ebml_read_skip(matroska);
+ if ((res = ebml_read_sint(matroska, &id, &num)) < 0)
+ break;
+ if (num > 0)
+ is_bframe = 1;
break;
+ }
default:
av_log(matroska->ctx, AV_LOG_INFO,
@@ -2607,13 +2673,12 @@ matroska_parse_blockgroup (MatroskaDemuxContext *matroska,
}
}
- if (pkt)
- {
- if (duration != AV_NOPTS_VALUE)
- pkt->duration = duration;
- else if (track >= 0 && track < matroska->num_tracks)
- pkt->duration = matroska->tracks[track]->default_duration / matroska->time_scale;
- }
+ if (res)
+ return res;
+
+ if (size > 0)
+ res = matroska_parse_block(matroska, data, size, pos, cluster_time,
+ duration, is_keyframe, is_bframe);
return res;
}
@@ -2624,6 +2689,9 @@ matroska_parse_cluster (MatroskaDemuxContext *matroska)
int res = 0;
uint32_t id;
uint64_t cluster_time = 0;
+ uint8_t *data;
+ int64_t pos;
+ int size;
av_log(matroska->ctx, AV_LOG_DEBUG,
"parsing cluster at %"PRId64"\n", url_ftell(&matroska->ctx->pb));
@@ -2654,6 +2722,15 @@ matroska_parse_cluster (MatroskaDemuxContext *matroska)
res = matroska_parse_blockgroup(matroska, cluster_time);
break;
+ case MATROSKA_ID_SIMPLEBLOCK:
+ pos = url_ftell(&matroska->ctx->pb);
+ res = ebml_read_binary(matroska, &id, &data, &size);
+ if (res == 0)
+ res = matroska_parse_block(matroska, data, size, pos,
+ cluster_time, AV_NOPTS_VALUE,
+ -1, 0);
+ break;
+
default:
av_log(matroska->ctx, AV_LOG_INFO,
"Unknown entry 0x%x in cluster data\n", id);
@@ -2681,47 +2758,46 @@ matroska_read_packet (AVFormatContext *s,
int res = 0;
uint32_t id;
- /* Do we still have a packet queued? */
- if (matroska_deliver_packet(matroska, pkt) == 0)
- return 0;
+ /* Read stream until we have a packet queued. */
+ while (matroska_deliver_packet(matroska, pkt)) {
- /* Have we already reached the end? */
- if (matroska->done)
- return AVERROR_IO;
+ /* Have we already reached the end? */
+ if (matroska->done)
+ return AVERROR_IO;
- while (res == 0) {
- if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
- res = AVERROR_IO;
- break;
- } else if (matroska->level_up) {
- matroska->level_up--;
- break;
- }
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ return AVERROR_IO;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
- switch (id) {
- case MATROSKA_ID_CLUSTER:
- if ((res = ebml_read_master(matroska, &id)) < 0)
+ switch (id) {
+ case MATROSKA_ID_CLUSTER:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ if ((res = matroska_parse_cluster(matroska)) == 0)
+ res = 1; /* Parsed one cluster, let's get out. */
break;
- if ((res = matroska_parse_cluster(matroska)) == 0)
- res = 1; /* Parsed one cluster, let's get out. */
- break;
- default:
- case EBML_ID_VOID:
- res = ebml_read_skip(matroska);
+ default:
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
break;
+ }
}
- if (matroska->level_up) {
- matroska->level_up--;
- break;
- }
+ if (res == -1)
+ matroska->done = 1;
}
- if (res == -1)
- matroska->done = 1;
-
- return matroska_deliver_packet(matroska, pkt);
+ return 0;
}
static int
@@ -2741,6 +2817,13 @@ matroska_read_close (AVFormatContext *s)
}
av_free(matroska->packets);
}
+ if (matroska->packets_reorder) {
+ for (n = 0; n < matroska->num_packets_reorder; n++) {
+ av_free_packet(matroska->packets_reorder[n]);
+ av_free(matroska->packets_reorder[n]);
+ }
+ av_free(matroska->packets_reorder);
+ }
for (n = 0; n < matroska->num_tracks; n++) {
MatroskaTrack *track = matroska->tracks[n];
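matroska_parse_block() above repeatedly calls matroska_ebmlnum_uint() to pull EBML-coded numbers (the track number, EBML lace sizes) out of a block payload. EBML integers carry their own length in the leading zero bits of the first byte, much like UTF-8. The sketch below decodes one unsigned value under that scheme; it is a simplified illustration and skips the reserved all-ones "unknown size" case that the real demuxer also has to consider.

    #include <stdint.h>

    /* Sketch: decode an EBML variable-length unsigned integer from buf.
     * Returns the number of bytes consumed, or -1 on error. */
    static int ebmlnum_uint_sketch(const uint8_t *buf, int size, uint64_t *out)
    {
        int i, len = 1;
        uint8_t mask = 0x80;
        uint64_t val;

        if (size < 1 || !buf[0])
            return -1;                  /* a 0x00 lead byte would mean > 8 bytes */
        while (!(buf[0] & mask)) {      /* leading zero bits give the length */
            len++;
            mask >>= 1;
        }
        if (len > size)
            return -1;
        val = buf[0] & (mask - 1);      /* strip the length marker bit */
        for (i = 1; i < len; i++)
            val = (val << 8) | buf[i];
        *out = val;
        return len;
    }

So a lead byte of 0x81 decodes to track number 1 in a single byte.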
diff --git a/contrib/ffmpeg/libavformat/mm.c b/contrib/ffmpeg/libavformat/mm.c
index a3c637fb2..443b70929 100644
--- a/contrib/ffmpeg/libavformat/mm.c
+++ b/contrib/ffmpeg/libavformat/mm.c
@@ -61,9 +61,9 @@ static int mm_probe(AVProbeData *p)
/* the first chunk is always the header */
if (p->buf_size < MM_PREAMBLE_SIZE)
return 0;
- if (LE_16(&p->buf[0]) != MM_TYPE_HEADER)
+ if (AV_RL16(&p->buf[0]) != MM_TYPE_HEADER)
return 0;
- if (LE_32(&p->buf[2]) != MM_HEADER_LEN_V && LE_32(&p->buf[2]) != MM_HEADER_LEN_AV)
+ if (AV_RL32(&p->buf[2]) != MM_HEADER_LEN_V && AV_RL32(&p->buf[2]) != MM_HEADER_LEN_AV)
return 0;
/* only return half certainty since this check is a bit sketchy */
@@ -141,8 +141,8 @@ static int mm_read_packet(AVFormatContext *s,
return AVERROR_IO;
}
- type = LE_16(&preamble[0]);
- length = LE_16(&preamble[2]);
+ type = AV_RL16(&preamble[0]);
+ length = AV_RL16(&preamble[2]);
switch(type) {
case MM_TYPE_PALETTE :
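The mm.c hunks above are part of a tree-wide rename of the byte-order helpers: LE_16/LE_32 become AV_RL16/AV_RL32, and BE_32 becomes AV_RB32 in the mov.c changes further down. The behaviour is unchanged: read a 16- or 32-bit value from a possibly unaligned byte pointer in little-endian (RL) or big-endian (RB) order. A portable sketch of the little-endian pair, under hypothetical names:

    #include <stdint.h>

    /* Byte-wise loads, so unaligned pointers are fine on any host. */
    static inline unsigned rl16_sketch(const uint8_t *p)
    {
        return p[0] | (p[1] << 8);
    }

    static inline uint32_t rl32_sketch(const uint8_t *p)
    {
        return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
    }

With readers like these, the AV_RL16(&p->buf[0]) check in mm_probe() compares the stored header type against MM_TYPE_HEADER identically on little- and big-endian hosts.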
diff --git a/contrib/ffmpeg/libavformat/mov.c b/contrib/ffmpeg/libavformat/mov.c
index 3ceac64b1..e9b577576 100644
--- a/contrib/ffmpeg/libavformat/mov.c
+++ b/contrib/ffmpeg/libavformat/mov.c
@@ -66,95 +66,6 @@
#undef NDEBUG
#include <assert.h>
-static const CodecTag mov_video_tags[] = {
-/* { CODEC_ID_, MKTAG('c', 'v', 'i', 'd') }, *//* Cinepak */
-/* { CODEC_ID_H263, MKTAG('r', 'a', 'w', ' ') }, *//* Uncompressed RGB */
-/* { CODEC_ID_H263, MKTAG('Y', 'u', 'v', '2') }, *//* Uncompressed YUV422 */
-/* { CODEC_ID_RAWVIDEO, MKTAG('A', 'V', 'U', 'I') }, *//* YUV with alpha-channel (AVID Uncompressed) */
-/* Graphics */
-/* Animation */
-/* Apple video */
-/* Kodak Photo CD */
- { CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') }, /* PhotoJPEG */
- { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'e', 'g') }, /* MPEG */
- { CODEC_ID_MJPEG, MKTAG('m', 'j', 'p', 'a') }, /* Motion-JPEG (format A) */
- { CODEC_ID_MJPEGB, MKTAG('m', 'j', 'p', 'b') }, /* Motion-JPEG (format B) */
- { CODEC_ID_MJPEG, MKTAG('A', 'V', 'D', 'J') }, /* MJPEG with alpha-channel (AVID JFIF meridien compressed) */
-/* { CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') }, *//* MJPEG with alpha-channel (AVID ABVB/Truevision NuVista) */
- { CODEC_ID_GIF, MKTAG('g', 'i', 'f', ' ') }, /* embedded gif files as frames (usually one "click to play movie" frame) */
-/* Sorenson video */
- { CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') }, /* Sorenson Video v1 */
- { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') }, /* Sorenson Video v1 */
- { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', 'i') }, /* Sorenson Video v1 (from QT specs)*/
- { CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') }, /* Sorenson Video v3 */
- { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
- { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') }, /* OpenDiVX *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
- { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
- { CODEC_ID_MPEG4, MKTAG('3', 'I', 'V', '2') }, /* experimental: 3IVX files before ivx D4 4.5.1 */
-/* { CODEC_ID_, MKTAG('I', 'V', '5', '0') }, *//* Indeo 5.0 */
- { CODEC_ID_H263, MKTAG('h', '2', '6', '3') }, /* H263 */
- { CODEC_ID_H263, MKTAG('s', '2', '6', '3') }, /* H263 ?? works */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
- { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, /* On2 VP3 */
- { CODEC_ID_RPZA, MKTAG('r', 'p', 'z', 'a') }, /* Apple Video (RPZA) */
- { CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') }, /* Cinepak */
- { CODEC_ID_8BPS, MKTAG('8', 'B', 'P', 'S') }, /* Planar RGB (8BPS) */
- { CODEC_ID_SMC, MKTAG('s', 'm', 'c', ' ') }, /* Apple Graphics (SMC) */
- { CODEC_ID_QTRLE, MKTAG('r', 'l', 'e', ' ') }, /* Apple Animation (RLE) */
- { CODEC_ID_QDRAW, MKTAG('q', 'd', 'r', 'w') }, /* QuickDraw */
- { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') }, /* AVC-1/H.264 */
- { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '2') }, /* MPEG2 produced by Sony HD camera */
- { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '3') }, /* HDV produced by FCP */
- { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'n') }, /* MPEG2 IMX NTSC 525/60 50mb/s produced by FCP */
- { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'p') }, /* MPEG2 IMX PAL 625/50 50mb/s produced by FCP */
- { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'n') }, /* MPEG2 IMX NTSC 525/60 30mb/s produced by FCP */
- { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'p') }, /* MPEG2 IMX PAL 625/50 30mb/s produced by FCP */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL produced by FCP */
- //{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '5') }, /* DVCPRO HD 50i produced by FCP */
- //{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '6') }, /* DVCPRO HD 60i produced by FCP */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL produced by FCP */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC produced by FCP */
- { CODEC_ID_DVVIDEO, MKTAG('A', 'V', 'd', 'v') }, /* AVID DV */
- //{ CODEC_ID_JPEG2000, MKTAG('m', 'j', 'p', '2') }, /* JPEG 2000 produced by FCP */
- { CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') }, /* Truevision Targa */
- { CODEC_ID_TIFF, MKTAG('t', 'i', 'f', 'f') }, /* TIFF embedded in MOV */
- { CODEC_ID_RAWVIDEO, MKTAG('2', 'v', 'u', 'y') }, /* UNCOMPRESSED 8BIT 4:2:2 */
- { CODEC_ID_NONE, 0 },
-};
-
-static const CodecTag mov_audio_tags[] = {
- { CODEC_ID_PCM_S32BE, MKTAG('i', 'n', '3', '2') },
- { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
- { CODEC_ID_PCM_S16BE, MKTAG('N', 'O', 'N', 'E') }, /* uncompressed */
- { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') }, /* 16 bits */
- { CODEC_ID_PCM_U8, MKTAG('r', 'a', 'w', ' ') }, /* 8 bits unsigned */
- { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') }, /* */
- { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') }, /* */
- { CODEC_ID_PCM_ALAW, MKTAG('a', 'l', 'a', 'w') }, /* */
- { CODEC_ID_ADPCM_IMA_QT, MKTAG('i', 'm', 'a', '4') }, /* IMA-4 ADPCM */
- { CODEC_ID_ADPCM_MS, MKTAG('m', 's', 0x00, 0x02) }, /* MS ADPCM */
- { CODEC_ID_MACE3, MKTAG('M', 'A', 'C', '3') }, /* Macintosh Audio Compression and Expansion 3:1 */
- { CODEC_ID_MACE6, MKTAG('M', 'A', 'C', '6') }, /* Macintosh Audio Compression and Expansion 6:1 */
-
- { CODEC_ID_MP3, MKTAG('.', 'm', 'p', '3') }, /* MPEG layer 3 */ /* sample files at http://www.3ivx.com/showcase.html use this tag */
- { CODEC_ID_MP2, 0x6D730055 }, /* MPEG layer 3 */
- { CODEC_ID_MP2, 0x5500736D }, /* MPEG layer 3 *//* XXX: check endianness */
-/* { CODEC_ID_OGG_VORBIS, MKTAG('O', 'g', 'g', 'S') }, *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
-/* MP4 tags */
- { CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') }, /* MPEG-4 AAC */
- /* The standard for mpeg4 audio is still not normalised AFAIK anyway */
- { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') }, /* AMR-NB 3gp */
- { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') }, /* AMR-WB 3gp */
- { CODEC_ID_AC3, MKTAG('m', 's', 0x20, 0x00) }, /* Dolby AC-3 */
- { CODEC_ID_ALAC,MKTAG('a', 'l', 'a', 'c') }, /* Apple Lossless */
- { CODEC_ID_QDM2,MKTAG('Q', 'D', 'M', '2') }, /* QDM2 */
- { CODEC_ID_DVAUDIO, MKTAG('v', 'd', 'v', 'a') },
- { CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'c', 'a') },
- { CODEC_ID_PCM_S16LE, MKTAG('l', 'p', 'c', 'm') },
- { CODEC_ID_NONE, 0 },
-};
-
/* the QuickTime file format is quite convoluted...
* it has lots of index tables, each indexing something in another one...
* Here we just use what is needed to read the chunks
@@ -237,27 +148,28 @@ struct MOVParseTableEntry;
typedef struct MOVStreamContext {
int ffindex; /* the ffmpeg stream id */
long next_chunk;
- long chunk_count;
+ unsigned int chunk_count;
int64_t *chunk_offsets;
- int stts_count;
+ unsigned int stts_count;
Time2Sample *stts_data;
- int ctts_count;
+ unsigned int ctts_count;
Time2Sample *ctts_data;
- int edit_count; /* number of 'edit' (elst atom) */
- long sample_to_chunk_sz;
+ unsigned int edit_count; /* number of 'edit' (elst atom) */
+ unsigned int sample_to_chunk_sz;
MOV_sample_to_chunk_tbl *sample_to_chunk;
int sample_to_ctime_index;
int sample_to_ctime_sample;
- long sample_size;
- long sample_count;
+ unsigned int sample_size;
+ unsigned int sample_count;
long *sample_sizes;
- long keyframe_count;
+ unsigned int keyframe_count;
long *keyframes;
int time_scale;
int time_rate;
long current_sample;
MOV_esds_t esds;
- AVRational sample_size_v1;
+ unsigned int bytes_per_frame;
+ unsigned int samples_per_frame;
int dv_audio_container;
} MOVStreamContext;
@@ -267,12 +179,8 @@ typedef struct MOVContext {
int64_t duration; /* duration of the longest track */
int found_moov; /* when both 'moov' and 'mdat' sections has been found */
int found_mdat; /* we suppose we have enough data to read the file */
- int64_t mdat_size;
int64_t mdat_offset;
int total_streams;
- /* some streams listed here aren't presented to the ffmpeg API, since they aren't either video nor audio
- * but we need the info to be able to skip data from those streams in the 'mdat' section
- */
MOVStreamContext *streams[MAX_STREAMS];
int ctab_size;
@@ -325,7 +233,7 @@ static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
}
total_size += 8;
a.offset += 8;
- dprintf("type: %08x %.4s sz: %"PRIx64" %"PRIx64" %"PRIx64"\n", a.type, (char*)&a.type, a.size, atom.size, total_size);
+ dprintf(c->fc, "type: %08x %.4s sz: %"PRIx64" %"PRIx64" %"PRIx64"\n", a.type, (char*)&a.type, a.size, atom.size, total_size);
if (a.size == 1) { /* 64 bit extended size */
a.size = get_be64(pb) - 8;
a.offset += 8;
@@ -336,15 +244,14 @@ static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
if (a.size <= 8)
break;
}
+ a.size -= 8;
+ if(a.size < 0 || a.size > atom.size - total_size)
+ break;
+
for (i = 0; c->parse_table[i].type != 0L
&& c->parse_table[i].type != a.type; i++)
/* empty */;
- a.size -= 8;
-
- if(a.size < 0)
- break;
-
if (c->parse_table[i].type == 0) { /* skip leaf atoms data */
url_fskip(pb, a.size);
} else {
@@ -404,8 +311,8 @@ static int mov_read_hdlr(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
ctype = get_le32(pb);
type = get_le32(pb); /* component subtype */
- dprintf("ctype= %c%c%c%c (0x%08lx)\n", *((char *)&ctype), ((char *)&ctype)[1], ((char *)&ctype)[2], ((char *)&ctype)[3], (long) ctype);
- dprintf("stype= %c%c%c%c\n", *((char *)&type), ((char *)&type)[1], ((char *)&type)[2], ((char *)&type)[3]);
+ dprintf(c->fc, "ctype= %c%c%c%c (0x%08lx)\n", *((char *)&ctype), ((char *)&ctype)[1], ((char *)&ctype)[2], ((char *)&ctype)[3], (long) ctype);
+ dprintf(c->fc, "stype= %c%c%c%c\n", *((char *)&type), ((char *)&type)[1], ((char *)&type)[2], ((char *)&type)[3]);
if(!ctype)
c->isom = 1;
if(type == MKTAG('v', 'i', 'd', 'e'))
@@ -442,12 +349,12 @@ static int mov_mp4_read_descr_len(ByteIOContext *pb)
return len;
}
-static int mov_mp4_read_descr(ByteIOContext *pb, int *tag)
+static int mov_mp4_read_descr(MOVContext *c, ByteIOContext *pb, int *tag)
{
int len;
*tag = get_byte(pb);
len = mov_mp4_read_descr_len(pb);
- dprintf("MPEG4 description: tag=0x%02x len=%d\n", *tag, len);
+ dprintf(c->fc, "MPEG4 description: tag=0x%02x len=%d\n", *tag, len);
return len;
}
@@ -459,14 +366,14 @@ static int mov_read_esds(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
/* Well, broken but sufficient for some MP4 streams */
get_be32(pb); /* version + flags */
- len = mov_mp4_read_descr(pb, &tag);
+ len = mov_mp4_read_descr(c, pb, &tag);
if (tag == MP4ESDescrTag) {
get_be16(pb); /* ID */
get_byte(pb); /* priority */
} else
get_be16(pb); /* ID */
- len = mov_mp4_read_descr(pb, &tag);
+ len = mov_mp4_read_descr(c, pb, &tag);
if (tag == MP4DecConfigDescrTag) {
sc->esds.object_type_id = get_byte(pb);
sc->esds.stream_type = get_byte(pb);
@@ -474,11 +381,11 @@ static int mov_read_esds(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
sc->esds.max_bitrate = get_be32(pb);
sc->esds.avg_bitrate = get_be32(pb);
- st->codec->codec_id= codec_get_id(ff_mov_obj_type, sc->esds.object_type_id);
- dprintf("esds object type id %d\n", sc->esds.object_type_id);
- len = mov_mp4_read_descr(pb, &tag);
+ st->codec->codec_id= codec_get_id(ff_mp4_obj_type, sc->esds.object_type_id);
+ dprintf(c->fc, "esds object type id %d\n", sc->esds.object_type_id);
+ len = mov_mp4_read_descr(c, pb, &tag);
if (tag == MP4DecSpecificDescrTag) {
- dprintf("Specific MPEG4 header len=%d\n", len);
+ dprintf(c->fc, "Specific MPEG4 header len=%d\n", len);
st->codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
if (st->codec->extradata) {
get_buffer(pb, st->codec->extradata, len);
@@ -504,7 +411,6 @@ static int mov_read_mdat(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
c->mdat_count++;
c->found_mdat=1;
c->mdat_offset = atom.offset;
- c->mdat_size = atom.size;
if(c->found_moov)
return 1; /* found both, just go */
url_fskip(pb, atom.size);
@@ -619,9 +525,9 @@ static int mov_read_smi(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (st->codec->extradata) {
- strcpy(st->codec->extradata, "SVQ3"); // fake
+ memcpy(st->codec->extradata, "SVQ3", 4); // fake
get_buffer(pb, st->codec->extradata + 0x5a, atom.size);
- dprintf("Reading SMI %"PRId64" %s\n", atom.size, st->codec->extradata + 0x5a);
+ dprintf(c->fc, "Reading SMI %"PRId64" %s\n", atom.size, st->codec->extradata + 0x5a);
} else
url_fskip(pb, atom.size);
@@ -648,21 +554,18 @@ static int mov_read_enda(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
return 0;
}
-static int mov_read_alac(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+/* FIXME modify qdm2/svq3/h264 decoders to take full atom as extradata */
+static int mov_read_extradata(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
-
- // currently ALAC decoder expect full atom header - so let's fake it
- // this should be fixed and just ALAC header should be passed
-
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
av_free(st->codec->extradata);
- st->codec->extradata_size = 36;
+ st->codec->extradata_size = atom.size + 8;
st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
-
if (st->codec->extradata) {
- strcpy(st->codec->extradata + 4, "alac"); // fake
- get_buffer(pb, st->codec->extradata + 8, 36 - 8);
- dprintf("Reading alac %d %s\n", st->codec->extradata_size, st->codec->extradata);
+ AV_WL32(st->codec->extradata + 4, atom.type);
+ get_buffer(pb, st->codec->extradata + 8, atom.size);
} else
url_fskip(pb, atom.size);
return 0;
@@ -692,27 +595,6 @@ static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
return 0;
}
-static int mov_read_jp2h(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
-{
- AVStream *st = c->fc->streams[c->fc->nb_streams-1];
-
- if((uint64_t)atom.size > (1<<30))
- return -1;
-
- av_free(st->codec->extradata);
-
- st->codec->extradata_size = atom.size + 8;
- st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
-
- /* pass all jp2h atom to codec */
- if (st->codec->extradata) {
- strcpy(st->codec->extradata + 4, "jp2h");
- get_buffer(pb, st->codec->extradata + 8, atom.size);
- } else
- url_fskip(pb, atom.size);
- return 0;
-}
-
static int mov_read_avcC(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
@@ -808,19 +690,19 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
}
st->codec->codec_tag = format;
- id = codec_get_id(mov_audio_tags, format);
+ id = codec_get_id(codec_movaudio_tags, format);
if (st->codec->codec_type != CODEC_TYPE_VIDEO && id > 0) {
st->codec->codec_type = CODEC_TYPE_AUDIO;
} else if (st->codec->codec_type != CODEC_TYPE_AUDIO && /* do not overwrite codec type */
format && format != MKTAG('m', 'p', '4', 's')) { /* skip old asf mpeg4 tag */
- id = codec_get_id(mov_video_tags, format);
+ id = codec_get_id(codec_movvideo_tags, format);
if (id <= 0)
id = codec_get_id(codec_bmp_tags, format);
if (id > 0)
st->codec->codec_type = CODEC_TYPE_VIDEO;
}
- dprintf("size=%d 4CC= %c%c%c%c codec_type=%d\n",
+ dprintf(c->fc, "size=%d 4CC= %c%c%c%c codec_type=%d\n",
size,
(format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff, (format >> 24) & 0xff,
st->codec->codec_type);
@@ -930,7 +812,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
get_be32(pb); /* vendor */
st->codec->channels = get_be16(pb); /* channel count */
- dprintf("audio channels %d\n", st->codec->channels);
+ dprintf(c->fc, "audio channels %d\n", st->codec->channels);
st->codec->bits_per_sample = get_be16(pb); /* sample size */
/* do we need to force to 16 for AMR ? */
@@ -958,12 +840,12 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
}
// Read QT version 1 fields. In version 0 these don't exist
- dprintf("version =%d, isom =%d\n",version,c->isom);
+ dprintf(c->fc, "version =%d, isom =%d\n",version,c->isom);
if(!c->isom) {
if(version==1) {
- sc->sample_size_v1.den = get_be32(pb); /* samples per packet */
+ sc->samples_per_frame = get_be32(pb);
get_be32(pb); /* bytes per packet */
- sc->sample_size_v1.num = get_be32(pb); /* bytes per frame */
+ sc->bytes_per_frame = get_be32(pb);
get_be32(pb); /* bytes per sample */
} else if(version==2) {
get_be32(pb); /* sizeof struct only */
@@ -1012,7 +894,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
st->codec->width= 0; /* let decoder init width/height */
st->codec->height= 0;
break;
-#ifdef CONFIG_FAAD
+#ifdef CONFIG_LIBFAAD
case CODEC_ID_AAC:
#endif
#ifdef CONFIG_VORBIS_DECODER
@@ -1043,6 +925,7 @@ static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
st->codec->channels= 1; /* really needed */
break;
case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
st->codec->codec_type = CODEC_TYPE_AUDIO; /* force type after stsd for m1a hdlr */
st->need_parsing = 1;
break;
@@ -1098,7 +981,7 @@ static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
sc->keyframe_count = entries;
#ifdef DEBUG
- av_log(NULL, AV_LOG_DEBUG, "keyframe_count = %ld\n", sc->keyframe_count);
+ av_log(NULL, AV_LOG_DEBUG, "keyframe_count = %d\n", sc->keyframe_count);
#endif
sc->keyframes = av_malloc(entries * sizeof(long));
if (!sc->keyframes)
@@ -1133,7 +1016,7 @@ static int mov_read_stsz(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
return 0;
#ifdef DEBUG
- av_log(NULL, AV_LOG_DEBUG, "sample_size = %ld sample_count = %ld\n", sc->sample_size, sc->sample_count);
+ av_log(NULL, AV_LOG_DEBUG, "sample_size = %d sample_count = %d\n", sc->sample_size, sc->sample_count);
#endif
sc->sample_sizes = av_malloc(entries * sizeof(long));
if (!sc->sample_sizes)
@@ -1181,7 +1064,7 @@ av_log(NULL, AV_LOG_DEBUG, "track[%i].stts.entries = %i\n", c->fc->nb_streams-1,
sc->time_rate= ff_gcd(sc->time_rate, sample_duration);
- dprintf("sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);
+ dprintf(c->fc, "sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);
duration+=(int64_t)sample_duration*sample_count;
total_sample_count+=sample_count;
@@ -1208,7 +1091,7 @@ static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
sc->ctts_count = entries;
sc->ctts_data = av_malloc(entries * sizeof(Time2Sample));
- dprintf("track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);
+ dprintf(c->fc, "track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);
for(i=0; i<entries; i++) {
int count =get_be32(pb);
@@ -1380,7 +1263,7 @@ static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
get_be32(pb); /* Media time */
get_be32(pb); /* Media rate */
}
- dprintf("track[%i].edit_count = %i\n", c->fc->nb_streams-1, c->streams[c->fc->nb_streams-1]->edit_count);
+ dprintf(c->fc, "track[%i].edit_count = %i\n", c->fc->nb_streams-1, c->streams[c->fc->nb_streams-1]->edit_count);
return 0;
}
@@ -1391,9 +1274,10 @@ static const MOVParseTableEntry mov_default_parse_table[] = {
{ MKTAG( 'e', 'd', 't', 's' ), mov_read_default },
{ MKTAG( 'e', 'l', 's', 't' ), mov_read_elst },
{ MKTAG( 'e', 'n', 'd', 'a' ), mov_read_enda },
+{ MKTAG( 'f', 'i', 'e', 'l' ), mov_read_extradata },
{ MKTAG( 'f', 't', 'y', 'p' ), mov_read_ftyp },
{ MKTAG( 'h', 'd', 'l', 'r' ), mov_read_hdlr },
-{ MKTAG( 'j', 'p', '2', 'h' ), mov_read_jp2h },
+{ MKTAG( 'j', 'p', '2', 'h' ), mov_read_extradata },
{ MKTAG( 'm', 'd', 'a', 't' ), mov_read_mdat },
{ MKTAG( 'm', 'd', 'h', 'd' ), mov_read_mdhd },
{ MKTAG( 'm', 'd', 'i', 'a' ), mov_read_default },
@@ -1401,7 +1285,7 @@ static const MOVParseTableEntry mov_default_parse_table[] = {
{ MKTAG( 'm', 'o', 'o', 'v' ), mov_read_moov },
{ MKTAG( 'm', 'v', 'h', 'd' ), mov_read_mvhd },
{ MKTAG( 'S', 'M', 'I', ' ' ), mov_read_smi }, /* Sorenson extension ??? */
-{ MKTAG( 'a', 'l', 'a', 'c' ), mov_read_alac }, /* alac specific atom */
+{ MKTAG( 'a', 'l', 'a', 'c' ), mov_read_extradata }, /* alac specific atom */
{ MKTAG( 'a', 'v', 'c', 'C' ), mov_read_avcC },
{ MKTAG( 's', 't', 'b', 'l' ), mov_read_default },
{ MKTAG( 's', 't', 'c', 'o' ), mov_read_stco },
@@ -1443,7 +1327,7 @@ static int mov_probe(AVProbeData *p)
/* ignore invalid offset */
if ((offset + 8) > (unsigned int)p->buf_size)
return score;
- tag = LE_32(p->buf + offset + 4);
+ tag = AV_RL32(p->buf + offset + 4);
switch(tag) {
/* check for obvious tags */
case MKTAG( 'j', 'P', ' ', ' ' ): /* jpeg 2000 signature */
@@ -1461,7 +1345,7 @@ static int mov_probe(AVProbeData *p)
case MKTAG( 'f', 't', 'y', 'p' ):
case MKTAG( 's', 'k', 'i', 'p' ):
case MKTAG( 'u', 'u', 'i', 'd' ):
- offset = BE_32(p->buf+offset) + offset;
+ offset = AV_RB32(p->buf+offset) + offset;
/* if we only find those cause probedata is too small at least rate them */
score = AVPROBE_SCORE_MAX - 50;
break;
@@ -1478,16 +1362,16 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
MOVStreamContext *sc = st->priv_data;
offset_t current_offset;
int64_t current_dts = 0;
- int stts_index = 0;
- int stsc_index = 0;
- int stss_index = 0;
- int i, j, k;
+ unsigned int stts_index = 0;
+ unsigned int stsc_index = 0;
+ unsigned int stss_index = 0;
+ unsigned int i, j, k;
if (sc->sample_sizes || st->codec->codec_type == CODEC_TYPE_VIDEO || sc->dv_audio_container) {
- int keyframe, sample_size;
- int current_sample = 0;
- int stts_sample = 0;
- int distance = 0;
+ unsigned int current_sample = 0;
+ unsigned int stts_sample = 0;
+ unsigned int keyframe, sample_size;
+ unsigned int distance = 0;
st->nb_frames = sc->sample_count;
for (i = 0; i < sc->chunk_count; i++) {
@@ -1495,6 +1379,10 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
if (stsc_index + 1 < sc->sample_to_chunk_sz && i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
stsc_index++;
for (j = 0; j < sc->sample_to_chunk[stsc_index].count; j++) {
+ if (current_sample >= sc->sample_count) {
+ av_log(mov->fc, AV_LOG_ERROR, "wrong sample count\n");
+ goto out;
+ }
keyframe = !sc->keyframe_count || current_sample + 1 == sc->keyframes[stss_index];
if (keyframe) {
distance = 0;
@@ -1502,7 +1390,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
stss_index++;
}
sample_size = sc->sample_size > 0 ? sc->sample_size : sc->sample_sizes[current_sample];
- dprintf("AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", size %d, distance %d, keyframe %d\n",
+ dprintf(mov->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", size %d, distance %d, keyframe %d\n",
st->index, current_sample, current_offset, current_dts, sample_size, distance, keyframe);
av_add_index_entry(st, current_offset, current_dts, sample_size, distance, keyframe ? AVINDEX_KEYFRAME : 0);
current_offset += sample_size;
@@ -1510,8 +1398,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
current_dts += sc->stts_data[stts_index].duration / sc->time_rate;
distance++;
stts_sample++;
- if (current_sample + 1 < sc->sample_count)
- current_sample++;
+ current_sample++;
if (stts_index + 1 < sc->stts_count && stts_sample == sc->stts_data[stts_index].count) {
stts_sample = 0;
stts_index++;
@@ -1519,7 +1406,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
}
}
} else { /* read whole chunk */
- int chunk_samples, chunk_size, chunk_duration;
+ unsigned int chunk_samples, chunk_size, chunk_duration;
for (i = 0; i < sc->chunk_count; i++) {
current_offset = sc->chunk_offsets[i];
@@ -1529,8 +1416,8 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
/* get chunk size */
if (sc->sample_size > 1 || st->codec->codec_id == CODEC_ID_PCM_U8 || st->codec->codec_id == CODEC_ID_PCM_S8)
chunk_size = chunk_samples * sc->sample_size;
- else if (sc->sample_size_v1.den > 0 && (chunk_samples * sc->sample_size_v1.num % sc->sample_size_v1.den == 0))
- chunk_size = chunk_samples * sc->sample_size_v1.num / sc->sample_size_v1.den;
+ else if (sc->samples_per_frame > 0 && (chunk_samples * sc->bytes_per_frame % sc->samples_per_frame == 0))
+ chunk_size = chunk_samples * sc->bytes_per_frame / sc->samples_per_frame;
else { /* workaround to find nearest next chunk offset */
chunk_size = INT_MAX;
for (j = 0; j < mov->total_streams; j++) {
@@ -1547,7 +1434,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
/* check for last chunk */
if (chunk_size == INT_MAX)
for (j = 0; j < mov->mdat_count; j++) {
- dprintf("mdat %d, offset %"PRIx64", size %"PRId64", current offset %"PRIx64"\n",
+ dprintf(mov->fc, "mdat %d, offset %"PRIx64", size %"PRId64", current offset %"PRIx64"\n",
j, mov->mdat_list[j].offset, mov->mdat_list[j].size, current_offset);
if (mov->mdat_list[j].offset <= current_offset && mov->mdat_list[j].offset + mov->mdat_list[j].size > current_offset)
chunk_size = mov->mdat_list[j].offset + mov->mdat_list[j].size - current_offset;
@@ -1573,12 +1460,13 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
}
}
}
- dprintf("AVIndex stream %d, chunk %d, offset %"PRIx64", dts %"PRId64", size %d, duration %d\n",
+ dprintf(mov->fc, "AVIndex stream %d, chunk %d, offset %"PRIx64", dts %"PRId64", size %d, duration %d\n",
st->index, i, current_offset, current_dts, chunk_size, chunk_duration);
assert(chunk_duration % sc->time_rate == 0);
current_dts += chunk_duration / sc->time_rate;
}
}
+ out:
/* adjust sample count to avindex entries */
sc->sample_count = st->nb_index_entries;
}
@@ -1605,7 +1493,7 @@ static int mov_read_header(AVFormatContext *s, AVFormatParameters *ap)
err, mov->found_moov, mov->found_mdat, url_ftell(pb));
return -1;
}
- dprintf("on_parse_exit_offset=%d\n", (int) url_ftell(pb));
+ dprintf(mov->fc, "on_parse_exit_offset=%d\n", (int) url_ftell(pb));
/* some cleanup : make sure we are on the mdat atom */
if(!url_is_streamed(pb) && (url_ftell(pb) != mov->mdat_offset))
@@ -1615,19 +1503,29 @@ static int mov_read_header(AVFormatContext *s, AVFormatParameters *ap)
for(i=0; i<mov->total_streams; i++) {
MOVStreamContext *sc = mov->streams[i];
-
+ AVStream *st = s->streams[i];
+ /* sanity checks */
+ if(!sc->stts_count || !sc->chunk_count || !sc->sample_to_chunk_sz ||
+ (!sc->sample_size && !sc->sample_count)){
+ av_log(s, AV_LOG_ERROR, "missing mandatory atoms, broken header\n");
+ sc->sample_count = 0; //ignore track
+ continue;
+ }
if(!sc->time_rate)
sc->time_rate=1;
if(!sc->time_scale)
sc->time_scale= mov->time_scale;
- av_set_pts_info(s->streams[i], 64, sc->time_rate, sc->time_scale);
+ av_set_pts_info(st, 64, sc->time_rate, sc->time_scale);
+
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO && sc->stts_count == 1)
+ st->codec->frame_size = sc->stts_data[0].duration;
- if(s->streams[i]->duration != AV_NOPTS_VALUE){
- assert(s->streams[i]->duration % sc->time_rate == 0);
- s->streams[i]->duration /= sc->time_rate;
+ if(st->duration != AV_NOPTS_VALUE){
+ assert(st->duration % sc->time_rate == 0);
+ st->duration /= sc->time_rate;
}
sc->ffindex = i;
- mov_build_index(mov, s->streams[i]);
+ mov_build_index(mov, st);
}
for(i=0; i<mov->total_streams; i++) {
@@ -1657,7 +1555,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
AVIndexEntry *current_sample = &s->streams[i]->index_entries[msc->current_sample];
int64_t dts = av_rescale(current_sample->timestamp * (int64_t)msc->time_rate, AV_TIME_BASE, msc->time_scale);
- dprintf("stream %d, sample %ld, dts %"PRId64"\n", i, msc->current_sample, dts);
+ dprintf(s, "stream %d, sample %ld, dts %"PRId64"\n", i, msc->current_sample, dts);
if (dts < best_dts) {
sample = current_sample;
best_dts = dts;
@@ -1676,7 +1574,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
#ifdef CONFIG_DV_DEMUXER
if (sc->dv_audio_container) {
dv_get_packet(mov->dv_demux, pkt);
- dprintf("dv audio pkt size %d\n", pkt->size);
+ dprintf(s, "dv audio pkt size %d\n", pkt->size);
} else {
#endif
url_fseek(&s->pb, sample->pos, SEEK_SET);
@@ -1705,7 +1603,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
}
pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? PKT_FLAG_KEY : 0;
pkt->pos = sample->pos;
- dprintf("stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n", pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration);
+ dprintf(s, "stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n", pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration);
return 0;
}
@@ -1716,11 +1614,11 @@ static int mov_seek_stream(AVStream *st, int64_t timestamp, int flags)
int i;
sample = av_index_search_timestamp(st, timestamp, flags);
- dprintf("stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
+ dprintf(st->codec, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
if (sample < 0) /* not sure what to do */
return -1;
sc->current_sample = sample;
- dprintf("stream %d, found sample %ld\n", st->index, sc->current_sample);
+ dprintf(st->codec, "stream %d, found sample %ld\n", st->index, sc->current_sample);
/* adjust ctts index */
if (sc->ctts_data) {
time_sample = 0;
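
For the fixed-ratio audio case handled in mov_build_index() above, the chunk byte size now comes from the samples_per_frame/bytes_per_frame pair instead of the old sample_size_v1 rational. A minimal standalone sketch of that arithmetic, with illustrative IMA4-style values (64 samples packed into 34 bytes); the constants are assumptions, not taken from the patch:

/* Hedged sketch of the chunk_size computation in mov_build_index();
 * the 64/34 ratio below is illustrative (IMA4-style packing). */
#include <stdio.h>

int main(void)
{
    unsigned samples_per_frame = 64;
    unsigned bytes_per_frame   = 34;
    unsigned chunk_samples     = 4096;   /* from the stsc/stts tables */
    unsigned chunk_size;

    if (samples_per_frame > 0 &&
        chunk_samples * bytes_per_frame % samples_per_frame == 0) {
        chunk_size = chunk_samples * bytes_per_frame / samples_per_frame;
        printf("%u samples -> %u bytes\n", chunk_samples, chunk_size); /* 2176 */
    }
    return 0;
}
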
diff --git a/contrib/ffmpeg/libavformat/movenc.c b/contrib/ffmpeg/libavformat/movenc.c
index 736d1594a..e93416914 100644
--- a/contrib/ffmpeg/libavformat/movenc.c
+++ b/contrib/ffmpeg/libavformat/movenc.c
@@ -54,7 +54,6 @@ typedef struct MOVIndex {
long time;
int64_t trackDuration;
long sampleCount;
- long sampleDuration;
long sampleSize;
int hasKeyframes;
int hasBframes;
@@ -261,7 +260,7 @@ static int mov_write_esds_tag(ByteIOContext *pb, MOVTrack* track) // Basic
putDescr(pb, 0x04, 13 + decoderSpecificInfoLen);
// Object type indication
- put_byte(pb, codec_get_tag(ff_mov_obj_type, track->enc->codec_id));
+ put_byte(pb, codec_get_tag(ff_mp4_obj_type, track->enc->codec_id));
// the following field is made of 6 bits to identify the stream type (4 for video, 5 for audio)
// plus 1 bit to indicate upstream and 1 bit set to 1 (reserved)
@@ -323,25 +322,6 @@ static int mov_write_wave_tag(ByteIOContext *pb, MOVTrack* track)
return updateSize (pb, pos);
}
-static const CodecTag codec_movaudio_tags[] = {
- { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') },
- { CODEC_ID_PCM_ALAW, MKTAG('a', 'l', 'a', 'w') },
- { CODEC_ID_ADPCM_IMA_QT, MKTAG('i', 'm', 'a', '4') },
- { CODEC_ID_MACE3, MKTAG('M', 'A', 'C', '3') },
- { CODEC_ID_MACE6, MKTAG('M', 'A', 'C', '6') },
- { CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') },
- { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') },
- { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') },
- { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') },
- { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') },
- { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
- { CODEC_ID_PCM_S24LE, MKTAG('i', 'n', '2', '4') },
- { CODEC_ID_PCM_S32BE, MKTAG('i', 'n', '3', '2') },
- { CODEC_ID_PCM_S32LE, MKTAG('i', 'n', '3', '2') },
- { CODEC_ID_MP3, MKTAG('.', 'm', 'p', '3') },
- { CODEC_ID_NONE, 0 },
-};
-
static int mov_write_audio_tag(ByteIOContext *pb, MOVTrack* track)
{
offset_t pos = url_ftell(pb);
@@ -361,11 +341,20 @@ static int mov_write_audio_tag(ByteIOContext *pb, MOVTrack* track)
put_be16(pb, 0); /* Revision level */
put_be32(pb, 0); /* Reserved */
- put_be16(pb, track->mode == MODE_MOV ? track->enc->channels : 2); /* Number of channels */
- /* FIXME 8 bit for 'raw ' in mov */
- put_be16(pb, 16); /* Reserved */
+ if (track->mode == MODE_MOV) {
+ put_be16(pb, track->enc->channels);
+ if (track->enc->codec_id == CODEC_ID_PCM_U8 ||
+ track->enc->codec_id == CODEC_ID_PCM_S8)
+ put_be16(pb, 8); /* bits per sample */
+ else
+ put_be16(pb, 16);
+ put_be16(pb, track->audio_vbr ? -2 : 0); /* compression ID */
+ } else { /* reserved for mp4/3gp */
+ put_be16(pb, 2);
+ put_be16(pb, 16);
+ put_be16(pb, 0);
+ }
- put_be16(pb, track->mode == MODE_MOV && track->audio_vbr ? -2 : 0); /* compression ID */
put_be16(pb, 0); /* packet size (= 0) */
put_be16(pb, track->timescale); /* Time scale */
put_be16(pb, 0); /* Reserved */
@@ -481,7 +470,7 @@ static int mov_write_avcc_tag(ByteIOContext *pb, MOVTrack *track)
put_tag(pb, "avcC");
if (track->vosLen > 6) {
/* check for h264 start code */
- if (BE_32(track->vosData) == 0x00000001) {
+ if (AV_RB32(track->vosData) == 0x00000001) {
uint8_t *buf, *end;
uint32_t sps_size=0, pps_size=0;
uint8_t *sps=0, *pps=0;
@@ -494,7 +483,7 @@ static int mov_write_avcc_tag(ByteIOContext *pb, MOVTrack *track)
while (buf < end) {
unsigned int size;
uint8_t nal_type;
- size = BE_32(buf);
+ size = AV_RB32(buf);
nal_type = buf[4] & 0x1f;
if (nal_type == 7) { /* SPS */
sps = buf + 4;
@@ -527,22 +516,6 @@ static int mov_write_avcc_tag(ByteIOContext *pb, MOVTrack *track)
return updateSize(pb, pos);
}
-static const CodecTag codec_movvideo_tags[] = {
- { CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') },
- { CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') },
- { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
- { CODEC_ID_H263, MKTAG('h', '2', '6', '3') },
- { CODEC_ID_H263, MKTAG('s', '2', '6', '3') },
- { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
- /* special handling in mov_find_video_codec_tag */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC */
- { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL */
- { CODEC_ID_NONE, 0 },
-};
-
static int mov_find_video_codec_tag(AVFormatContext *s, MOVTrack *track)
{
int tag = track->enc->codec_tag;
@@ -1501,28 +1474,20 @@ static int mov_write_header(AVFormatContext *s)
if(st->codec->codec_type == CODEC_TYPE_VIDEO){
track->tag = mov_find_video_codec_tag(s, track);
track->timescale = st->codec->time_base.den;
- track->sampleDuration = st->codec->time_base.num;
av_set_pts_info(st, 64, 1, st->codec->time_base.den);
}else if(st->codec->codec_type == CODEC_TYPE_AUDIO){
track->tag = mov_find_audio_codec_tag(s, track);
track->timescale = st->codec->sample_rate;
- track->sampleDuration = st->codec->frame_size;
av_set_pts_info(st, 64, 1, st->codec->sample_rate);
- switch(track->enc->codec_id){
- case CODEC_ID_MP3:
- case CODEC_ID_AAC:
- case CODEC_ID_AMR_NB:
- case CODEC_ID_AMR_WB:
+ if(!st->codec->frame_size){
+ av_log(s, AV_LOG_ERROR, "track %d: codec frame size is not set\n", i);
+ return -1;
+ }else if(st->codec->frame_size > 1){ /* assume compressed audio */
track->audio_vbr = 1;
- break;
- default:
+ }else{
track->sampleSize = (av_get_bits_per_sample(st->codec->codec_id) >> 3) * st->codec->channels;
}
}
- if (!track->sampleDuration) {
- av_log(s, AV_LOG_ERROR, "track %d: sample duration is not set\n", i);
- return -1;
- }
}
mov_write_mdat_tag(pb, mov);
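
The audio sample-entry change above writes real channel and bit-depth values only in MOV mode; for MP4/3GP the fields are fixed reserved values, and a compression ID of -2 marks variable-bitrate audio. A minimal sketch of that decision, with hypothetical parameter names standing in for the track state:

/* Hedged sketch of the version-0 SoundDescription fields written above.
 * is_mov / channels / is_8bit_pcm / is_vbr stand in for the MOVTrack state. */
#include <stdio.h>

static void sound_description_fields(int is_mov, int channels,
                                     int is_8bit_pcm, int is_vbr)
{
    int chan, bits, compression_id;

    if (is_mov) {
        chan           = channels;
        bits           = is_8bit_pcm ? 8 : 16;
        compression_id = is_vbr ? -2 : 0;   /* -2 marks variable bitrate */
    } else {            /* reserved values for mp4/3gp */
        chan           = 2;
        bits           = 16;
        compression_id = 0;
    }
    printf("channels %d, bits %d, compression id %d\n",
           chan, bits, compression_id);
}
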
diff --git a/contrib/ffmpeg/libavformat/mp3.c b/contrib/ffmpeg/libavformat/mp3.c
index 723980c83..e86ea14c8 100644
--- a/contrib/ffmpeg/libavformat/mp3.c
+++ b/contrib/ffmpeg/libavformat/mp3.c
@@ -393,6 +393,7 @@ AVInputFormat mp3_demuxer = {
mp3_read_header,
mp3_read_packet,
mp3_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "mp2,mp3,m2a", /* XXX: use probe */
};
#endif
@@ -401,7 +402,7 @@ AVOutputFormat mp2_muxer = {
"mp2",
"MPEG audio layer 2",
"audio/x-mpeg",
-#ifdef CONFIG_MP3LAME
+#ifdef CONFIG_LIBMP3LAME
"mp2,m2a",
#else
"mp2,mp3,m2a",
diff --git a/contrib/ffmpeg/libavformat/mpc.c b/contrib/ffmpeg/libavformat/mpc.c
new file mode 100644
index 000000000..a28efb16d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpc.c
@@ -0,0 +1,231 @@
+/*
+ * Musepack demuxer
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+
+#define MPC_FRAMESIZE 1152
+#define DELAY_FRAMES 32
+
+static const int mpc_rate[4] = { 44100, 48000, 37800, 32000 };
+typedef struct {
+ int64_t pos;
+ int size, skip;
+}MPCFrame;
+
+typedef struct {
+ int ver;
+ uint32_t curframe, lastframe;
+ uint32_t fcount;
+ MPCFrame *frames;
+ int curbits;
+ int frames_noted;
+} MPCContext;
+
+static int mpc_probe(AVProbeData *p)
+{
+ const uint8_t *d = p->buf;
+ if (p->buf_size < 32)
+ return 0;
+ if (d[0] == 'M' && d[1] == 'P' && d[2] == '+' && (d[3] == 0x17 || d[3] == 0x7))
+ return AVPROBE_SCORE_MAX;
+ if (d[0] == 'I' && d[1] == 'D' && d[2] == '3')
+ return AVPROBE_SCORE_MAX / 2;
+ return 0;
+}
+
+static int mpc_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ MPCContext *c = s->priv_data;
+ AVStream *st;
+ int t;
+
+ t = get_le24(&s->pb);
+ if(t != MKTAG('M', 'P', '+', 0)){
+ if(t != MKTAG('I', 'D', '3', 0)){
+ av_log(s, AV_LOG_ERROR, "Not a Musepack file\n");
+ return -1;
+ }
+ /* skip ID3 tags and try again */
+ url_fskip(&s->pb, 3);
+ t = get_byte(&s->pb) << 21;
+ t |= get_byte(&s->pb) << 14;
+ t |= get_byte(&s->pb) << 7;
+ t |= get_byte(&s->pb);
+ av_log(s, AV_LOG_DEBUG, "Skipping %d(%X) bytes of ID3 data\n", t, t);
+ url_fskip(&s->pb, t);
+ if(get_le24(&s->pb) != MKTAG('M', 'P', '+', 0)){
+ av_log(s, AV_LOG_ERROR, "Not a Musepack file\n");
+ return -1;
+ }
+ }
+ c->ver = get_byte(&s->pb);
+ if(c->ver != 0x07 && c->ver != 0x17){
+ av_log(s, AV_LOG_ERROR, "Can demux Musepack SV7, got version %02X\n", c->ver);
+ return -1;
+ }
+ c->fcount = get_le32(&s->pb);
+ if((int64_t)c->fcount * sizeof(MPCFrame) >= UINT_MAX){
+ av_log(s, AV_LOG_ERROR, "Too many frames, seeking is not possible\n");
+ return -1;
+ }
+ c->frames = av_malloc(c->fcount * sizeof(MPCFrame));
+ c->curframe = 0;
+ c->lastframe = -1;
+ c->curbits = 8;
+ c->frames_noted = 0;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_MUSEPACK7;
+ st->codec->channels = 2;
+ st->codec->bits_per_sample = 16;
+
+ st->codec->extradata_size = 16;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(&s->pb, st->codec->extradata, 16);
+ st->codec->sample_rate = mpc_rate[st->codec->extradata[2] & 3];
+ av_set_pts_info(st, 32, MPC_FRAMESIZE, st->codec->sample_rate);
+ /* scan for seekpoints */
+ s->start_time = 0;
+ s->duration = (int64_t)c->fcount * MPC_FRAMESIZE * AV_TIME_BASE / st->codec->sample_rate;
+
+ return 0;
+}
+
+static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ MPCContext *c = s->priv_data;
+ int ret, size, size2, curbits, cur = c->curframe;
+ int64_t tmp, pos;
+
+ if (c->curframe >= c->fcount)
+ return -1;
+
+ if(c->curframe != c->lastframe + 1){
+ url_fseek(&s->pb, c->frames[c->curframe].pos, SEEK_SET);
+ c->curbits = c->frames[c->curframe].skip;
+ }
+ c->lastframe = c->curframe;
+ c->curframe++;
+ curbits = c->curbits;
+ pos = url_ftell(&s->pb);
+ tmp = get_le32(&s->pb);
+ if(curbits <= 12){
+ size2 = (tmp >> (12 - curbits)) & 0xFFFFF;
+ }else{
+ tmp = (tmp << 32) | get_le32(&s->pb);
+ size2 = (tmp >> (44 - curbits)) & 0xFFFFF;
+ }
+ curbits += 20;
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+ size = ((size2 + curbits + 31) & ~31) >> 3;
+ if(cur == c->frames_noted){
+ c->frames[cur].pos = pos;
+ c->frames[cur].size = size;
+ c->frames[cur].skip = curbits - 20;
+ av_add_index_entry(s->streams[0], cur, cur, size, 0, AVINDEX_KEYFRAME);
+ c->frames_noted++;
+ }
+ c->curbits = (curbits + size2) & 0x1F;
+
+ if (av_new_packet(pkt, size) < 0)
+ return AVERROR_IO;
+
+ pkt->data[0] = curbits;
+ pkt->data[1] = (c->curframe > c->fcount);
+
+ pkt->stream_index = 0;
+ pkt->pts = cur;
+ ret = get_buffer(&s->pb, pkt->data + 4, size);
+ if(c->curbits)
+ url_fseek(&s->pb, -4, SEEK_CUR);
+ if(ret < size){
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->size = ret + 4;
+
+ return 0;
+}
+
+static int mpc_read_close(AVFormatContext *s)
+{
+ MPCContext *c = s->priv_data;
+
+ av_freep(&c->frames);
+ return 0;
+}
+
+/**
+ * Seek to the given position.
+ * If the position is unknown but within the limits of the file,
+ * packets are skipped until the desired position is reached.
+ *
+ * This function also relies on the fact that timestamp == frameno.
+ */
+static int mpc_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+ AVStream *st = s->streams[stream_index];
+ MPCContext *c = s->priv_data;
+ AVPacket pkt1, *pkt = &pkt1;
+ int ret;
+ int index = av_index_search_timestamp(st, timestamp - DELAY_FRAMES, flags);
+ uint32_t lastframe;
+
+ /* if found, seek there */
+ if (index >= 0){
+ c->curframe = st->index_entries[index].pos;
+ return 0;
+ }
+ /* if timestamp is out of bounds, return error */
+ if(timestamp < 0 || timestamp >= c->fcount)
+ return -1;
+ timestamp -= DELAY_FRAMES;
+ /* seek to the furthest known position and read packets until
+       we reach the desired position */
+ lastframe = c->curframe;
+ if(c->frames_noted) c->curframe = c->frames_noted - 1;
+ while(c->curframe < timestamp){
+ ret = av_read_frame(s, pkt);
+ if (ret < 0){
+ c->curframe = lastframe;
+ return -1;
+ }
+ av_free_packet(pkt);
+ }
+ return 0;
+}
+
+
+AVInputFormat mpc_demuxer = {
+ "mpc",
+ "musepack",
+ sizeof(MPCContext),
+ mpc_probe,
+ mpc_read_header,
+ mpc_read_packet,
+ mpc_read_close,
+ mpc_read_seek,
+ .extensions = "mpc",
+};
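
mpc_read_header() above skips an ID3v2 tag by reading its size as four "syncsafe" bytes, seven significant bits each. A standalone sketch of that decoding, with an illustrative header value:

/* Hedged sketch: decoding the ID3v2 syncsafe size field that
 * mpc_read_header() assembles byte by byte before url_fskip(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t syncsafe_to_size(const uint8_t b[4])
{
    return (uint32_t)(b[0] & 0x7f) << 21 |
           (uint32_t)(b[1] & 0x7f) << 14 |
           (uint32_t)(b[2] & 0x7f) << 7  |
           (uint32_t)(b[3] & 0x7f);
}

int main(void)
{
    const uint8_t hdr[4] = { 0x00, 0x00, 0x02, 0x01 }; /* illustrative */
    printf("ID3v2 tag payload: %u bytes\n", syncsafe_to_size(hdr)); /* 257 */
    return 0;
}
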
diff --git a/contrib/ffmpeg/libavformat/mpeg.c b/contrib/ffmpeg/libavformat/mpeg.c
index 709ce16f1..ae47fa60a 100644
--- a/contrib/ffmpeg/libavformat/mpeg.c
+++ b/contrib/ffmpeg/libavformat/mpeg.c
@@ -513,7 +513,7 @@ static int mpeg_mux_init(AVFormatContext *ctx)
for(i=0;i<ctx->nb_streams;i++) {
av_free(ctx->streams[i]->priv_data);
}
- return -ENOMEM;
+ return AVERROR(ENOMEM);
}
static inline void put_timestamp(ByteIOContext *pb, int id, int64_t timestamp)
@@ -1445,14 +1445,18 @@ static int mpegps_read_pes_header(AVFormatContext *s,
{
MpegDemuxContext *m = s->priv_data;
int len, size, startcode, c, flags, header_len;
- int64_t pts, dts, last_pos;
+ int pes_ext, ext2_len, id_ext, skip;
+ int64_t pts, dts;
+ int64_t last_sync= url_ftell(&s->pb);
- last_pos = -1;
+ error_redo:
+ url_fseek(&s->pb, last_sync, SEEK_SET);
redo:
/* next start code (should be immediately after) */
m->header_state = 0xff;
size = MAX_SYNC_SIZE;
startcode = find_next_start_code(&s->pb, &size, &m->header_state);
+ last_sync = url_ftell(&s->pb);
//printf("startcode=%x pos=0x%"PRIx64"\n", startcode, url_ftell(&s->pb));
if (startcode < 0)
return AVERROR_IO;
@@ -1475,18 +1479,18 @@ static int mpegps_read_pes_header(AVFormatContext *s,
/* find matching stream */
if (!((startcode >= 0x1c0 && startcode <= 0x1df) ||
(startcode >= 0x1e0 && startcode <= 0x1ef) ||
- (startcode == 0x1bd)))
+ (startcode == 0x1bd) || (startcode == 0x1fd)))
goto redo;
if (ppos) {
*ppos = url_ftell(&s->pb) - 4;
}
len = get_be16(&s->pb);
- pts = AV_NOPTS_VALUE;
+ pts =
dts = AV_NOPTS_VALUE;
/* stuffing */
for(;;) {
if (len < 1)
- goto redo;
+ goto error_redo;
c = get_byte(&s->pb);
len--;
/* XXX: for mpeg1, should test only bit 7 */
@@ -1495,23 +1499,17 @@ static int mpegps_read_pes_header(AVFormatContext *s,
}
if ((c & 0xc0) == 0x40) {
/* buffer scale & size */
- if (len < 2)
- goto redo;
get_byte(&s->pb);
c = get_byte(&s->pb);
len -= 2;
}
- if ((c & 0xf0) == 0x20) {
- if (len < 4)
- goto redo;
+ if ((c & 0xe0) == 0x20) {
dts = pts = get_pts(&s->pb, c);
len -= 4;
- } else if ((c & 0xf0) == 0x30) {
- if (len < 9)
- goto redo;
- pts = get_pts(&s->pb, c);
- dts = get_pts(&s->pb, -1);
- len -= 9;
+ if (c & 0x10){
+ dts = get_pts(&s->pb, -1);
+ len -= 5;
+ }
} else if ((c & 0xc0) == 0x80) {
/* mpeg 2 PES */
#if 0 /* some streams have this field set for no apparent reason */
@@ -1524,45 +1522,64 @@ static int mpegps_read_pes_header(AVFormatContext *s,
header_len = get_byte(&s->pb);
len -= 2;
if (header_len > len)
- goto redo;
- if ((flags & 0xc0) == 0x80) {
+ goto error_redo;
+ len -= header_len;
+ if (flags & 0x80) {
dts = pts = get_pts(&s->pb, -1);
- if (header_len < 5)
- goto redo;
header_len -= 5;
- len -= 5;
- } if ((flags & 0xc0) == 0xc0) {
- pts = get_pts(&s->pb, -1);
- dts = get_pts(&s->pb, -1);
- if (header_len < 10)
- goto redo;
- header_len -= 10;
- len -= 10;
+ if (flags & 0x40) {
+ dts = get_pts(&s->pb, -1);
+ header_len -= 5;
+ }
}
- len -= header_len;
- while (header_len > 0) {
- get_byte(&s->pb);
+ if (flags & 0x01) { /* PES extension */
+ pes_ext = get_byte(&s->pb);
header_len--;
+ if (pes_ext & 0x40) { /* pack header - should be zero in PS */
+ goto error_redo;
+ }
+ /* Skip PES private data, program packet sequence counter and P-STD buffer */
+ skip = (pes_ext >> 4) & 0xb;
+ skip += skip & 0x9;
+ url_fskip(&s->pb, skip);
+ header_len -= skip;
+
+ if (pes_ext & 0x01) { /* PES extension 2 */
+ ext2_len = get_byte(&s->pb);
+ header_len--;
+ if ((ext2_len & 0x7f) > 0) {
+ id_ext = get_byte(&s->pb);
+ if ((id_ext & 0x80) == 0)
+ startcode = ((startcode & 0xff) << 8) | id_ext;
+ header_len--;
+ }
+ }
}
+ if(header_len < 0)
+ goto error_redo;
+ url_fskip(&s->pb, header_len);
}
else if( c!= 0xf )
goto redo;
if (startcode == PRIVATE_STREAM_1 && !m->psm_es_type[startcode & 0xff]) {
- if (len < 1)
- goto redo;
startcode = get_byte(&s->pb);
len--;
- if (startcode >= 0x80 && startcode <= 0xbf) {
+ if (startcode >= 0x80 && startcode <= 0xcf) {
/* audio: skip header */
- if (len < 3)
- goto redo;
get_byte(&s->pb);
get_byte(&s->pb);
get_byte(&s->pb);
len -= 3;
+ if (startcode >= 0xb0 && startcode <= 0xbf) {
+ /* MLP/TrueHD audio has a 4-byte header */
+ get_byte(&s->pb);
+ len--;
+ }
}
}
+ if(len<0)
+ goto error_redo;
if(dts != AV_NOPTS_VALUE && ppos){
int i;
for(i=0; i<s->nb_streams; i++){
@@ -1641,15 +1658,27 @@ static int mpegps_read_packet(AVFormatContext *s,
} else if (startcode >= 0x80 && startcode <= 0x87) {
type = CODEC_TYPE_AUDIO;
codec_id = CODEC_ID_AC3;
- } else if (startcode >= 0x88 && startcode <= 0x9f) {
+ } else if ((startcode >= 0x88 && startcode <= 0x8f)
+ ||( startcode >= 0x98 && startcode <= 0x9f)) {
+ /* 0x90 - 0x97 is reserved for SDDS in DVD specs */
type = CODEC_TYPE_AUDIO;
codec_id = CODEC_ID_DTS;
- } else if (startcode >= 0xa0 && startcode <= 0xbf) {
+ } else if (startcode >= 0xa0 && startcode <= 0xaf) {
type = CODEC_TYPE_AUDIO;
codec_id = CODEC_ID_PCM_S16BE;
+ } else if (startcode >= 0xb0 && startcode <= 0xbf) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_MLP;
+ } else if (startcode >= 0xc0 && startcode <= 0xcf) {
+ /* Used for both AC-3 and E-AC-3 in EVOB files */
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AC3;
} else if (startcode >= 0x20 && startcode <= 0x3f) {
type = CODEC_TYPE_SUBTITLE;
codec_id = CODEC_ID_DVD_SUBTITLE;
+ } else if (startcode >= 0xfd55 && startcode <= 0xfd5f) {
+ type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_VC1;
} else {
skip:
/* skip packet */
@@ -1667,7 +1696,7 @@ static int mpegps_read_packet(AVFormatContext *s,
found:
if(st->discard >= AVDISCARD_ALL)
goto skip;
- if (startcode >= 0xa0 && startcode <= 0xbf) {
+ if (startcode >= 0xa0 && startcode <= 0xaf) {
int b1, freq;
/* for LPCM, we just skip the header and consider it raw
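
The PTS/DTS flag handling above reads each timestamp as a 5-byte field: a 4-bit marker plus bits 32..30 in the first byte, then two 15-bit halves, each followed by a marker bit. A standalone sketch of the decoding that get_pts() performs, with a worked 90 kHz example:

/* Hedged sketch: decoding the 5-byte PES timestamp field read by get_pts().
 * The example bytes encode 90000 (one second at 90 kHz). */
#include <stdint.h>
#include <stdio.h>

static int64_t decode_pes_timestamp(const uint8_t b[5])
{
    int64_t ts;

    ts  = (int64_t)((b[0] >> 1) & 0x07) << 30;          /* bits 32..30 */
    ts |= (int64_t)(b[1] << 7 | b[2] >> 1) << 15;       /* bits 29..15 */
    ts |=           b[3] << 7 | b[4] >> 1;              /* bits 14..0  */
    return ts;
}

int main(void)
{
    const uint8_t pts_field[5] = { 0x21, 0x00, 0x05, 0xBF, 0x21 };
    printf("pts = %lld\n", (long long)decode_pes_timestamp(pts_field)); /* 90000 */
    return 0;
}
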
diff --git a/contrib/ffmpeg/libavformat/mpegts.c b/contrib/ffmpeg/libavformat/mpegts.c
index dd5f0adca..c466710ee 100644
--- a/contrib/ffmpeg/libavformat/mpegts.c
+++ b/contrib/ffmpeg/libavformat/mpegts.c
@@ -75,47 +75,62 @@ typedef struct MpegTSFilter {
typedef struct MpegTSService {
int running:1;
- int sid;
- char *provider_name;
- char *name;
+ int sid; /**< MPEG Program Number of stream */
+ char *provider_name; /**< DVB Network name, "" if not DVB stream */
+ char *name; /**< DVB Service name, "MPEG Program [sid]" if not DVB stream*/
} MpegTSService;
struct MpegTSContext {
/* user data */
AVFormatContext *stream;
- int raw_packet_size; /* raw packet size, including FEC if present */
- int auto_guess; /* if true, all pids are analized to find streams */
+ /** raw packet size, including FEC if present */
+ int raw_packet_size;
+ /** if true, all pids are analyzed to find streams */
+ int auto_guess;
int set_service_ret;
- int mpeg2ts_raw; /* force raw MPEG2 transport stream output, if possible */
- int mpeg2ts_compute_pcr; /* compute exact PCR for each transport stream packet */
+ /** force raw MPEG2 transport stream output, if possible */
+ int mpeg2ts_raw;
+ /** compute exact PCR for each transport stream packet */
+ int mpeg2ts_compute_pcr;
- /* used to estimate the exact PCR */
- int64_t cur_pcr;
- int pcr_incr;
- int pcr_pid;
+ int64_t cur_pcr; /**< used to estimate the exact PCR */
+ int pcr_incr; /**< used to estimate the exact PCR */
+ int pcr_pid; /**< used to estimate the exact PCR */
/* data needed to handle file based ts */
- int stop_parse; /* stop parsing loop */
- AVPacket *pkt; /* packet containing av data */
+ /** stop parsing loop */
+ int stop_parse;
+ /** packet containing Audio/Video data */
+ AVPacket *pkt;
/******************************************/
/* private mpegts data */
/* scan context */
MpegTSFilter *sdt_filter;
+ /** number of PMTs in the last PAT seen */
int nb_services;
+ /** list of PMTs in the last PAT seen */
MpegTSService **services;
/* set service context (XXX: allocate it?) */
SetServiceCallback *set_service_cb;
void *set_service_opaque;
+ /** filter for the PAT */
MpegTSFilter *pat_filter;
+ /** filter for the PMT for the MPEG program number specified by req_sid */
MpegTSFilter *pmt_filter;
+ /** MPEG program number of stream we want to decode */
int req_sid;
+ /** filters for various streams specified by PMT + for the PAT and PMT */
MpegTSFilter *pids[NB_PID_MAX];
};
+/**
+ * Assembles PES packets out of TS packets, and then calls the "section_cb"
+ * function when they are complete.
+ */
static void write_section_data(AVFormatContext *s, MpegTSFilter *tss1,
const uint8_t *buf, int buf_size, int is_start)
{
@@ -162,7 +177,7 @@ static MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int
MpegTSSectionFilter *sec;
#ifdef DEBUG_SI
- printf("Filter: pid=0x%x\n", pid);
+ av_log(ts->stream, AV_LOG_DEBUG, "Filter: pid=0x%x\n", pid);
#endif
if (pid >= NB_PID_MAX || ts->pids[pid])
return NULL;
@@ -357,7 +372,8 @@ static MpegTSService *new_service(MpegTSContext *ts, int sid,
MpegTSService *service;
#ifdef DEBUG_SI
- printf("new_service: sid=0x%04x provider='%s' name='%s'\n",
+ av_log(ts->stream, AV_LOG_DEBUG, "new_service: "
+ "sid=0x%04x provider='%s' name='%s'\n",
sid, provider_name, name);
#endif
@@ -384,15 +400,16 @@ static void pmt_cb(void *opaque, const uint8_t *section, int section_len)
char language[4];
#ifdef DEBUG_SI
- printf("PMT:\n");
- av_hex_dump(stdout, (uint8_t *)section, section_len);
+ av_log(ts->stream, AV_LOG_DEBUG, "PMT: len %i\n", section_len);
+ av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
p_end = section + section_len - 4;
p = section;
if (parse_section_header(h, &p, p_end) < 0)
return;
#ifdef DEBUG_SI
- printf("sid=0x%x sec_num=%d/%d\n", h->id, h->sec_num, h->last_sec_num);
+ av_log(ts->stream, AV_LOG_DEBUG, "sid=0x%x sec_num=%d/%d\n",
+ h->id, h->sec_num, h->last_sec_num);
#endif
if (h->tid != PMT_TID || (ts->req_sid >= 0 && h->id != ts->req_sid) )
return;
@@ -402,7 +419,7 @@ static void pmt_cb(void *opaque, const uint8_t *section, int section_len)
return;
ts->pcr_pid = pcr_pid;
#ifdef DEBUG_SI
- printf("pcr_pid=0x%x\n", pcr_pid);
+ av_log(ts->stream, AV_LOG_DEBUG, "pcr_pid=0x%x\n", pcr_pid);
#endif
program_info_length = get16(&p, p_end) & 0xfff;
if (program_info_length < 0)
@@ -443,7 +460,8 @@ static void pmt_cb(void *opaque, const uint8_t *section, int section_len)
if (desc_end > desc_list_end)
break;
#ifdef DEBUG_SI
- printf("tag: 0x%02x len=%d\n", desc_tag, desc_len);
+ av_log(ts->stream, AV_LOG_DEBUG, "tag: 0x%02x len=%d\n",
+ desc_tag, desc_len);
#endif
switch(desc_tag) {
case DVB_SUBT_DESCID:
@@ -473,7 +491,8 @@ static void pmt_cb(void *opaque, const uint8_t *section, int section_len)
p = desc_list_end;
#ifdef DEBUG_SI
- printf("stream_type=%d pid=0x%x\n", stream_type, pid);
+ av_log(ts->stream, AV_LOG_DEBUG, "stream_type=%d pid=0x%x\n",
+ stream_type, pid);
#endif
/* now create ffmpeg stream */
@@ -484,6 +503,7 @@ static void pmt_cb(void *opaque, const uint8_t *section, int section_len)
case STREAM_TYPE_VIDEO_MPEG2:
case STREAM_TYPE_VIDEO_MPEG4:
case STREAM_TYPE_VIDEO_H264:
+ case STREAM_TYPE_VIDEO_VC1:
case STREAM_TYPE_AUDIO_AAC:
case STREAM_TYPE_AUDIO_AC3:
case STREAM_TYPE_AUDIO_DTS:
@@ -524,8 +544,8 @@ static void pat_cb(void *opaque, const uint8_t *section, int section_len)
int sid, pmt_pid;
#ifdef DEBUG_SI
- printf("PAT:\n");
- av_hex_dump(stdout, (uint8_t *)section, section_len);
+ av_log(ts->stream, AV_LOG_DEBUG, "PAT:\n");
+ av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
p_end = section + section_len - 4;
p = section;
@@ -542,7 +562,7 @@ static void pat_cb(void *opaque, const uint8_t *section, int section_len)
if (pmt_pid < 0)
break;
#ifdef DEBUG_SI
- printf("sid=0x%x pid=0x%x\n", sid, pmt_pid);
+ av_log(ts->stream, AV_LOG_DEBUG, "sid=0x%x pid=0x%x\n", sid, pmt_pid);
#endif
if (sid == 0x0000) {
/* NIT info */
@@ -573,8 +593,8 @@ static void pat_scan_cb(void *opaque, const uint8_t *section, int section_len)
char buf[256];
#ifdef DEBUG_SI
- printf("PAT:\n");
- av_hex_dump(stdout, (uint8_t *)section, section_len);
+ av_log(ts->stream, AV_LOG_DEBUG, "PAT:\n");
+ av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
p_end = section + section_len - 4;
p = section;
@@ -591,7 +611,7 @@ static void pat_scan_cb(void *opaque, const uint8_t *section, int section_len)
if (pmt_pid < 0)
break;
#ifdef DEBUG_SI
- printf("sid=0x%x pid=0x%x\n", sid, pmt_pid);
+ av_log(ts->stream, AV_LOG_DEBUG, "sid=0x%x pid=0x%x\n", sid, pmt_pid);
#endif
if (sid == 0x0000) {
/* NIT info */
@@ -634,8 +654,8 @@ static void sdt_cb(void *opaque, const uint8_t *section, int section_len)
char *name, *provider_name;
#ifdef DEBUG_SI
- printf("SDT:\n");
- av_hex_dump(stdout, (uint8_t *)section, section_len);
+ av_log(ts->stream, AV_LOG_DEBUG, "SDT:\n");
+ av_hex_dump_log(ts->stream, AV_LOG_DEBUG, (uint8_t *)section, section_len);
#endif
p_end = section + section_len - 4;
@@ -672,7 +692,8 @@ static void sdt_cb(void *opaque, const uint8_t *section, int section_len)
if (desc_end > desc_list_end)
break;
#ifdef DEBUG_SI
- printf("tag: 0x%02x len=%d\n", desc_tag, desc_len);
+ av_log(ts->stream, AV_LOG_DEBUG, "tag: 0x%02x len=%d\n",
+ desc_tag, desc_len);
#endif
switch(desc_tag) {
case 0x48:
@@ -785,7 +806,7 @@ static void mpegts_push_data(void *opaque,
/* we got all the PES or section header. We can now
decide */
#if 0
- av_hex_dump(pes->header, pes->data_index);
+ av_hex_dump_log(pes->stream, AV_LOG_DEBUG, pes->header, pes->data_index);
#endif
if (pes->header[0] == 0x00 && pes->header[1] == 0x00 &&
pes->header[2] == 0x01) {
@@ -793,7 +814,7 @@ static void mpegts_push_data(void *opaque,
code = pes->header[3] | 0x100;
if (!((code >= 0x1c0 && code <= 0x1df) ||
(code >= 0x1e0 && code <= 0x1ef) ||
- (code == 0x1bd)))
+ (code == 0x1bd) || (code == 0x1fd)))
goto skip;
if (!pes->st) {
/* allocate stream */
@@ -901,6 +922,10 @@ static AVStream* new_pes_av_stream(PESContext *pes, uint32_t code)
codec_type = CODEC_TYPE_VIDEO;
codec_id = CODEC_ID_H264;
break;
+ case STREAM_TYPE_VIDEO_VC1:
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_VC1;
+ break;
case STREAM_TYPE_AUDIO_AAC:
codec_type = CODEC_TYPE_AUDIO;
codec_id = CODEC_ID_AAC;
@@ -1234,7 +1259,7 @@ goto_auto_guess:
service = ts->services[i];
sid = service->sid;
#ifdef DEBUG_SI
- printf("tuning to '%s'\n", service->name);
+ av_log(ts->stream, AV_LOG_DEBUG, "tuning to '%s'\n", service->name);
#endif
/* now find the info for the first service if we found any,
@@ -1258,7 +1283,7 @@ goto_auto_guess:
}
#ifdef DEBUG_SI
- printf("tuning done\n");
+ av_log(ts->stream, AV_LOG_DEBUG, "tuning done\n");
#endif
}
s->ctx_flags |= AVFMTCTX_NOHEADER;
@@ -1309,7 +1334,7 @@ goto_auto_guess:
st->codec->bit_rate = s->bit_rate;
st->start_time = ts->cur_pcr;
#if 0
- printf("start=%0.3f pcr=%0.3f incr=%d\n",
+ av_log(ts->stream, AV_LOG_DEBUG, "start=%0.3f pcr=%0.3f incr=%d\n",
st->start_time / 1000000.0, pcrs[0] / 27e6, ts->pcr_incr);
#endif
}
@@ -1332,7 +1357,7 @@ static int mpegts_raw_read_packet(AVFormatContext *s,
uint8_t pcr_buf[12];
if (av_new_packet(pkt, TS_PACKET_SIZE) < 0)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
pkt->pos= url_ftell(&s->pb);
ret = read_packet(&s->pb, pkt->data, ts->raw_packet_size);
if (ret < 0) {
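
Both the PS and TS demuxers above now accept start code 0x1fd (the extended stream id used for VC-1 and other private payloads) alongside the usual audio, video and private-stream ranges. A one-function sketch of that test:

/* Hedged sketch of the PES start-code filter used in mpegts_push_data()
 * and in the PS demuxer after the 0x1fd addition. */
static int pes_code_is_handled(unsigned int code)
{
    return (code >= 0x1c0 && code <= 0x1df) ||   /* MPEG audio streams */
           (code >= 0x1e0 && code <= 0x1ef) ||   /* MPEG video streams */
            code == 0x1bd                   ||   /* private stream 1   */
            code == 0x1fd;                       /* extended stream id */
}
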
diff --git a/contrib/ffmpeg/libavformat/mpegts.h b/contrib/ffmpeg/libavformat/mpegts.h
index b3eb3cda7..771becbd4 100644
--- a/contrib/ffmpeg/libavformat/mpegts.h
+++ b/contrib/ffmpeg/libavformat/mpegts.h
@@ -46,13 +46,13 @@
#define STREAM_TYPE_AUDIO_AAC 0x0f
#define STREAM_TYPE_VIDEO_MPEG4 0x10
#define STREAM_TYPE_VIDEO_H264 0x1b
+#define STREAM_TYPE_VIDEO_VC1 0xea
#define STREAM_TYPE_AUDIO_AC3 0x81
#define STREAM_TYPE_AUDIO_DTS 0x8a
#define STREAM_TYPE_SUBTITLE_DVB 0x100
-unsigned int mpegts_crc32(const uint8_t *data, int len);
extern AVOutputFormat mpegts_muxer;
typedef struct MpegTSContext MpegTSContext;
diff --git a/contrib/ffmpeg/libavformat/mpegtsenc.c b/contrib/ffmpeg/libavformat/mpegtsenc.c
index 39868bea4..c521b68b8 100644
--- a/contrib/ffmpeg/libavformat/mpegtsenc.c
+++ b/contrib/ffmpeg/libavformat/mpegtsenc.c
@@ -143,6 +143,7 @@ typedef struct MpegTSWriteStream {
int cc;
int payload_index;
int64_t payload_pts;
+ int64_t payload_dts;
uint8_t payload[DEFAULT_PES_PAYLOAD_SIZE];
} MpegTSWriteStream;
@@ -396,6 +397,7 @@ static int mpegts_write_header(AVFormatContext *s)
ts_st->service = service;
ts_st->pid = DEFAULT_START_PID + i;
ts_st->payload_pts = AV_NOPTS_VALUE;
+ ts_st->payload_dts = AV_NOPTS_VALUE;
/* update PCR pid by using the first video stream */
if (st->codec->codec_type == CODEC_TYPE_VIDEO &&
service->pcr_pid == 0x1fff)
@@ -460,15 +462,29 @@ static void retransmit_si_info(AVFormatContext *s)
}
}
+static void write_pts(uint8_t *q, int fourbits, int64_t pts)
+{
+ int val;
+
+ val = fourbits << 4 | (((pts >> 30) & 0x07) << 1) | 1;
+ *q++ = val;
+ val = (((pts >> 15) & 0x7fff) << 1) | 1;
+ *q++ = val >> 8;
+ *q++ = val;
+ val = (((pts) & 0x7fff) << 1) | 1;
+ *q++ = val >> 8;
+ *q++ = val;
+}
+
/* NOTE: pes_data contains all the PES packet */
static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
const uint8_t *payload, int payload_size,
- int64_t pts)
+ int64_t pts, int64_t dts)
{
MpegTSWriteStream *ts_st = st->priv_data;
uint8_t buf[TS_PACKET_SIZE];
uint8_t *q;
- int val, is_start, len, header_len, write_pcr, private_code;
+ int val, is_start, len, header_len, write_pcr, private_code, flags;
int afc_len, stuffing_len;
int64_t pcr = -1; /* avoid warning */
@@ -527,13 +543,19 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
private_code = 0x20;
}
}
- if (pts != AV_NOPTS_VALUE)
- header_len = 8;
- else
- header_len = 3;
+ header_len = 0;
+ flags = 0;
+ if (pts != AV_NOPTS_VALUE) {
+ header_len += 5;
+ flags |= 0x80;
+ }
+ if (dts != AV_NOPTS_VALUE) {
+ header_len += 5;
+ flags |= 0x40;
+ }
+ len = payload_size + header_len + 3;
if (private_code != 0)
- header_len++;
- len = payload_size + header_len;
+ len++;
*q++ = len >> 8;
*q++ = len;
val = 0x80;
@@ -541,21 +563,15 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
if (st->codec->codec_type == CODEC_TYPE_SUBTITLE)
val |= 0x04;
*q++ = val;
+ *q++ = flags;
+ *q++ = header_len;
if (pts != AV_NOPTS_VALUE) {
- *q++ = 0x80; /* PTS only */
- *q++ = 0x05; /* header len */
- val = (0x02 << 4) |
- (((pts >> 30) & 0x07) << 1) | 1;
- *q++ = val;
- val = (((pts >> 15) & 0x7fff) << 1) | 1;
- *q++ = val >> 8;
- *q++ = val;
- val = (((pts) & 0x7fff) << 1) | 1;
- *q++ = val >> 8;
- *q++ = val;
- } else {
- *q++ = 0x00;
- *q++ = 0x00;
+ write_pts(q, flags >> 6, pts);
+ q += 5;
+ }
+ if (dts != AV_NOPTS_VALUE) {
+ write_pts(q, 1, dts);
+ q += 5;
}
if (private_code != 0)
*q++ = private_code;
@@ -607,7 +623,7 @@ static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
if (st->codec->codec_type == CODEC_TYPE_SUBTITLE) {
/* for subtitle, a single PES packet must be generated */
- mpegts_write_pes(s, st, buf, size, pkt->pts);
+ mpegts_write_pes(s, st, buf, size, pkt->pts, AV_NOPTS_VALUE);
return 0;
}
@@ -622,10 +638,13 @@ static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
ts_st->payload_index += len;
if (ts_st->payload_pts == AV_NOPTS_VALUE)
ts_st->payload_pts = pkt->pts;
+ if (ts_st->payload_dts == AV_NOPTS_VALUE)
+ ts_st->payload_dts = pkt->dts;
if (ts_st->payload_index >= max_payload_size) {
mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
- ts_st->payload_pts);
+ ts_st->payload_pts, ts_st->payload_dts);
ts_st->payload_pts = AV_NOPTS_VALUE;
+ ts_st->payload_dts = AV_NOPTS_VALUE;
ts_st->payload_index = 0;
}
}
@@ -646,7 +665,7 @@ static int mpegts_write_end(AVFormatContext *s)
ts_st = st->priv_data;
if (ts_st->payload_index > 0) {
mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
- ts_st->payload_pts);
+ ts_st->payload_pts, ts_st->payload_dts);
}
}
put_flush_packet(&s->pb);
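
The rewritten mpegts_write_pes() above builds the optional-field area from flag bits: 0x80 advertises a PTS, 0x40 a DTS, each adds five header bytes, and the 16-bit PES packet length covers the payload plus those fields plus three fixed bytes. A small worked sketch of that bookkeeping, with an illustrative payload size:

/* Hedged sketch of the PES header bookkeeping in mpegts_write_pes(). */
#include <stdio.h>

int main(void)
{
    int has_pts = 1, has_dts = 1;
    int payload_size = 184;              /* illustrative */
    int flags = 0, header_len = 0;

    if (has_pts) { flags |= 0x80; header_len += 5; }
    if (has_dts) { flags |= 0x40; header_len += 5; }

    /* the 3 accounts for the 0x80 marker byte, the flags byte and the
       header-length byte that follow the 16-bit PES packet length */
    int pes_packet_length = payload_size + header_len + 3;

    printf("flags 0x%02x, header_len %d, PES packet length %d\n",
           flags, header_len, pes_packet_length);    /* 0xc0, 10, 197 */
    return 0;
}
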
diff --git a/contrib/ffmpeg/libavformat/mxf.c b/contrib/ffmpeg/libavformat/mxf.c
index b20679943..677e023f0 100644
--- a/contrib/ffmpeg/libavformat/mxf.c
+++ b/contrib/ffmpeg/libavformat/mxf.c
@@ -46,10 +46,12 @@
//#define DEBUG
#include "avformat.h"
+#include "aes.h"
typedef uint8_t UID[16];
enum MXFMetadataSetType {
+ AnyType,
MaterialPackage,
SourcePackage,
SourceClip,
@@ -59,8 +61,16 @@ enum MXFMetadataSetType {
Descriptor,
Track,
EssenceContainerData,
+ CryptoContext,
};
+typedef struct MXFCryptoContext {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID context_uid;
+ UID source_container_ul;
+} MXFCryptoContext;
+
typedef struct MXFStructuralComponent {
UID uid;
enum MXFMetadataSetType type;
@@ -132,15 +142,11 @@ typedef struct {
typedef struct MXFContext {
UID *packages_refs;
int packages_count;
- UID *essence_container_data_sets_refs;
- int essence_container_data_sets_count;
- UID *essence_containers_uls; /* Universal Labels SMPTE RP224 */
- int essence_containers_uls_count;
- UID operational_pattern_ul;
- UID content_storage_uid;
MXFMetadataSet **metadata_sets;
int metadata_sets_count;
+ const uint8_t *sync_key;
AVFormatContext *fc;
+ struct AVAES *aesc;
} MXFContext;
typedef struct KLVPacket {
@@ -167,33 +173,34 @@ typedef struct MXFDataDefinitionUL {
typedef struct MXFMetadataReadTableEntry {
const UID key;
- int (*read)(MXFContext *mxf, KLVPacket *klv);
+ int (*read)();
+ int ctx_size;
+ enum MXFMetadataSetType type;
} MXFMetadataReadTableEntry;
/* partial keys to match */
static const uint8_t mxf_header_partition_pack_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02 };
static const uint8_t mxf_essence_element_key[] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01 };
+/* complete keys to match */
+static const uint8_t mxf_encrypted_triplet_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x04,0x01,0x07,0x0d,0x01,0x03,0x01,0x02,0x7e,0x01,0x00 };
+static const uint8_t mxf_encrypted_essence_container[] = { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x07,0x0d,0x01,0x03,0x01,0x02,0x0b,0x01,0x00 };
#define IS_KLV_KEY(x, y) (!memcmp(x, y, sizeof(y)))
-#define PRINT_KEY(s, x) dprintf("%s %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", s, \
+#define PRINT_KEY(pc, s, x) dprintf(pc, "%s %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", s, \
(x)[0], (x)[1], (x)[2], (x)[3], (x)[4], (x)[5], (x)[6], (x)[7], (x)[8], (x)[9], (x)[10], (x)[11], (x)[12], (x)[13], (x)[14], (x)[15])
static int64_t klv_decode_ber_length(ByteIOContext *pb)
{
- int64_t size = 0;
- uint8_t length = get_byte(pb);
- int type = length >> 7;
-
- if (type) { /* long form */
- int bytes_num = length & 0x7f;
+ uint64_t size = get_byte(pb);
+ if (size & 0x80) { /* long form */
+ int bytes_num = size & 0x7f;
/* SMPTE 379M 5.3.4 guarantees that bytes_num must not exceed 8 bytes */
if (bytes_num > 8)
return -1;
+ size = 0;
while (bytes_num--)
size = size << 8 | get_byte(pb);
- } else {
- size = length & 0x7f;
}
return size;
}
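
klv_decode_ber_length() above now treats the first byte uniformly: values below 0x80 are the length itself, otherwise the low seven bits give the number of big-endian length bytes that follow (capped at 8). A standalone sketch of the same rule over a memory buffer:

/* Hedged sketch of KLV BER length decoding as rewritten above.
 * 0x05                -> length 5       (short form)
 * 0x83 0x01 0x00 0x00 -> length 0x10000 (long form, 3 length bytes) */
#include <stdint.h>
#include <stdio.h>

static int64_t ber_length(const uint8_t **p)
{
    uint64_t size = *(*p)++;
    if (size & 0x80) {                 /* long form */
        int bytes_num = size & 0x7f;
        if (bytes_num > 8)             /* at most 8 length bytes */
            return -1;
        size = 0;
        while (bytes_num--)
            size = size << 8 | *(*p)++;
    }
    return size;
}

int main(void)
{
    const uint8_t buf[] = { 0x83, 0x01, 0x00, 0x00 };
    const uint8_t *p = buf;
    printf("BER length: 0x%llx\n", (unsigned long long)ber_length(&p)); /* 0x10000 */
    return 0;
}
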
@@ -212,7 +219,7 @@ static int mxf_get_stream_index(AVFormatContext *s, KLVPacket *klv)
for (i = 0; i < s->nb_streams; i++) {
MXFTrack *track = s->streams[i]->priv_data;
- /* SMPTE 379M 7.3 */
+ /* SMPTE 379M 7.3 */
if (!memcmp(klv->key + sizeof(mxf_essence_element_key), track->track_number, sizeof(track->track_number)))
return i;
}
@@ -249,8 +256,66 @@ static int mxf_get_d10_aes3_packet(ByteIOContext *pb, AVStream *st, AVPacket *pk
return 0;
}
+static int mxf_decrypt_triplet(AVFormatContext *s, AVPacket *pkt, KLVPacket *klv)
+{
+ static const uint8_t checkv[16] = {0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b};
+ MXFContext *mxf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t end = url_ftell(pb) + klv->length;
+ uint64_t size;
+ uint64_t orig_size;
+ uint64_t plaintext_size;
+ uint8_t ivec[16];
+ uint8_t tmpbuf[16];
+ int index;
+
+ if (!mxf->aesc && s->key && s->keylen == 16) {
+ mxf->aesc = av_malloc(av_aes_size);
+ av_aes_init(mxf->aesc, s->key, 128, 1);
+ }
+ // crypto context
+ url_fskip(pb, klv_decode_ber_length(pb));
+ // plaintext offset
+ klv_decode_ber_length(pb);
+ plaintext_size = get_be64(pb);
+ // source klv key
+ klv_decode_ber_length(pb);
+ get_buffer(pb, klv->key, 16);
+ if (!IS_KLV_KEY(klv, mxf_essence_element_key)) goto err_out;
+ index = mxf_get_stream_index(s, klv);
+ if (index < 0) goto err_out;
+ // source size
+ klv_decode_ber_length(pb);
+ orig_size = get_be64(pb);
+ if (orig_size < plaintext_size) goto err_out;
+ // enc. code
+ size = klv_decode_ber_length(pb);
+ if (size < 32 || size - 32 < orig_size) goto err_out;
+ get_buffer(pb, ivec, 16);
+ get_buffer(pb, tmpbuf, 16);
+ if (mxf->aesc)
+ av_aes_crypt(mxf->aesc, tmpbuf, tmpbuf, 1, ivec, 1);
+ if (memcmp(tmpbuf, checkv, 16))
+ av_log(s, AV_LOG_ERROR, "probably incorrect decryption key\n");
+ size -= 32;
+ av_get_packet(pb, pkt, size);
+ size -= plaintext_size;
+ if (mxf->aesc)
+ av_aes_crypt(mxf->aesc, &pkt->data[plaintext_size],
+ &pkt->data[plaintext_size], size >> 4, ivec, 1);
+ pkt->size = orig_size;
+ pkt->stream_index = index;
+ url_fskip(pb, end - url_ftell(pb));
+ return 0;
+
+err_out:
+ url_fskip(pb, end - url_ftell(pb));
+ return -1;
+}
+
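
mxf_decrypt_triplet() above validates the key by decrypting the first 16-byte block of the encrypted payload and comparing it against a fixed check value (the ASCII pattern "CHUK" repeated four times). A minimal sketch of just that check, using a hypothetical aes_cbc_decrypt() in place of libavutil's av_aes_crypt():

/* Hedged sketch of the check-value test in mxf_decrypt_triplet().
 * aes_cbc_decrypt() is a hypothetical stand-in for av_aes_crypt(). */
#include <string.h>

void aes_cbc_decrypt(void *ctx, unsigned char *dst, const unsigned char *src,
                     int blocks, unsigned char *iv, int decrypt); /* assumed */

static const unsigned char checkv[16] = "CHUKCHUKCHUKCHUK";

int key_looks_valid(void *aes_ctx, unsigned char ivec[16],
                    const unsigned char enc_check[16])
{
    unsigned char tmp[16];

    memcpy(tmp, enc_check, 16);
    aes_cbc_decrypt(aes_ctx, tmp, tmp, 1, ivec, 1);   /* one CBC block */
    return memcmp(tmp, checkv, 16) == 0;
}
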
static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
+ MXFContext *mxf = s->priv_data;
KLVPacket klv;
while (!url_feof(&s->pb)) {
@@ -259,8 +324,17 @@ static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt)
return -1;
}
#ifdef DEBUG
- PRINT_KEY("read packet", klv.key);
+ PRINT_KEY(s, "read packet", klv.key);
#endif
+ if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key)) {
+ int res = mxf_decrypt_triplet(s, pkt, &klv);
+ mxf->sync_key = mxf_encrypted_triplet_key;
+ if (res < 0) {
+ av_log(s, AV_LOG_ERROR, "invalid encoded triplet\n");
+ return -1;
+ }
+ return 0;
+ }
if (IS_KLV_KEY(klv.key, mxf_essence_element_key)) {
int index = mxf_get_stream_index(s, &klv);
if (index < 0) {
@@ -292,291 +366,132 @@ static int mxf_add_metadata_set(MXFContext *mxf, void *metadata_set)
return 0;
}
-static int mxf_read_metadata_preface(MXFContext *mxf, KLVPacket *klv)
+static int mxf_read_metadata_cryptographic_context(MXFCryptoContext *cryptocontext, ByteIOContext *pb, int tag)
{
- ByteIOContext *pb = &mxf->fc->pb;
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
-
- switch (tag) {
- case 0x3B03:
- get_buffer(pb, mxf->content_storage_uid, 16);
- break;
- case 0x3B09:
- get_buffer(pb, mxf->operational_pattern_ul, 16);
- break;
- case 0x3B0A:
- mxf->essence_containers_uls_count = get_be32(pb);
- if (mxf->essence_containers_uls_count >= UINT_MAX / sizeof(UID))
- return -1;
- mxf->essence_containers_uls = av_malloc(mxf->essence_containers_uls_count * sizeof(UID));
- url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
- get_buffer(pb, (uint8_t *)mxf->essence_containers_uls, mxf->essence_containers_uls_count * sizeof(UID));
- break;
- default:
- url_fskip(pb, size);
- }
- bytes_read += size + 4;
+ switch(tag) {
+ case 0xFFFE:
+ get_buffer(pb, cryptocontext->context_uid, 16);
+ break;
+ case 0xFFFD:
+ get_buffer(pb, cryptocontext->source_container_ul, 16);
+ break;
}
return 0;
}
-static int mxf_read_metadata_content_storage(MXFContext *mxf, KLVPacket *klv)
+static int mxf_read_metadata_content_storage(MXFContext *mxf, ByteIOContext *pb, int tag)
{
- ByteIOContext *pb = &mxf->fc->pb;
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
-
- dprintf("tag 0x%04X, size %d\n", tag, size);
- switch (tag) {
- case 0x1901:
- mxf->packages_count = get_be32(pb);
- if (mxf->packages_count >= UINT_MAX / sizeof(UID))
- return -1;
- mxf->packages_refs = av_malloc(mxf->packages_count * sizeof(UID));
- url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
- get_buffer(pb, (uint8_t *)mxf->packages_refs, mxf->packages_count * sizeof(UID));
- break;
- case 0x1902:
- mxf->essence_container_data_sets_count = get_be32(pb);
- if (mxf->essence_container_data_sets_count >= UINT_MAX / sizeof(UID))
- return -1;
- mxf->essence_container_data_sets_refs = av_malloc(mxf->essence_container_data_sets_count * sizeof(UID));
- url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
- get_buffer(pb, (uint8_t *)mxf->essence_container_data_sets_refs, mxf->essence_container_data_sets_count * sizeof(UID));
- break;
- default:
- url_fskip(pb, size);
- }
- bytes_read += size + 4;
+ switch (tag) {
+ case 0x1901:
+ mxf->packages_count = get_be32(pb);
+ if (mxf->packages_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ mxf->packages_refs = av_malloc(mxf->packages_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)mxf->packages_refs, mxf->packages_count * sizeof(UID));
+ break;
}
return 0;
}
-static int mxf_read_metadata_source_clip(MXFContext *mxf, KLVPacket *klv)
-{
- ByteIOContext *pb = &mxf->fc->pb;
- MXFStructuralComponent *source_clip = av_mallocz(sizeof(*source_clip));
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
-
- bytes_read += size + 4;
- dprintf("tag 0x%04X, size %d\n", tag, size);
- if (!size) { /* ignore empty tag, needed for some files with empty UMID tag */
- av_log(mxf->fc, AV_LOG_ERROR, "local tag 0x%04X with 0 size\n", tag);
- continue;
- }
- switch (tag) {
- case 0x3C0A:
- get_buffer(pb, source_clip->uid, 16);
- break;
- case 0x0202:
- source_clip->duration = get_be64(pb);
- break;
- case 0x1201:
- source_clip->start_position = get_be64(pb);
- break;
- case 0x1101:
- /* UMID, only get last 16 bytes */
- url_fskip(pb, 16);
- get_buffer(pb, source_clip->source_package_uid, 16);
- break;
- case 0x1102:
- source_clip->source_track_id = get_be32(pb);
- break;
- default:
- url_fskip(pb, size);
- }
- }
- source_clip->type = SourceClip;
- return mxf_add_metadata_set(mxf, source_clip);
-}
-
-static int mxf_read_metadata_material_package(MXFContext *mxf, KLVPacket *klv)
+static int mxf_read_metadata_source_clip(MXFStructuralComponent *source_clip, ByteIOContext *pb, int tag)
{
- ByteIOContext *pb = &mxf->fc->pb;
- MXFPackage *package = av_mallocz(sizeof(*package));
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* KLV specified by 0x53 */
-
- switch (tag) {
- case 0x3C0A:
- get_buffer(pb, package->uid, 16);
- break;
- case 0x4403:
- package->tracks_count = get_be32(pb);
- if (package->tracks_count >= UINT_MAX / sizeof(UID))
- return -1;
- package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID));
- url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
- get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID));
- break;
- default:
- url_fskip(pb, size);
- }
- bytes_read += size + 4;
+ switch(tag) {
+ case 0x0202:
+ source_clip->duration = get_be64(pb);
+ break;
+ case 0x1201:
+ source_clip->start_position = get_be64(pb);
+ break;
+ case 0x1101:
+ /* UMID, only get last 16 bytes */
+ url_fskip(pb, 16);
+ get_buffer(pb, source_clip->source_package_uid, 16);
+ break;
+ case 0x1102:
+ source_clip->source_track_id = get_be32(pb);
+ break;
}
- package->type = MaterialPackage;
- return mxf_add_metadata_set(mxf, package);
+ return 0;
}
-static int mxf_read_metadata_track(MXFContext *mxf, KLVPacket *klv)
+static int mxf_read_metadata_material_package(MXFPackage *package, ByteIOContext *pb, int tag)
{
- ByteIOContext *pb = &mxf->fc->pb;
- MXFTrack *track = av_mallocz(sizeof(*track));
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* KLV specified by 0x53 */
-
- dprintf("tag 0x%04X, size %d\n", tag, size);
- switch (tag) {
- case 0x3C0A:
- get_buffer(pb, track->uid, 16);
- break;
- case 0x4801:
- track->track_id = get_be32(pb);
- break;
- case 0x4804:
- get_buffer(pb, track->track_number, 4);
- break;
- case 0x4B01:
- track->edit_rate.den = get_be32(pb);
- track->edit_rate.num = get_be32(pb);
- break;
- case 0x4803:
- get_buffer(pb, track->sequence_ref, 16);
- break;
- default:
- url_fskip(pb, size);
- }
- bytes_read += size + 4;
+ switch(tag) {
+ case 0x4403:
+ package->tracks_count = get_be32(pb);
+ if (package->tracks_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID));
+ break;
}
- track->type = Track;
- return mxf_add_metadata_set(mxf, track);
+ return 0;
}
-static int mxf_read_metadata_sequence(MXFContext *mxf, KLVPacket *klv)
+static int mxf_read_metadata_track(MXFTrack *track, ByteIOContext *pb, int tag)
{
- ByteIOContext *pb = &mxf->fc->pb;
- MXFSequence *sequence = av_mallocz(sizeof(*sequence));
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* KLV specified by 0x53 */
-
- dprintf("tag 0x%04X, size %d\n", tag, size);
- switch (tag) {
- case 0x3C0A:
- get_buffer(pb, sequence->uid, 16);
- break;
- case 0x0202:
- sequence->duration = get_be64(pb);
- break;
- case 0x0201:
- get_buffer(pb, sequence->data_definition_ul, 16);
- break;
- case 0x1001:
- sequence->structural_components_count = get_be32(pb);
- if (sequence->structural_components_count >= UINT_MAX / sizeof(UID))
- return -1;
- sequence->structural_components_refs = av_malloc(sequence->structural_components_count * sizeof(UID));
- url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
- get_buffer(pb, (uint8_t *)sequence->structural_components_refs, sequence->structural_components_count * sizeof(UID));
- break;
- default:
- url_fskip(pb, size);
- }
- bytes_read += size + 4;
+ switch(tag) {
+ case 0x4801:
+ track->track_id = get_be32(pb);
+ break;
+ case 0x4804:
+ get_buffer(pb, track->track_number, 4);
+ break;
+ case 0x4B01:
+ track->edit_rate.den = get_be32(pb);
+ track->edit_rate.num = get_be32(pb);
+ break;
+ case 0x4803:
+ get_buffer(pb, track->sequence_ref, 16);
+ break;
}
- sequence->type = Sequence;
- return mxf_add_metadata_set(mxf, sequence);
+ return 0;
}
-static int mxf_read_metadata_source_package(MXFContext *mxf, KLVPacket *klv)
+static int mxf_read_metadata_sequence(MXFSequence *sequence, ByteIOContext *pb, int tag)
{
- ByteIOContext *pb = &mxf->fc->pb;
- MXFPackage *package = av_mallocz(sizeof(*package));
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* KLV specified by 0x53 */
-
- dprintf("tag 0x%04X, size %d\n", tag, size);
- switch (tag) {
- case 0x3C0A:
- get_buffer(pb, package->uid, 16);
- break;
- case 0x4403:
- package->tracks_count = get_be32(pb);
- if (package->tracks_count >= UINT_MAX / sizeof(UID))
- return -1;
- package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID));
- url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
- get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID));
- break;
- case 0x4401:
- /* UMID, only get last 16 bytes */
- url_fskip(pb, 16);
- get_buffer(pb, package->package_uid, 16);
- break;
- case 0x4701:
- get_buffer(pb, package->descriptor_ref, 16);
- break;
- default:
- url_fskip(pb, size);
- }
- bytes_read += size + 4;
+ switch(tag) {
+ case 0x0202:
+ sequence->duration = get_be64(pb);
+ break;
+ case 0x0201:
+ get_buffer(pb, sequence->data_definition_ul, 16);
+ break;
+ case 0x1001:
+ sequence->structural_components_count = get_be32(pb);
+ if (sequence->structural_components_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ sequence->structural_components_refs = av_malloc(sequence->structural_components_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)sequence->structural_components_refs, sequence->structural_components_count * sizeof(UID));
+ break;
}
- package->type = SourcePackage;
- return mxf_add_metadata_set(mxf, package);
+ return 0;
}
-static int mxf_read_metadata_multiple_descriptor(MXFContext *mxf, KLVPacket *klv)
+static int mxf_read_metadata_source_package(MXFPackage *package, ByteIOContext *pb, int tag)
{
- ByteIOContext *pb = &mxf->fc->pb;
- MXFDescriptor *descriptor = av_mallocz(sizeof(*descriptor));
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* KLV specified by 0x53 */
-
- dprintf("tag 0x%04X, size %d\n", tag, size);
- switch (tag) {
- case 0x3C0A:
- get_buffer(pb, descriptor->uid, 16);
- break;
- case 0x3F01:
- descriptor->sub_descriptors_count = get_be32(pb);
- if (descriptor->sub_descriptors_count >= UINT_MAX / sizeof(UID))
- return -1;
- descriptor->sub_descriptors_refs = av_malloc(descriptor->sub_descriptors_count * sizeof(UID));
- url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
- get_buffer(pb, (uint8_t *)descriptor->sub_descriptors_refs, descriptor->sub_descriptors_count * sizeof(UID));
- break;
- default:
- url_fskip(pb, size);
- }
- bytes_read += size + 4;
+ switch(tag) {
+ case 0x4403:
+ package->tracks_count = get_be32(pb);
+ if (package->tracks_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID));
+ break;
+ case 0x4401:
+ /* UMID, only get last 16 bytes */
+ url_fskip(pb, 16);
+ get_buffer(pb, package->package_uid, 16);
+ break;
+ case 0x4701:
+ get_buffer(pb, package->descriptor_ref, 16);
+ break;
}
- descriptor->type = MultipleDescriptor;
- return mxf_add_metadata_set(mxf, descriptor);
+ return 0;
}
static void mxf_read_metadata_pixel_layout(ByteIOContext *pb, MXFDescriptor *descriptor)
@@ -585,7 +500,7 @@ static void mxf_read_metadata_pixel_layout(ByteIOContext *pb, MXFDescriptor *des
do {
code = get_byte(pb);
- dprintf("pixel layout: code 0x%x\n", code);
+ dprintf(NULL, "pixel layout: code 0x%x\n", code);
switch (code) {
case 0x52: /* R */
descriptor->bits_per_sample += get_byte(pb);
@@ -602,68 +517,59 @@ static void mxf_read_metadata_pixel_layout(ByteIOContext *pb, MXFDescriptor *des
} while (code != 0); /* SMPTE 377M E.2.46 */
}
-static int mxf_read_metadata_generic_descriptor(MXFContext *mxf, KLVPacket *klv)
+static int mxf_read_metadata_generic_descriptor(MXFDescriptor *descriptor, ByteIOContext *pb, int tag, int size)
{
- ByteIOContext *pb = &mxf->fc->pb;
- MXFDescriptor *descriptor = av_mallocz(sizeof(*descriptor));
- int bytes_read = 0;
-
- while (bytes_read < klv->length) {
- int tag = get_be16(pb);
- int size = get_be16(pb); /* KLV specified by 0x53 */
-
- dprintf("tag 0x%04X, size %d\n", tag, size);
- switch (tag) {
- case 0x3C0A:
- get_buffer(pb, descriptor->uid, 16);
- break;
- case 0x3004:
- get_buffer(pb, descriptor->essence_container_ul, 16);
- break;
- case 0x3006:
- descriptor->linked_track_id = get_be32(pb);
- break;
- case 0x3201: /* PictureEssenceCoding */
- get_buffer(pb, descriptor->essence_codec_ul, 16);
- break;
- case 0x3203:
- descriptor->width = get_be32(pb);
- break;
- case 0x3202:
- descriptor->height = get_be32(pb);
- break;
- case 0x320E:
- descriptor->aspect_ratio.num = get_be32(pb);
- descriptor->aspect_ratio.den = get_be32(pb);
- break;
- case 0x3D03:
- descriptor->sample_rate.num = get_be32(pb);
- descriptor->sample_rate.den = get_be32(pb);
- break;
- case 0x3D06: /* SoundEssenceCompression */
- get_buffer(pb, descriptor->essence_codec_ul, 16);
- break;
- case 0x3D07:
- descriptor->channels = get_be32(pb);
- break;
- case 0x3D01:
- descriptor->bits_per_sample = get_be32(pb);
- break;
- case 0x3401:
- mxf_read_metadata_pixel_layout(pb, descriptor);
- break;
- case 0x8201: /* Private tag used by SONY C0023S01.mxf */
- descriptor->extradata = av_malloc(size);
- descriptor->extradata_size = size;
- get_buffer(pb, descriptor->extradata, size);
- break;
- default:
- url_fskip(pb, size);
- }
- bytes_read += size + 4;
+ switch(tag) {
+ case 0x3F01:
+ descriptor->sub_descriptors_count = get_be32(pb);
+ if (descriptor->sub_descriptors_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ descriptor->sub_descriptors_refs = av_malloc(descriptor->sub_descriptors_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)descriptor->sub_descriptors_refs, descriptor->sub_descriptors_count * sizeof(UID));
+ break;
+ case 0x3004:
+ get_buffer(pb, descriptor->essence_container_ul, 16);
+ break;
+ case 0x3006:
+ descriptor->linked_track_id = get_be32(pb);
+ break;
+ case 0x3201: /* PictureEssenceCoding */
+ get_buffer(pb, descriptor->essence_codec_ul, 16);
+ break;
+ case 0x3203:
+ descriptor->width = get_be32(pb);
+ break;
+ case 0x3202:
+ descriptor->height = get_be32(pb);
+ break;
+ case 0x320E:
+ descriptor->aspect_ratio.num = get_be32(pb);
+ descriptor->aspect_ratio.den = get_be32(pb);
+ break;
+ case 0x3D03:
+ descriptor->sample_rate.num = get_be32(pb);
+ descriptor->sample_rate.den = get_be32(pb);
+ break;
+ case 0x3D06: /* SoundEssenceCompression */
+ get_buffer(pb, descriptor->essence_codec_ul, 16);
+ break;
+ case 0x3D07:
+ descriptor->channels = get_be32(pb);
+ break;
+ case 0x3D01:
+ descriptor->bits_per_sample = get_be32(pb);
+ break;
+ case 0x3401:
+ mxf_read_metadata_pixel_layout(pb, descriptor);
+ break;
+ case 0x8201: /* Private tag used by SONY C0023S01.mxf */
+ descriptor->extradata = av_malloc(size);
+ descriptor->extradata_size = size;
+ get_buffer(pb, descriptor->extradata, size);
+ break;
}
- descriptor->type = Descriptor;
- return mxf_add_metadata_set(mxf, descriptor);
+ return 0;
}
/* SMPTE RP224 http://www.smpte-ra.org/mdd/index.html */
@@ -741,14 +647,15 @@ static enum CodecType mxf_get_codec_type(const MXFDataDefinitionUL *uls, UID *ui
return uls->type;
}
-static void *mxf_resolve_strong_ref(MXFContext *mxf, UID *strong_ref)
+static void *mxf_resolve_strong_ref(MXFContext *mxf, UID *strong_ref, enum MXFMetadataSetType type)
{
int i;
if (!strong_ref)
return NULL;
for (i = 0; i < mxf->metadata_sets_count; i++) {
- if (!memcmp(*strong_ref, mxf->metadata_sets[i]->uid, 16)) {
+ if (!memcmp(*strong_ref, mxf->metadata_sets[i]->uid, 16) &&
+ (type == AnyType || mxf->metadata_sets[i]->type == type)) {
return mxf->metadata_sets[i];
}
}
@@ -761,17 +668,11 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
MXFPackage *temp_package = NULL;
int i, j, k;
- dprintf("metadata sets count %d\n", mxf->metadata_sets_count);
+ dprintf(mxf->fc, "metadata sets count %d\n", mxf->metadata_sets_count);
/* TODO: handle multiple material packages (OP3x) */
for (i = 0; i < mxf->packages_count; i++) {
- if (!(temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i]))) {
- av_log(mxf->fc, AV_LOG_ERROR, "could not resolve package strong ref\n");
- return -1;
- }
- if (temp_package->type == MaterialPackage) {
- material_package = temp_package;
- break;
- }
+ material_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i], MaterialPackage);
+ if (material_package) break;
}
if (!material_package) {
av_log(mxf->fc, AV_LOG_ERROR, "no material package found\n");
@@ -785,16 +686,17 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
MXFTrack *temp_track = NULL;
MXFDescriptor *descriptor = NULL;
MXFStructuralComponent *component = NULL;
+ UID *essence_container_ul = NULL;
const MXFCodecUL *codec_ul = NULL;
const MXFCodecUL *container_ul = NULL;
AVStream *st;
- if (!(material_track = mxf_resolve_strong_ref(mxf, &material_package->tracks_refs[i]))) {
+ if (!(material_track = mxf_resolve_strong_ref(mxf, &material_package->tracks_refs[i], Track))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track strong ref\n");
continue;
}
- if (!(material_track->sequence = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref))) {
+ if (!(material_track->sequence = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref, Sequence))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track sequence strong ref\n");
return -1;
}
@@ -802,15 +704,14 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
/* TODO: handle multiple source clips */
for (j = 0; j < material_track->sequence->structural_components_count; j++) {
/* TODO: handle timecode component */
- component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j]);
- if (!component || component->type != SourceClip)
+ component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j], SourceClip);
+ if (!component)
continue;
for (k = 0; k < mxf->packages_count; k++) {
- if (!(temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[k]))) {
- av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
- return -1;
- }
+ temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[k], SourcePackage);
+ if (!temp_package)
+ continue;
if (!memcmp(temp_package->package_uid, component->source_package_uid, 16)) {
source_package = temp_package;
break;
@@ -821,7 +722,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
break;
}
for (k = 0; k < source_package->tracks_count; k++) {
- if (!(temp_track = mxf_resolve_strong_ref(mxf, &source_package->tracks_refs[k]))) {
+ if (!(temp_track = mxf_resolve_strong_ref(mxf, &source_package->tracks_refs[k], Track))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
return -1;
}
@@ -846,21 +747,21 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
st->start_time = component->start_position;
av_set_pts_info(st, 64, material_track->edit_rate.num, material_track->edit_rate.den);
- if (!(source_track->sequence = mxf_resolve_strong_ref(mxf, &source_track->sequence_ref))) {
+ if (!(source_track->sequence = mxf_resolve_strong_ref(mxf, &source_track->sequence_ref, Sequence))) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track sequence strong ref\n");
return -1;
}
#ifdef DEBUG
- PRINT_KEY("data definition ul", source_track->sequence->data_definition_ul);
+ PRINT_KEY(mxf->fc, "data definition ul", source_track->sequence->data_definition_ul);
#endif
st->codec->codec_type = mxf_get_codec_type(mxf_data_definition_uls, &source_track->sequence->data_definition_ul);
- source_package->descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor_ref);
+ source_package->descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor_ref, AnyType);
if (source_package->descriptor) {
if (source_package->descriptor->type == MultipleDescriptor) {
for (j = 0; j < source_package->descriptor->sub_descriptors_count; j++) {
- MXFDescriptor *sub_descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor->sub_descriptors_refs[j]);
+ MXFDescriptor *sub_descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor->sub_descriptors_refs[j], Descriptor);
if (!sub_descriptor) {
av_log(mxf->fc, AV_LOG_ERROR, "could not resolve sub descriptor strong ref\n");
@@ -871,7 +772,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
break;
}
}
- } else
+ } else if (source_package->descriptor->type == Descriptor)
descriptor = source_package->descriptor;
}
if (!descriptor) {
@@ -879,9 +780,22 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
continue;
}
#ifdef DEBUG
- PRINT_KEY("essence codec ul", descriptor->essence_codec_ul);
- PRINT_KEY("essence container ul", descriptor->essence_container_ul);
+ PRINT_KEY(mxf->fc, "essence codec ul", descriptor->essence_codec_ul);
+ PRINT_KEY(mxf->fc, "essence container ul", descriptor->essence_container_ul);
#endif
+ essence_container_ul = &descriptor->essence_container_ul;
+ /* HACK: replacing the original key with mxf_encrypted_essence_container
+ * is not allowed according to s429-6, try to find correct information anyway */
+ if (IS_KLV_KEY(essence_container_ul, mxf_encrypted_essence_container)) {
+ av_log(mxf->fc, AV_LOG_INFO, "broken encrypted mxf file\n");
+ for (k = 0; k < mxf->metadata_sets_count; k++) {
+ MXFMetadataSet *metadata = mxf->metadata_sets[k];
+ if (metadata->type == CryptoContext) {
+ essence_container_ul = &((MXFCryptoContext *)metadata)->source_container_ul;
+ break;
+ }
+ }
+ }
/* TODO: drop PictureEssenceCoding and SoundEssenceCompression, only check EssenceContainer */
codec_ul = mxf_get_codec_ul(mxf_codec_uls, &descriptor->essence_codec_ul);
st->codec->codec_id = codec_ul->id;
@@ -890,7 +804,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
st->codec->extradata_size = descriptor->extradata_size;
}
if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
- container_ul = mxf_get_codec_ul(mxf_picture_essence_container_uls, &descriptor->essence_container_ul);
+ container_ul = mxf_get_codec_ul(mxf_picture_essence_container_uls, essence_container_ul);
if (st->codec->codec_id == CODEC_ID_NONE)
st->codec->codec_id = container_ul->id;
st->codec->width = descriptor->width;
@@ -898,7 +812,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
st->codec->bits_per_sample = descriptor->bits_per_sample; /* Uncompressed */
st->need_parsing = 2; /* only parse headers */
} else if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
- container_ul = mxf_get_codec_ul(mxf_sound_essence_container_uls, &descriptor->essence_container_ul);
+ container_ul = mxf_get_codec_ul(mxf_sound_essence_container_uls, essence_container_ul);
if (st->codec->codec_id == CODEC_ID_NONE)
st->codec->codec_id = container_ul->id;
st->codec->channels = descriptor->channels;
@@ -922,7 +836,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
}
}
if (container_ul && container_ul->wrapping == Clip) {
- dprintf("stream %d: clip wrapped essence\n", st->index);
+ dprintf(mxf->fc, "stream %d: clip wrapped essence\n", st->index);
st->need_parsing = 1;
}
}
@@ -930,22 +844,22 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
}
static const MXFMetadataReadTableEntry mxf_metadata_read_table[] = {
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x2F,0x00 }, mxf_read_metadata_preface },
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x18,0x00 }, mxf_read_metadata_content_storage },
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x37,0x00 }, mxf_read_metadata_source_package },
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x36,0x00 }, mxf_read_metadata_material_package },
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x0F,0x00 }, mxf_read_metadata_sequence },
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x11,0x00 }, mxf_read_metadata_source_clip },
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x44,0x00 }, mxf_read_metadata_multiple_descriptor },
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x42,0x00 }, mxf_read_metadata_generic_descriptor }, /* Generic Sound */
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x28,0x00 }, mxf_read_metadata_generic_descriptor }, /* CDCI */
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x29,0x00 }, mxf_read_metadata_generic_descriptor }, /* RGBA */
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x51,0x00 }, mxf_read_metadata_generic_descriptor }, /* MPEG 2 Video */
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x48,0x00 }, mxf_read_metadata_generic_descriptor }, /* Wave */
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x47,0x00 }, mxf_read_metadata_generic_descriptor }, /* AES3 */
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3A,0x00 }, mxf_read_metadata_track }, /* Static Track */
- { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3B,0x00 }, mxf_read_metadata_track }, /* Generic Track */
- { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, NULL },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x18,0x00 }, mxf_read_metadata_content_storage, 0, AnyType },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x37,0x00 }, mxf_read_metadata_source_package, sizeof(MXFPackage), SourcePackage },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x36,0x00 }, mxf_read_metadata_material_package, sizeof(MXFPackage), MaterialPackage },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x0F,0x00 }, mxf_read_metadata_sequence, sizeof(MXFSequence), Sequence },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x11,0x00 }, mxf_read_metadata_source_clip, sizeof(MXFStructuralComponent), SourceClip },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x44,0x00 }, mxf_read_metadata_generic_descriptor, sizeof(MXFDescriptor), MultipleDescriptor },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x42,0x00 }, mxf_read_metadata_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* Generic Sound */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x28,0x00 }, mxf_read_metadata_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* CDCI */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x29,0x00 }, mxf_read_metadata_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* RGBA */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x51,0x00 }, mxf_read_metadata_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* MPEG 2 Video */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x48,0x00 }, mxf_read_metadata_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* Wave */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x47,0x00 }, mxf_read_metadata_generic_descriptor, sizeof(MXFDescriptor), Descriptor }, /* AES3 */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3A,0x00 }, mxf_read_metadata_track, sizeof(MXFTrack), Track }, /* Static Track */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3B,0x00 }, mxf_read_metadata_track, sizeof(MXFTrack), Track }, /* Generic Track */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x04,0x01,0x02,0x02,0x00,0x00 }, mxf_read_metadata_cryptographic_context, sizeof(MXFCryptoContext), CryptoContext },
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, NULL, 0, AnyType },
};
static int mxf_read_sync(ByteIOContext *pb, const uint8_t *key, unsigned size)
@@ -961,11 +875,38 @@ static int mxf_read_sync(ByteIOContext *pb, const uint8_t *key, unsigned size)
return i == size;
}
+static int mxf_read_local_tags(MXFContext *mxf, KLVPacket *klv, int (*read_child)(), int ctx_size, enum MXFMetadataSetType type)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFMetadataSet *ctx = ctx_size ? av_mallocz(ctx_size) : mxf;
+ uint64_t klv_end= url_ftell(pb) + klv->length;
+
+ while (url_ftell(pb) + 4 < klv_end) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+ uint64_t next= url_ftell(pb) + size;
+
+ if (!size) { /* ignore empty tag, needed for some files with empty UMID tag */
+ av_log(mxf->fc, AV_LOG_ERROR, "local tag 0x%04X with 0 size\n", tag);
+ continue;
+ }
+ if(ctx_size && tag == 0x3C0A)
+ get_buffer(pb, ctx->uid, 16);
+ else
+ read_child(ctx, pb, tag, size);
+
+ url_fseek(pb, next, SEEK_SET);
+ }
+ if (ctx_size) ctx->type = type;
+ return ctx_size ? mxf_add_metadata_set(mxf, ctx) : 0;
+}
+
static int mxf_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
MXFContext *mxf = s->priv_data;
KLVPacket klv;
+ mxf->sync_key = mxf_essence_element_key;
if (!mxf_read_sync(&s->pb, mxf_header_partition_pack_key, 14)) {
av_log(s, AV_LOG_ERROR, "could not find header partition pack key\n");
return -1;
@@ -973,31 +914,32 @@ static int mxf_read_header(AVFormatContext *s, AVFormatParameters *ap)
url_fseek(&s->pb, -14, SEEK_CUR);
mxf->fc = s;
while (!url_feof(&s->pb)) {
- const MXFMetadataReadTableEntry *function;
+ const MXFMetadataReadTableEntry *metadata;
if (klv_read_packet(&klv, &s->pb) < 0) {
av_log(s, AV_LOG_ERROR, "error reading KLV packet\n");
return -1;
}
#ifdef DEBUG
- PRINT_KEY("read header", klv.key);
+ PRINT_KEY(s, "read header", klv.key);
#endif
- if (IS_KLV_KEY(klv.key, mxf_essence_element_key)) {
+ if (IS_KLV_KEY(klv.key, mxf_encrypted_triplet_key) ||
+ IS_KLV_KEY(klv.key, mxf_essence_element_key)) {
/* FIXME avoid seek */
url_fseek(&s->pb, klv.offset, SEEK_SET);
break;
}
- for (function = mxf_metadata_read_table; function->read; function++) {
- if (IS_KLV_KEY(klv.key, function->key)) {
- if (function->read(mxf, &klv) < 0) {
+ for (metadata = mxf_metadata_read_table; metadata->read; metadata++) {
+ if (IS_KLV_KEY(klv.key, metadata->key)) {
+ if (mxf_read_local_tags(mxf, &klv, metadata->read, metadata->ctx_size, metadata->type) < 0) {
av_log(s, AV_LOG_ERROR, "error reading header metadata\n");
return -1;
}
break;
}
}
- if (!function->read)
+ if (!metadata->read)
url_fskip(&s->pb, klv.length);
}
return mxf_parse_structural_metadata(mxf);
@@ -1009,8 +951,6 @@ static int mxf_read_close(AVFormatContext *s)
int i;
av_freep(&mxf->packages_refs);
- av_freep(&mxf->essence_container_data_sets_refs);
- av_freep(&mxf->essence_containers_uls);
for (i = 0; i < mxf->metadata_sets_count; i++) {
switch (mxf->metadata_sets[i]->type) {
case MultipleDescriptor:
@@ -1029,6 +969,7 @@ static int mxf_read_close(AVFormatContext *s)
av_freep(&mxf->metadata_sets[i]);
}
av_freep(&mxf->metadata_sets);
+ av_freep(&mxf->aesc);
return 0;
}
@@ -1052,6 +993,7 @@ static int mxf_probe(AVProbeData *p) {
/* XXX: use MXF Index */
static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
{
+ MXFContext *mxf = s->priv_data;
AVStream *st = s->streams[stream_index];
int64_t seconds;
@@ -1061,7 +1003,7 @@ static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
sample_time = 0;
seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den);
url_fseek(&s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET);
- if (!mxf_read_sync(&s->pb, mxf_essence_element_key, 12))
+ if (!mxf_read_sync(&s->pb, mxf->sync_key, 12))
return -1;
/* found KLV key */
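[Editor's note] The mxf.c refactor above collapses the per-set parsers into one generic mxf_read_local_tags() walker over KLV "local sets": big-endian 2-byte tag, 2-byte length, value. The following is a minimal, buffer-based sketch of that layout, not FFmpeg API; read_be16() and walk_local_set() are illustrative names.

/* Sketch only: iterate a KLV local set (2-byte tag, 2-byte size, value). */
#include <stdint.h>
#include <stdio.h>

static unsigned read_be16(const uint8_t *p) { return (p[0] << 8) | p[1]; }

static void walk_local_set(const uint8_t *buf, size_t len)
{
    size_t pos = 0;
    while (pos + 4 <= len) {
        unsigned tag  = read_be16(buf + pos);
        unsigned size = read_be16(buf + pos + 2);
        pos += 4;
        if (size == 0)          /* tolerate empty tags, as the demuxer does */
            continue;
        if (pos + size > len)   /* truncated value: stop */
            break;
        printf("tag 0x%04X, size %u\n", tag, size);
        pos += size;            /* always seek to the next tag, regardless of
                                   how much of the value a handler consumed */
    }
}

int main(void)
{
    /* one 16-byte UID tag (0x3C0A) followed by a 2-byte channel-count tag */
    static const uint8_t set[] = {
        0x3C,0x0A, 0x00,0x10,
        0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
        0x3D,0x07, 0x00,0x02, 0x00,0x02,
    };
    walk_local_set(set, sizeof(set));
    return 0;
}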
diff --git a/contrib/ffmpeg/libavformat/base64.h b/contrib/ffmpeg/libavformat/network.h
index 03d43afe4..3aa8ba836 100644
--- a/contrib/ffmpeg/libavformat/base64.h
+++ b/contrib/ffmpeg/libavformat/network.h
@@ -1,6 +1,5 @@
/*
- * Base64.c
- * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ * Copyright (c) 2007 The FFmpeg Project.
*
* This file is part of FFmpeg.
*
@@ -19,6 +18,20 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-int av_base64_decode(uint8_t * out, const char *in, int out_length); // param order as strncpy()
-char *av_base64_encode(uint8_t * src, int len); // src is not a string, it's data.
+#ifndef NETWORK_H
+#define NETWORK_H
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifdef HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+#include <netdb.h>
+
+#if !defined(HAVE_INET_ATON)
+/* in os_support.c */
+int inet_aton (const char * str, struct in_addr * add);
+#endif
+
+#endif
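[Editor's note] The new network.h only declares a fallback inet_aton() for platforms that lack it (defined in os_support.c). A minimal usage sketch on a POSIX system follows; it is illustrative and not part of the patch.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
    struct in_addr addr;

    /* returns non-zero on success, filling addr in network byte order */
    if (!inet_aton("192.168.0.1", &addr)) {
        fprintf(stderr, "not a valid dotted-quad address\n");
        return 1;
    }
    printf("host order: 0x%08x\n", (unsigned)ntohl(addr.s_addr));
    return 0;
}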
diff --git a/contrib/ffmpeg/libavformat/nsvdec.c b/contrib/ffmpeg/libavformat/nsvdec.c
index 9a5fe97f8..2753edbd9 100644
--- a/contrib/ffmpeg/libavformat/nsvdec.c
+++ b/contrib/ffmpeg/libavformat/nsvdec.c
@@ -183,26 +183,26 @@ typedef struct {
//DVDemuxContext* dv_demux;
} NSVContext;
-static const CodecTag nsv_codec_video_tags[] = {
+static const AVCodecTag nsv_codec_video_tags[] = {
{ CODEC_ID_VP3, MKTAG('V', 'P', '3', ' ') },
{ CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
{ CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
{ CODEC_ID_VP5, MKTAG('V', 'P', '5', ' ') },
{ CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', ' ') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
{ CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
/*
{ CODEC_ID_VP4, MKTAG('V', 'P', '4', ' ') },
{ CODEC_ID_VP4, MKTAG('V', 'P', '4', '0') },
- { CODEC_ID_VP6, MKTAG('V', 'P', '6', ' ') },
- { CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
- { CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
*/
{ CODEC_ID_XVID, MKTAG('X', 'V', 'I', 'D') }, /* cf sample xvid decoder from nsv_codec_sdk.zip */
{ CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', '3') },
{ 0, 0 },
};
-static const CodecTag nsv_codec_audio_tags[] = {
+static const AVCodecTag nsv_codec_audio_tags[] = {
{ CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') },
{ CODEC_ID_AAC, MKTAG('A', 'A', 'C', ' ') },
{ CODEC_ID_AAC, MKTAG('A', 'A', 'C', 'P') }, /* _CUTTED__MUXED_2 Heads - Out Of The City.nsv */
@@ -602,13 +602,15 @@ null_chunk_retry:
pkt = &nsv->ahead[NSV_ST_VIDEO];
av_get_packet(pb, pkt, vsize);
pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO;
- pkt->dts = nst->frame_offset++;
+ pkt->dts = nst->frame_offset;
pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
/*
for (i = 0; i < MIN(8, vsize); i++)
PRINT(("NSV video: [%d] = %02x\n", i, pkt->data[i]));
*/
}
+ ((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset++;
+
if (asize/*st[NSV_ST_AUDIO]*/) {
nst = st[NSV_ST_AUDIO]->priv_data;
pkt = &nsv->ahead[NSV_ST_AUDIO];
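[Editor's note] The nsvdec.c hunks above rename the tag tables to AVCodecTag and extend the VP6 fourccs. Below is a small sketch of the lookup pattern such zero-terminated tables support; the codec ids and struct name are hypothetical stand-ins, not libavformat definitions.

#include <stdint.h>
#include <stdio.h>

#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))

struct tag_entry { int id; uint32_t tag; };

static const struct tag_entry video_tags[] = {
    { 1 /* hypothetical VP3 id */, MKTAG('V','P','3','0') },
    { 2 /* hypothetical VP6 id */, MKTAG('V','P','6','2') },
    { 0, 0 },                      /* terminator */
};

static int codec_get_id(const struct tag_entry *tags, uint32_t tag)
{
    for (; tags->id; tags++)
        if (tags->tag == tag)
            return tags->id;
    return 0;                      /* unknown fourcc */
}

int main(void)
{
    printf("%d\n", codec_get_id(video_tags, MKTAG('V','P','6','2'))); /* 2 */
    return 0;
}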
diff --git a/contrib/ffmpeg/libavformat/nut.c b/contrib/ffmpeg/libavformat/nut.c
index df64caf15..995149951 100644
--- a/contrib/ffmpeg/libavformat/nut.c
+++ b/contrib/ffmpeg/libavformat/nut.c
@@ -1443,7 +1443,7 @@ AVOutputFormat nut_muxer = {
sizeof(NUTContext),
#ifdef CONFIG_LIBVORBIS
CODEC_ID_VORBIS,
-#elif defined(CONFIG_MP3LAME)
+#elif defined(CONFIG_LIBMP3LAME)
CODEC_ID_MP3,
#else
CODEC_ID_MP2, /* AC3 needs liba52 decoder */
diff --git a/contrib/ffmpeg/libavformat/nutdec.c b/contrib/ffmpeg/libavformat/nutdec.c
index 7e0f8cd93..c0f331c27 100644
--- a/contrib/ffmpeg/libavformat/nutdec.c
+++ b/contrib/ffmpeg/libavformat/nutdec.c
@@ -29,18 +29,13 @@
static uint64_t get_v(ByteIOContext *bc){
uint64_t val = 0;
+ int tmp;
- for(;;)
- {
- int tmp = get_byte(bc);
-
- if (tmp&0x80)
- val= (val<<7) + tmp - 0x80;
- else{
- return (val<<7) + tmp;
- }
- }
- return -1;
+ do{
+ tmp = get_byte(bc);
+ val= (val<<7) + (tmp&127);
+ }while(tmp&128);
+ return val;
}
static int get_str(ByteIOContext *bc, char *string, unsigned int maxlen){
@@ -109,6 +104,8 @@ static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int calculate_ch
// start= url_ftell(bc) - 8;
size= get_v(bc);
+ if(size > 4096)
+ get_be32(bc); //FIXME check this
init_checksum(bc, calculate_checksum ? av_crc04C11DB7_update : NULL, 0);
@@ -740,13 +737,12 @@ static int nut_read_packet(AVFormatContext *s, AVPacket *pkt)
uint64_t tmp= nut->next_startcode;
nut->next_startcode=0;
- if (url_feof(bc))
- return -1;
-
if(tmp){
pos-=8;
}else{
frame_code = get_byte(bc);
+ if(url_feof(bc))
+ return -1;
if(frame_code == 'N'){
tmp= frame_code;
for(i=1; i<8; i++)
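[Editor's note] The rewritten get_v() above decodes NUT's variable-length integers: seven payload bits per byte, most-significant bit set on every byte except the last. A self-contained encode/decode sketch of that coding, assuming a plain memory buffer instead of a ByteIOContext:

#include <stdint.h>
#include <stdio.h>

static size_t put_v(uint8_t *out, uint64_t val)
{
    uint8_t tmp[10];
    size_t n = 0, i;
    do {                            /* collect 7-bit groups, low group first */
        tmp[n++] = val & 0x7f;
        val >>= 7;
    } while (val);
    for (i = 0; i < n; i++)         /* emit high group first, set the
                                       continuation bit on all but the last */
        out[i] = tmp[n - 1 - i] | (i + 1 < n ? 0x80 : 0);
    return n;
}

static uint64_t get_v(const uint8_t *in, size_t *pos)
{
    uint64_t val = 0;
    int tmp;
    do {
        tmp = in[(*pos)++];
        val = (val << 7) | (tmp & 0x7f);
    } while (tmp & 0x80);
    return val;
}

int main(void)
{
    uint8_t buf[10];
    size_t pos = 0, n = put_v(buf, 1234567);
    printf("%zu bytes, value %llu\n", n,
           (unsigned long long)get_v(buf, &pos));   /* 3 bytes, 1234567 */
    return 0;
}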
diff --git a/contrib/ffmpeg/libavformat/nuv.c b/contrib/ffmpeg/libavformat/nuv.c
index 3b96eb940..7e04222ee 100644
--- a/contrib/ffmpeg/libavformat/nuv.c
+++ b/contrib/ffmpeg/libavformat/nuv.c
@@ -154,7 +154,7 @@ static int nuv_header(AVFormatContext *s, AVFormatParameters *ap) {
vst->codec->height = height;
vst->codec->bits_per_sample = 10;
vst->codec->sample_aspect_ratio = av_d2q(aspect, 10000);
- vst->r_frame_rate = av_d2q(1.0 / fps, 10000);
+ vst->r_frame_rate = av_d2q(fps, 60000);
av_set_pts_info(vst, 32, 1, 1000);
} else
ctx->v_id = -1;
@@ -190,7 +190,7 @@ static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
if (ret <= 0)
return ret ? ret : -1;
frametype = hdr[0];
- size = PKTSIZE(LE_32(&hdr[8]));
+ size = PKTSIZE(AV_RL32(&hdr[8]));
switch (frametype) {
case NUV_VIDEO:
case NUV_EXTRADATA:
@@ -203,7 +203,7 @@ static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
if (ret < 0)
return ret;
pkt->pos = url_ftell(pb);
- pkt->pts = LE_32(&hdr[4]);
+ pkt->pts = AV_RL32(&hdr[4]);
pkt->stream_index = ctx->v_id;
memcpy(pkt->data, hdr, HDRSIZE);
ret = get_buffer(pb, pkt->data + HDRSIZE, size);
@@ -215,7 +215,7 @@ static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
break;
}
ret = av_get_packet(pb, pkt, size);
- pkt->pts = LE_32(&hdr[4]);
+ pkt->pts = AV_RL32(&hdr[4]);
pkt->stream_index = ctx->a_id;
return ret;
case NUV_SEEKP:
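[Editor's note] The nuv.c hunks switch from LE_32() to AV_RL32(), which reads a 32-bit little-endian value from possibly unaligned bytes. A portable stand-in is sketched below; the real macro lives in the FFmpeg headers.

#include <stdint.h>
#include <stdio.h>

static uint32_t rl32(const uint8_t *p)
{
    return (uint32_t)p[0]        |
           (uint32_t)p[1] << 8   |
           (uint32_t)p[2] << 16  |
           (uint32_t)p[3] << 24;
}

int main(void)
{
    const uint8_t hdr[] = { 0x78, 0x56, 0x34, 0x12 };
    printf("0x%08x\n", rl32(hdr));   /* 0x12345678 */
    return 0;
}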
diff --git a/contrib/ffmpeg/libavformat/ogg.c b/contrib/ffmpeg/libavformat/ogg.c
index 369fa4639..c98bb2273 100644
--- a/contrib/ffmpeg/libavformat/ogg.c
+++ b/contrib/ffmpeg/libavformat/ogg.c
@@ -155,8 +155,8 @@ static int ogg_write_trailer(AVFormatContext *avfcontext) {
AVOutputFormat ogg_muxer = {
"ogg",
- "Ogg Vorbis",
- "audio/x-vorbis",
+ "Ogg format",
+ "application/ogg",
"ogg",
sizeof(OggContext),
CODEC_ID_VORBIS,
diff --git a/contrib/ffmpeg/libavformat/ogg2.c b/contrib/ffmpeg/libavformat/ogg2.c
index 1e5d38620..8ca7b2d13 100644
--- a/contrib/ffmpeg/libavformat/ogg2.c
+++ b/contrib/ffmpeg/libavformat/ogg2.c
@@ -67,8 +67,8 @@ ogg_write_trailer (AVFormatContext * avfcontext)
AVOutputFormat ogg_muxer = {
"ogg",
- "Ogg Vorbis",
- "audio/x-vorbis",
+ "Ogg format",
+ "application/ogg",
"ogg",
sizeof (OggContext),
CODEC_ID_VORBIS,
@@ -90,6 +90,7 @@ ogg_save (AVFormatContext * s)
ost->pos = url_ftell (&s->pb);;
ost->curidx = ogg->curidx;
ost->next = ogg->state;
+ ost->nstreams = ogg->nstreams;
memcpy(ost->streams, ogg->streams, ogg->nstreams * sizeof(*ogg->streams));
for (i = 0; i < ogg->nstreams; i++){
@@ -123,8 +124,9 @@ ogg_restore (AVFormatContext * s, int discard)
url_fseek (bc, ost->pos, SEEK_SET);
ogg->curidx = ost->curidx;
- memcpy (ogg->streams, ost->streams,
- ogg->nstreams * sizeof (*ogg->streams));
+ ogg->nstreams = ost->nstreams;
+ memcpy(ogg->streams, ost->streams,
+ ost->nstreams * sizeof(*ogg->streams));
}
av_free (ost);
@@ -482,7 +484,8 @@ ogg_get_length (AVFormatContext * s)
url_fseek (&s->pb, end, SEEK_SET);
while (!ogg_read_page (s, &i)){
- if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
+ if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0 &&
+ ogg->streams[i].codec)
idx = i;
}
diff --git a/contrib/ffmpeg/libavformat/ogg2.h b/contrib/ffmpeg/libavformat/ogg2.h
index dd6f24aab..6b7c6b22e 100644
--- a/contrib/ffmpeg/libavformat/ogg2.h
+++ b/contrib/ffmpeg/libavformat/ogg2.h
@@ -57,6 +57,7 @@ typedef struct ogg_state {
uint64_t pos;
int curidx;
struct ogg_state *next;
+ int nstreams;
ogg_stream_t streams[1];
} ogg_state_t;
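[Editor's note] The ogg2.c/ogg2.h change fixes a save/restore bug: the snapshot must record how many streams it copied (nstreams), otherwise a restore taken after new streams appeared would memcpy with the wrong element count. A reduced sketch of that pattern, with hypothetical struct names:

#include <string.h>
#include <stdio.h>

struct stream { int serial; };

struct snapshot {
    int nstreams;                 /* captured together with the data */
    struct stream streams[16];
};

static void save(struct snapshot *snap, const struct stream *cur, int ncur)
{
    snap->nstreams = ncur;
    memcpy(snap->streams, cur, ncur * sizeof(*cur));
}

static void restore(const struct snapshot *snap, struct stream *cur, int *ncur)
{
    *ncur = snap->nstreams;       /* roll the count back too */
    memcpy(cur, snap->streams, snap->nstreams * sizeof(*cur));
}

int main(void)
{
    struct stream cur[16] = { { 1 }, { 2 } };
    int ncur = 2;
    struct snapshot snap;

    save(&snap, cur, ncur);
    cur[ncur++].serial = 3;       /* a new stream shows up after the save */
    restore(&snap, cur, &ncur);
    printf("%d streams after restore\n", ncur);   /* 2 */
    return 0;
}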
diff --git a/contrib/ffmpeg/libavformat/os_support.c b/contrib/ffmpeg/libavformat/os_support.c
index a66c867f0..7a4be8fa7 100644
--- a/contrib/ffmpeg/libavformat/os_support.c
+++ b/contrib/ffmpeg/libavformat/os_support.c
@@ -36,17 +36,25 @@
#endif
#include <time.h>
+#ifndef HAVE_SYS_POLL_H
+#if defined(__MINGW32__)
+#include <winsock2.h>
+#else
+#include <sys/select.h>
+#endif
+#endif
+
/**
* gets the current time in micro seconds.
*/
int64_t av_gettime(void)
{
#if defined(CONFIG_WINCE)
- return timeGetTime() * int64_t_C(1000);
+ return timeGetTime() * INT64_C(1000);
#elif defined(__MINGW32__)
struct timeb tb;
_ftime(&tb);
- return ((int64_t)tb.time * int64_t_C(1000) + (int64_t)tb.millitm) * int64_t_C(1000);
+ return ((int64_t)tb.time * INT64_C(1000) + (int64_t)tb.millitm) * INT64_C(1000);
#else
struct timeval tv;
gettimeofday(&tv,NULL);
@@ -70,7 +78,7 @@ struct tm *localtime_r(const time_t *t, struct tm *tp)
#if !defined(HAVE_INET_ATON) && defined(CONFIG_NETWORK)
#include <stdlib.h>
#include <strings.h>
-#include "barpainet.h"
+#include "network.h"
int inet_aton (const char * str, struct in_addr * add)
{
@@ -94,3 +102,66 @@ done:
return 1;
}
#endif /* !defined(HAVE_INET_ATON) && defined(CONFIG_NETWORK) */
+
+#ifdef CONFIG_FFSERVER
+#ifndef HAVE_SYS_POLL_H
+int poll(struct pollfd *fds, nfds_t numfds, int timeout)
+{
+ fd_set read_set;
+ fd_set write_set;
+ fd_set exception_set;
+ nfds_t i;
+ int n;
+ int rc;
+
+ FD_ZERO(&read_set);
+ FD_ZERO(&write_set);
+ FD_ZERO(&exception_set);
+
+ n = -1;
+ for(i = 0; i < numfds; i++) {
+ if (fds[i].fd < 0)
+ continue;
+ if (fds[i].fd >= FD_SETSIZE) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (fds[i].events & POLLIN) FD_SET(fds[i].fd, &read_set);
+ if (fds[i].events & POLLOUT) FD_SET(fds[i].fd, &write_set);
+ if (fds[i].events & POLLERR) FD_SET(fds[i].fd, &exception_set);
+
+ if (fds[i].fd > n)
+ n = fds[i].fd;
+ };
+
+ if (n == -1)
+ /* Hey!? Nothing to poll, in fact!!! */
+ return 0;
+
+ if (timeout < 0)
+ rc = select(n+1, &read_set, &write_set, &exception_set, NULL);
+ else {
+ struct timeval tv;
+
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = 1000 * (timeout % 1000);
+ rc = select(n+1, &read_set, &write_set, &exception_set, &tv);
+ };
+
+ if (rc < 0)
+ return rc;
+
+ for(i = 0; i < (nfds_t) n; i++) {
+ fds[i].revents = 0;
+
+ if (FD_ISSET(fds[i].fd, &read_set)) fds[i].revents |= POLLIN;
+ if (FD_ISSET(fds[i].fd, &write_set)) fds[i].revents |= POLLOUT;
+ if (FD_ISSET(fds[i].fd, &exception_set)) fds[i].revents |= POLLERR;
+ };
+
+ return rc;
+}
+#endif /* HAVE_SYS_POLL_H */
+#endif /* CONFIG_FFSERVER */
+
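[Editor's note] The poll() added above emulates the POSIX call on top of select() for platforms without <sys/poll.h>. A minimal usage sketch of that interface (native or emulated), waiting up to one second for stdin to become readable:

#include <poll.h>
#include <stdio.h>

int main(void)
{
    struct pollfd pfd = { .fd = 0, .events = POLLIN, .revents = 0 };
    int rc = poll(&pfd, 1, 1000);            /* timeout in milliseconds */

    if (rc < 0)
        perror("poll");
    else if (rc == 0)
        printf("timed out, nothing to read\n");
    else if (pfd.revents & POLLIN)
        printf("stdin is readable\n");
    return 0;
}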
diff --git a/contrib/ffmpeg/libavformat/os_support.h b/contrib/ffmpeg/libavformat/os_support.h
index e76a9aaaf..424d6dabd 100644
--- a/contrib/ffmpeg/libavformat/os_support.h
+++ b/contrib/ffmpeg/libavformat/os_support.h
@@ -26,22 +26,46 @@
* @file os_support.h
* miscellaneous OS support macros and functions.
*
+ * - socklen_t typedef (BeOS, Innotek libc)
* - usleep() (Win32, BeOS, OS/2)
+ * - lseek() (Win32)
* - floatf() (OS/2)
* - strcasecmp() (OS/2)
+ * - closesocket()
+ * - poll() (BeOS, MinGW)
*/
+#if defined(__BEOS__) || defined(__INNOTEK_LIBC__)
+typedef int socklen_t;
+#endif
+
#ifdef __MINGW32__
__declspec(dllimport) void __stdcall Sleep(unsigned long dwMilliseconds);
// # include <windows.h>
# define usleep(t) Sleep((t) / 1000)
+# include <fcntl.h>
+# define lseek(f,p,w) _lseeki64((f), (p), (w))
+# define HAVE_CLOSESOCKET 1
#endif
#ifdef __BEOS__
-# ifndef usleep
+# include <sys/socket.h>
+# include <netinet/in.h>
+ /* not net_server ? */
+# if IPPROTO_TCP != 6
+# define HAVE_CLOSESOCKET 1
+# endif
+# include <BeBuild.h>
+ /* R5 didn't have usleep, fake it. Haiku and Zeta have it now. */
+# if B_BEOS_VERSION <= B_BEOS_VERSION_5
# include <OS.h>
+ /* doesn't set errno but that's enough */
# define usleep(t) snooze((bigtime_t)(t))
# endif
+# ifndef SA_RESTART
+# warning SA_RESTART not implemented; ffserver might misbehave.
+# define SA_RESTART 0
+# endif
#endif
#if defined(CONFIG_OS2)
@@ -50,4 +74,38 @@ static inline int usleep(unsigned int t) { return _sleep2(t / 1000); }
static inline int strcasecmp(const char* s1, const char* s2) { return stricmp(s1,s2); }
#endif
+/* most of the time closing a socket is just closing an fd */
+#if HAVE_CLOSESOCKET != 1
+#define closesocket close
+#endif
+
+#ifdef CONFIG_FFSERVER
+#ifndef HAVE_SYS_POLL_H
+typedef unsigned long nfds_t;
+
+struct pollfd {
+ int fd;
+ short events; /* events to look for */
+ short revents; /* events that occurred */
+};
+
+/* events & revents */
+#define POLLIN 0x0001 /* any readable data available */
+#define POLLOUT 0x0002 /* file descriptor is writeable */
+#define POLLRDNORM POLLIN
+#define POLLWRNORM POLLOUT
+#define POLLRDBAND 0x0008 /* priority readable data */
+#define POLLWRBAND 0x0010 /* priority data can be written */
+#define POLLPRI 0x0020 /* high priority readable data */
+
+/* revents only */
+#define POLLERR 0x0004 /* errors pending */
+#define POLLHUP 0x0080 /* disconnected */
+#define POLLNVAL 0x1000 /* invalid file descriptor */
+
+
+extern int poll(struct pollfd *fds, nfds_t numfds, int timeout);
+#endif /* HAVE_SYS_POLL_H */
+#endif /* CONFIG_FFSERVER */
+
#endif /* _OS_SUPPORT_H */
diff --git a/contrib/ffmpeg/libavformat/png.c b/contrib/ffmpeg/libavformat/png.c
deleted file mode 100644
index d62bf540a..000000000
--- a/contrib/ffmpeg/libavformat/png.c
+++ /dev/null
@@ -1,889 +0,0 @@
-/*
- * PNG image format
- * Copyright (c) 2003 Fabrice Bellard.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-
-/* TODO:
- * - add 2, 4 and 16 bit depth support
- * - use filters when generating a png (better compression)
- */
-
-#ifdef CONFIG_ZLIB
-#include <zlib.h>
-
-//#define DEBUG
-
-#define PNG_COLOR_MASK_PALETTE 1
-#define PNG_COLOR_MASK_COLOR 2
-#define PNG_COLOR_MASK_ALPHA 4
-
-#define PNG_COLOR_TYPE_GRAY 0
-#define PNG_COLOR_TYPE_PALETTE (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE)
-#define PNG_COLOR_TYPE_RGB (PNG_COLOR_MASK_COLOR)
-#define PNG_COLOR_TYPE_RGB_ALPHA (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_ALPHA)
-#define PNG_COLOR_TYPE_GRAY_ALPHA (PNG_COLOR_MASK_ALPHA)
-
-#define PNG_FILTER_VALUE_NONE 0
-#define PNG_FILTER_VALUE_SUB 1
-#define PNG_FILTER_VALUE_UP 2
-#define PNG_FILTER_VALUE_AVG 3
-#define PNG_FILTER_VALUE_PAETH 4
-
-#define PNG_IHDR 0x0001
-#define PNG_IDAT 0x0002
-#define PNG_ALLIMAGE 0x0004
-#define PNG_PLTE 0x0008
-
-#define NB_PASSES 7
-
-#define IOBUF_SIZE 4096
-
-typedef struct PNGDecodeState {
- int state;
- int width, height;
- int bit_depth;
- int color_type;
- int compression_type;
- int interlace_type;
- int filter_type;
- int channels;
- int bits_per_pixel;
- int bpp;
-
- uint8_t *image_buf;
- int image_linesize;
- uint32_t palette[256];
- uint8_t *crow_buf;
- uint8_t *last_row;
- uint8_t *tmp_row;
- int pass;
- int crow_size; /* compressed row size (include filter type) */
- int row_size; /* decompressed row size */
- int pass_row_size; /* decompress row size of the current pass */
- int y;
- z_stream zstream;
-} PNGDecodeState;
-
-static const uint8_t pngsig[8] = {137, 80, 78, 71, 13, 10, 26, 10};
-
-/* Mask to determine which y pixels are valid in a pass */
-static const uint8_t png_pass_ymask[NB_PASSES] = {
- 0x80, 0x80, 0x08, 0x88, 0x22, 0xaa, 0x55,
-};
-
-/* Mask to determine which y pixels can be written in a pass */
-static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
- 0xff, 0xff, 0x0f, 0xcc, 0x33, 0xff, 0x55,
-};
-
-/* minimum x value */
-static const uint8_t png_pass_xmin[NB_PASSES] = {
- 0, 4, 0, 2, 0, 1, 0
-};
-
-/* x shift to get row width */
-static const uint8_t png_pass_xshift[NB_PASSES] = {
- 3, 3, 2, 2, 1, 1, 0
-};
-
-/* Mask to determine which pixels are valid in a pass */
-static const uint8_t png_pass_mask[NB_PASSES] = {
- 0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff
-};
-
-/* Mask to determine which pixels to overwrite while displaying */
-static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
- 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
-};
-
-static int png_probe(AVProbeData *pd)
-{
- if (pd->buf_size >= 8 &&
- memcmp(pd->buf, pngsig, 8) == 0)
- return AVPROBE_SCORE_MAX;
- else
- return 0;
-}
-
-static void *png_zalloc(void *opaque, unsigned int items, unsigned int size)
-{
- return av_malloc(items * size);
-}
-
-static void png_zfree(void *opaque, void *ptr)
-{
- av_free(ptr);
-}
-
-static int png_get_nb_channels(int color_type)
-{
- int channels;
- channels = 1;
- if ((color_type & (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE)) ==
- PNG_COLOR_MASK_COLOR)
- channels = 3;
- if (color_type & PNG_COLOR_MASK_ALPHA)
- channels++;
- return channels;
-}
-
-/* compute the row size of an interleaved pass */
-static int png_pass_row_size(int pass, int bits_per_pixel, int width)
-{
- int shift, xmin, pass_width;
-
- xmin = png_pass_xmin[pass];
- if (width <= xmin)
- return 0;
- shift = png_pass_xshift[pass];
- pass_width = (width - xmin + (1 << shift) - 1) >> shift;
- return (pass_width * bits_per_pixel + 7) >> 3;
-}
-
-/* NOTE: we try to construct a good looking image at each pass. width
- is the original image width. We also do pixel format convertion at
- this stage */
-static void png_put_interlaced_row(uint8_t *dst, int width,
- int bits_per_pixel, int pass,
- int color_type, const uint8_t *src)
-{
- int x, mask, dsp_mask, j, src_x, b, bpp;
- uint8_t *d;
- const uint8_t *s;
-
- mask = png_pass_mask[pass];
- dsp_mask = png_pass_dsp_mask[pass];
- switch(bits_per_pixel) {
- case 1:
- /* we must intialize the line to zero before writing to it */
- if (pass == 0)
- memset(dst, 0, (width + 7) >> 3);
- src_x = 0;
- for(x = 0; x < width; x++) {
- j = (x & 7);
- if ((dsp_mask << j) & 0x80) {
- b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
- dst[x >> 3] |= b << (7 - j);
- }
- if ((mask << j) & 0x80)
- src_x++;
- }
- break;
- default:
- bpp = bits_per_pixel >> 3;
- d = dst;
- s = src;
- if (color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
- for(x = 0; x < width; x++) {
- j = x & 7;
- if ((dsp_mask << j) & 0x80) {
- *(uint32_t *)d = (s[3] << 24) | (s[0] << 16) | (s[1] << 8) | s[2];
- }
- d += bpp;
- if ((mask << j) & 0x80)
- s += bpp;
- }
- } else {
- for(x = 0; x < width; x++) {
- j = x & 7;
- if ((dsp_mask << j) & 0x80) {
- memcpy(d, s, bpp);
- }
- d += bpp;
- if ((mask << j) & 0x80)
- s += bpp;
- }
- }
- break;
- }
-}
-
-static void png_get_interlaced_row(uint8_t *dst, int row_size,
- int bits_per_pixel, int pass,
- const uint8_t *src, int width)
-{
- int x, mask, dst_x, j, b, bpp;
- uint8_t *d;
- const uint8_t *s;
-
- mask = png_pass_mask[pass];
- switch(bits_per_pixel) {
- case 1:
- memset(dst, 0, row_size);
- dst_x = 0;
- for(x = 0; x < width; x++) {
- j = (x & 7);
- if ((mask << j) & 0x80) {
- b = (src[x >> 3] >> (7 - j)) & 1;
- dst[dst_x >> 3] |= b << (7 - (dst_x & 7));
- dst_x++;
- }
- }
- break;
- default:
- bpp = bits_per_pixel >> 3;
- d = dst;
- s = src;
- for(x = 0; x < width; x++) {
- j = x & 7;
- if ((mask << j) & 0x80) {
- memcpy(d, s, bpp);
- d += bpp;
- }
- s += bpp;
- }
- break;
- }
-}
-
-/* XXX: optimize */
-/* NOTE: 'dst' can be equal to 'last' */
-static void png_filter_row(uint8_t *dst, int filter_type,
- uint8_t *src, uint8_t *last, int size, int bpp)
-{
- int i, p;
-
- switch(filter_type) {
- case PNG_FILTER_VALUE_NONE:
- memcpy(dst, src, size);
- break;
- case PNG_FILTER_VALUE_SUB:
- for(i = 0; i < bpp; i++) {
- dst[i] = src[i];
- }
- for(i = bpp; i < size; i++) {
- p = dst[i - bpp];
- dst[i] = p + src[i];
- }
- break;
- case PNG_FILTER_VALUE_UP:
- for(i = 0; i < size; i++) {
- p = last[i];
- dst[i] = p + src[i];
- }
- break;
- case PNG_FILTER_VALUE_AVG:
- for(i = 0; i < bpp; i++) {
- p = (last[i] >> 1);
- dst[i] = p + src[i];
- }
- for(i = bpp; i < size; i++) {
- p = ((dst[i - bpp] + last[i]) >> 1);
- dst[i] = p + src[i];
- }
- break;
- case PNG_FILTER_VALUE_PAETH:
- for(i = 0; i < bpp; i++) {
- p = last[i];
- dst[i] = p + src[i];
- }
- for(i = bpp; i < size; i++) {
- int a, b, c, pa, pb, pc;
-
- a = dst[i - bpp];
- b = last[i];
- c = last[i - bpp];
-
- p = b - c;
- pc = a - c;
-
- pa = abs(p);
- pb = abs(pc);
- pc = abs(p + pc);
-
- if (pa <= pb && pa <= pc)
- p = a;
- else if (pb <= pc)
- p = b;
- else
- p = c;
- dst[i] = p + src[i];
- }
- break;
- }
-}
-
-static void convert_from_rgba32(uint8_t *dst, const uint8_t *src, int width)
-{
- uint8_t *d;
- int j;
- unsigned int v;
-
- d = dst;
- for(j = 0; j < width; j++) {
- v = ((const uint32_t *)src)[j];
- d[0] = v >> 16;
- d[1] = v >> 8;
- d[2] = v;
- d[3] = v >> 24;
- d += 4;
- }
-}
-
-static void convert_to_rgba32(uint8_t *dst, const uint8_t *src, int width)
-{
- int j;
- unsigned int r, g, b, a;
-
- for(j = 0;j < width; j++) {
- r = src[0];
- g = src[1];
- b = src[2];
- a = src[3];
- *(uint32_t *)dst = (a << 24) | (r << 16) | (g << 8) | b;
- dst += 4;
- src += 4;
- }
-}
-
-/* process exactly one decompressed row */
-static void png_handle_row(PNGDecodeState *s)
-{
- uint8_t *ptr, *last_row;
- int got_line;
-
- if (!s->interlace_type) {
- ptr = s->image_buf + s->image_linesize * s->y;
- /* need to swap bytes correctly for RGB_ALPHA */
- if (s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
- png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
- s->last_row, s->row_size, s->bpp);
- memcpy(s->last_row, s->tmp_row, s->row_size);
- convert_to_rgba32(ptr, s->tmp_row, s->width);
- } else {
- /* in normal case, we avoid one copy */
- if (s->y == 0)
- last_row = s->last_row;
- else
- last_row = ptr - s->image_linesize;
-
- png_filter_row(ptr, s->crow_buf[0], s->crow_buf + 1,
- last_row, s->row_size, s->bpp);
- }
- s->y++;
- if (s->y == s->height) {
- s->state |= PNG_ALLIMAGE;
- }
- } else {
- got_line = 0;
- for(;;) {
- ptr = s->image_buf + s->image_linesize * s->y;
- if ((png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
- /* if we already read one row, it is time to stop to
- wait for the next one */
- if (got_line)
- break;
- png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
- s->last_row, s->pass_row_size, s->bpp);
- memcpy(s->last_row, s->tmp_row, s->pass_row_size);
- got_line = 1;
- }
- if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
- /* NOTE: rgba32 is handled directly in png_put_interlaced_row */
- png_put_interlaced_row(ptr, s->width, s->bits_per_pixel, s->pass,
- s->color_type, s->last_row);
- }
- s->y++;
- if (s->y == s->height) {
- for(;;) {
- if (s->pass == NB_PASSES - 1) {
- s->state |= PNG_ALLIMAGE;
- goto the_end;
- } else {
- s->pass++;
- s->y = 0;
- s->pass_row_size = png_pass_row_size(s->pass,
- s->bits_per_pixel,
- s->width);
- s->crow_size = s->pass_row_size + 1;
- if (s->pass_row_size != 0)
- break;
- /* skip pass if empty row */
- }
- }
- }
- }
- the_end: ;
- }
-}
-
-static int png_decode_idat(PNGDecodeState *s, ByteIOContext *f, int length)
-{
- uint8_t buf[IOBUF_SIZE];
- int buf_size;
- int ret;
- while (length > 0) {
- /* read the buffer */
- buf_size = IOBUF_SIZE;
- if (buf_size > length)
- buf_size = length;
- ret = get_buffer(f, buf, buf_size);
- if (ret != buf_size)
- return -1;
- s->zstream.avail_in = buf_size;
- s->zstream.next_in = buf;
- /* decode one line if possible */
- while (s->zstream.avail_in > 0) {
- ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
- if (ret != Z_OK && ret != Z_STREAM_END) {
- return -1;
- }
- if (s->zstream.avail_out == 0) {
- if (!(s->state & PNG_ALLIMAGE)) {
- png_handle_row(s);
- }
- s->zstream.avail_out = s->crow_size;
- s->zstream.next_out = s->crow_buf;
- }
- }
- length -= buf_size;
- }
- return 0;
-}
-
-static int png_read(ByteIOContext *f,
- int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
-{
- AVImageInfo info1, *info = &info1;
- PNGDecodeState s1, *s = &s1;
- uint32_t tag, length;
- int ret, crc;
- uint8_t buf[8];
-
- /* check signature */
- ret = get_buffer(f, buf, 8);
- if (ret != 8)
- return -1;
- if (memcmp(buf, pngsig, 8) != 0)
- return -1;
- memset(s, 0, sizeof(PNGDecodeState));
- /* init the zlib */
- s->zstream.zalloc = png_zalloc;
- s->zstream.zfree = png_zfree;
- s->zstream.opaque = NULL;
- ret = inflateInit(&s->zstream);
- if (ret != Z_OK)
- return -1;
- for(;;) {
- if (url_feof(f))
- goto fail;
- length = get_be32(f);
- if (length > 0x7fffffff)
- goto fail;
- tag = get_le32(f);
-#ifdef DEBUG
- printf("png: tag=%c%c%c%c length=%u\n",
- (tag & 0xff),
- ((tag >> 8) & 0xff),
- ((tag >> 16) & 0xff),
- ((tag >> 24) & 0xff), length);
-#endif
- switch(tag) {
- case MKTAG('I', 'H', 'D', 'R'):
- if (length != 13)
- goto fail;
- s->width = get_be32(f);
- s->height = get_be32(f);
- s->bit_depth = get_byte(f);
- s->color_type = get_byte(f);
- s->compression_type = get_byte(f);
- s->filter_type = get_byte(f);
- s->interlace_type = get_byte(f);
- crc = get_be32(f);
- s->state |= PNG_IHDR;
-#ifdef DEBUG
- printf("width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
- s->width, s->height, s->bit_depth, s->color_type,
- s->compression_type, s->filter_type, s->interlace_type);
-#endif
- break;
- case MKTAG('I', 'D', 'A', 'T'):
- if (!(s->state & PNG_IHDR))
- goto fail;
- if (!(s->state & PNG_IDAT)) {
- /* init image info */
- info->width = s->width;
- info->height = s->height;
- info->interleaved = (s->interlace_type != 0);
-
- s->channels = png_get_nb_channels(s->color_type);
- s->bits_per_pixel = s->bit_depth * s->channels;
- s->bpp = (s->bits_per_pixel + 7) >> 3;
- s->row_size = (info->width * s->bits_per_pixel + 7) >> 3;
-
- if (s->bit_depth == 8 &&
- s->color_type == PNG_COLOR_TYPE_RGB) {
- info->pix_fmt = PIX_FMT_RGB24;
- } else if (s->bit_depth == 8 &&
- s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
- info->pix_fmt = PIX_FMT_RGBA32;
- } else if (s->bit_depth == 8 &&
- s->color_type == PNG_COLOR_TYPE_GRAY) {
- info->pix_fmt = PIX_FMT_GRAY8;
- } else if (s->bit_depth == 1 &&
- s->color_type == PNG_COLOR_TYPE_GRAY) {
- info->pix_fmt = PIX_FMT_MONOBLACK;
- } else if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
- info->pix_fmt = PIX_FMT_PAL8;
- } else {
- goto fail;
- }
- ret = alloc_cb(opaque, info);
- if (ret)
- goto the_end;
-
- /* compute the compressed row size */
- if (!s->interlace_type) {
- s->crow_size = s->row_size + 1;
- } else {
- s->pass = 0;
- s->pass_row_size = png_pass_row_size(s->pass,
- s->bits_per_pixel,
- s->width);
- s->crow_size = s->pass_row_size + 1;
- }
-#ifdef DEBUG
- printf("row_size=%d crow_size =%d\n",
- s->row_size, s->crow_size);
-#endif
- s->image_buf = info->pict.data[0];
- s->image_linesize = info->pict.linesize[0];
- /* copy the palette if needed */
- if (s->color_type == PNG_COLOR_TYPE_PALETTE)
- memcpy(info->pict.data[1], s->palette, 256 * sizeof(uint32_t));
- /* empty row is used if differencing to the first row */
- s->last_row = av_mallocz(s->row_size);
- if (!s->last_row)
- goto fail;
- if (s->interlace_type ||
- s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
- s->tmp_row = av_malloc(s->row_size);
- if (!s->tmp_row)
- goto fail;
- }
- /* compressed row */
- s->crow_buf = av_malloc(s->row_size + 1);
- if (!s->crow_buf)
- goto fail;
- s->zstream.avail_out = s->crow_size;
- s->zstream.next_out = s->crow_buf;
- }
- s->state |= PNG_IDAT;
- if (png_decode_idat(s, f, length) < 0)
- goto fail;
- /* skip crc */
- crc = get_be32(f);
- break;
- case MKTAG('P', 'L', 'T', 'E'):
- {
- int n, i, r, g, b;
-
- if ((length % 3) != 0 || length > 256 * 3)
- goto skip_tag;
- /* read the palette */
- n = length / 3;
- for(i=0;i<n;i++) {
- r = get_byte(f);
- g = get_byte(f);
- b = get_byte(f);
- s->palette[i] = (0xff << 24) | (r << 16) | (g << 8) | b;
- }
- for(;i<256;i++) {
- s->palette[i] = (0xff << 24);
- }
- s->state |= PNG_PLTE;
- crc = get_be32(f);
- }
- break;
- case MKTAG('t', 'R', 'N', 'S'):
- {
- int v, i;
-
- /* read the transparency. XXX: Only palette mode supported */
- if (s->color_type != PNG_COLOR_TYPE_PALETTE ||
- length > 256 ||
- !(s->state & PNG_PLTE))
- goto skip_tag;
- for(i=0;i<length;i++) {
- v = get_byte(f);
- s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
- }
- crc = get_be32(f);
- }
- break;
- case MKTAG('I', 'E', 'N', 'D'):
- if (!(s->state & PNG_ALLIMAGE))
- goto fail;
- crc = get_be32(f);
- goto exit_loop;
- default:
- /* skip tag */
- skip_tag:
- url_fskip(f, length + 4);
- break;
- }
- }
- exit_loop:
- ret = 0;
- the_end:
- inflateEnd(&s->zstream);
- av_free(s->crow_buf);
- av_free(s->last_row);
- av_free(s->tmp_row);
- return ret;
- fail:
- ret = -1;
- goto the_end;
-}
-
-static void png_write_chunk(ByteIOContext *f, uint32_t tag,
- const uint8_t *buf, int length)
-{
- uint32_t crc;
- uint8_t tagbuf[4];
-
- put_be32(f, length);
- crc = crc32(0, Z_NULL, 0);
- tagbuf[0] = tag;
- tagbuf[1] = tag >> 8;
- tagbuf[2] = tag >> 16;
- tagbuf[3] = tag >> 24;
- crc = crc32(crc, tagbuf, 4);
- put_le32(f, tag);
- if (length > 0) {
- crc = crc32(crc, buf, length);
- put_buffer(f, buf, length);
- }
- put_be32(f, crc);
-}
-
-/* XXX: use avcodec generic function ? */
-static void to_be32(uint8_t *p, uint32_t v)
-{
- p[0] = v >> 24;
- p[1] = v >> 16;
- p[2] = v >> 8;
- p[3] = v;
-}
-
-typedef struct PNGEncodeState {
- ByteIOContext *f;
- z_stream zstream;
- uint8_t buf[IOBUF_SIZE];
-} PNGEncodeState;
-
-
-/* XXX: do filtering */
-static int png_write_row(PNGEncodeState *s, const uint8_t *data, int size)
-{
- int ret;
-
- s->zstream.avail_in = size;
- s->zstream.next_in = (uint8_t *)data;
- while (s->zstream.avail_in > 0) {
- ret = deflate(&s->zstream, Z_NO_FLUSH);
- if (ret != Z_OK)
- return -1;
- if (s->zstream.avail_out == 0) {
- png_write_chunk(s->f, MKTAG('I', 'D', 'A', 'T'), s->buf, IOBUF_SIZE);
- s->zstream.avail_out = IOBUF_SIZE;
- s->zstream.next_out = s->buf;
- }
- }
- return 0;
-}
-
-static int png_write(ByteIOContext *f, AVImageInfo *info)
-{
- PNGEncodeState s1, *s = &s1;
- int bit_depth, color_type, y, len, row_size, ret, is_progressive;
- int bits_per_pixel, pass_row_size;
- uint8_t *ptr;
- uint8_t *crow_buf = NULL;
- uint8_t *tmp_buf = NULL;
-
- s->f = f;
- is_progressive = info->interleaved;
- switch(info->pix_fmt) {
- case PIX_FMT_RGBA32:
- bit_depth = 8;
- color_type = PNG_COLOR_TYPE_RGB_ALPHA;
- break;
- case PIX_FMT_RGB24:
- bit_depth = 8;
- color_type = PNG_COLOR_TYPE_RGB;
- break;
- case PIX_FMT_GRAY8:
- bit_depth = 8;
- color_type = PNG_COLOR_TYPE_GRAY;
- break;
- case PIX_FMT_MONOBLACK:
- bit_depth = 1;
- color_type = PNG_COLOR_TYPE_GRAY;
- break;
- case PIX_FMT_PAL8:
- bit_depth = 8;
- color_type = PNG_COLOR_TYPE_PALETTE;
- break;
- default:
- return -1;
- }
- bits_per_pixel = png_get_nb_channels(color_type) * bit_depth;
- row_size = (info->width * bits_per_pixel + 7) >> 3;
-
- s->zstream.zalloc = png_zalloc;
- s->zstream.zfree = png_zfree;
- s->zstream.opaque = NULL;
- ret = deflateInit2(&s->zstream, Z_DEFAULT_COMPRESSION,
- Z_DEFLATED, 15, 8, Z_DEFAULT_STRATEGY);
- if (ret != Z_OK)
- return -1;
- crow_buf = av_malloc(row_size + 1);
- if (!crow_buf)
- goto fail;
- if (is_progressive) {
- tmp_buf = av_malloc(row_size + 1);
- if (!tmp_buf)
- goto fail;
- }
-
- /* write png header */
- put_buffer(f, pngsig, 8);
-
- to_be32(s->buf, info->width);
- to_be32(s->buf + 4, info->height);
- s->buf[8] = bit_depth;
- s->buf[9] = color_type;
- s->buf[10] = 0; /* compression type */
- s->buf[11] = 0; /* filter type */
- s->buf[12] = is_progressive; /* interlace type */
-
- png_write_chunk(f, MKTAG('I', 'H', 'D', 'R'), s->buf, 13);
-
- /* put the palette if needed */
- if (color_type == PNG_COLOR_TYPE_PALETTE) {
- int has_alpha, alpha, i;
- unsigned int v;
- uint32_t *palette;
- uint8_t *alpha_ptr;
-
- palette = (uint32_t *)info->pict.data[1];
- ptr = s->buf;
- alpha_ptr = s->buf + 256 * 3;
- has_alpha = 0;
- for(i = 0; i < 256; i++) {
- v = palette[i];
- alpha = v >> 24;
- if (alpha != 0xff)
- has_alpha = 1;
- *alpha_ptr++ = alpha;
- ptr[0] = v >> 16;
- ptr[1] = v >> 8;
- ptr[2] = v;
- ptr += 3;
- }
- png_write_chunk(f, MKTAG('P', 'L', 'T', 'E'), s->buf, 256 * 3);
- if (has_alpha) {
- png_write_chunk(f, MKTAG('t', 'R', 'N', 'S'), s->buf + 256 * 3, 256);
- }
- }
-
- /* now put each row */
- s->zstream.avail_out = IOBUF_SIZE;
- s->zstream.next_out = s->buf;
- if (is_progressive) {
- uint8_t *ptr1;
- int pass;
-
- for(pass = 0; pass < NB_PASSES; pass++) {
- /* NOTE: a pass is completely omited if no pixels would be
- output */
- pass_row_size = png_pass_row_size(pass, bits_per_pixel, info->width);
- if (pass_row_size > 0) {
- for(y = 0; y < info->height; y++) {
- if ((png_pass_ymask[pass] << (y & 7)) & 0x80) {
- ptr = info->pict.data[0] + y * info->pict.linesize[0];
- if (color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
- convert_from_rgba32(tmp_buf, ptr, info->width);
- ptr1 = tmp_buf;
- } else {
- ptr1 = ptr;
- }
- png_get_interlaced_row(crow_buf + 1, pass_row_size,
- bits_per_pixel, pass,
- ptr1, info->width);
- crow_buf[0] = PNG_FILTER_VALUE_NONE;
- png_write_row(s, crow_buf, pass_row_size + 1);
- }
- }
- }
- }
- } else {
- for(y = 0; y < info->height; y++) {
- ptr = info->pict.data[0] + y * info->pict.linesize[0];
- if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
- convert_from_rgba32(crow_buf + 1, ptr, info->width);
- else
- memcpy(crow_buf + 1, ptr, row_size);
- crow_buf[0] = PNG_FILTER_VALUE_NONE;
- png_write_row(s, crow_buf, row_size + 1);
- }
- }
- /* compress last bytes */
- for(;;) {
- ret = deflate(&s->zstream, Z_FINISH);
- if (ret == Z_OK || ret == Z_STREAM_END) {
- len = IOBUF_SIZE - s->zstream.avail_out;
- if (len > 0) {
- png_write_chunk(f, MKTAG('I', 'D', 'A', 'T'), s->buf, len);
- }
- s->zstream.avail_out = IOBUF_SIZE;
- s->zstream.next_out = s->buf;
- if (ret == Z_STREAM_END)
- break;
- } else {
- goto fail;
- }
- }
- png_write_chunk(f, MKTAG('I', 'E', 'N', 'D'), NULL, 0);
-
- put_flush_packet(f);
- ret = 0;
- the_end:
- av_free(crow_buf);
- av_free(tmp_buf);
- deflateEnd(&s->zstream);
- return ret;
- fail:
- ret = -1;
- goto the_end;
-}
-
-AVImageFormat png_image_format = {
- "png",
- "png",
- png_probe,
- png_read,
- (1 << PIX_FMT_RGBA32) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_GRAY8) |
- (1 << PIX_FMT_MONOBLACK) | (1 << PIX_FMT_PAL8),
- png_write,
- AVIMAGE_INTERLEAVED,
-};
-#endif
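[Editor's note] The PNG_FILTER_VALUE_PAETH branch of png_filter_row() in the file removed above implements the standard PNG Paeth predictor. For reference, here it is written out the way the PNG specification describes it (a = left, b = up, c = upper-left); the deleted code is an equivalent, slightly optimized form.

#include <stdlib.h>
#include <stdio.h>

static int paeth_predict(int a, int b, int c)
{
    int p  = a + b - c;           /* initial estimate */
    int pa = abs(p - a);
    int pb = abs(p - b);
    int pc = abs(p - c);

    if (pa <= pb && pa <= pc)     /* pick the neighbour closest to the estimate */
        return a;
    if (pb <= pc)
        return b;
    return c;
}

int main(void)
{
    /* reconstructing one filtered byte: raw = filtered + predictor (mod 256) */
    int left = 120, up = 130, upleft = 125, filtered = 7;
    int raw = (filtered + paeth_predict(left, up, upleft)) & 0xff;
    printf("reconstructed byte: %d\n", raw);
    return 0;
}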
diff --git a/contrib/ffmpeg/libavformat/pnm.c b/contrib/ffmpeg/libavformat/pnm.c
deleted file mode 100644
index ade5d7c5d..000000000
--- a/contrib/ffmpeg/libavformat/pnm.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * PNM image format
- * Copyright (c) 2002, 2003 Fabrice Bellard.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-
-static inline int pnm_space(int c)
-{
- return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
-}
-
-static void pnm_get(ByteIOContext *f, char *str, int buf_size)
-{
- char *s;
- int c;
-
- /* skip spaces and comments */
- for(;;) {
- c = url_fgetc(f);
- if (c == '#') {
- do {
- c = url_fgetc(f);
- } while (c != '\n' && c != URL_EOF);
- } else if (!pnm_space(c)) {
- break;
- }
- }
-
- s = str;
- while (c != URL_EOF && !pnm_space(c)) {
- if ((s - str) < buf_size - 1)
- *s++ = c;
- c = url_fgetc(f);
- }
- *s = '\0';
-}
-
-static int pnm_read1(ByteIOContext *f,
- int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque,
- int allow_yuv)
-{
- int i, n, linesize, h;
- char buf1[32];
- unsigned char *ptr;
- AVImageInfo info1, *info = &info1;
- int ret;
-
- pnm_get(f, buf1, sizeof(buf1));
- if (!strcmp(buf1, "P4")) {
- info->pix_fmt = PIX_FMT_MONOWHITE;
- } else if (!strcmp(buf1, "P5")) {
- if (allow_yuv)
- info->pix_fmt = PIX_FMT_YUV420P;
- else
- info->pix_fmt = PIX_FMT_GRAY8;
- } else if (!strcmp(buf1, "P6")) {
- info->pix_fmt = PIX_FMT_RGB24;
- } else {
- return AVERROR_INVALIDDATA;
- }
- pnm_get(f, buf1, sizeof(buf1));
- info->width = atoi(buf1);
- if (info->width <= 0)
- return AVERROR_INVALIDDATA;
- pnm_get(f, buf1, sizeof(buf1));
- info->height = atoi(buf1);
- if (info->height <= 0)
- return AVERROR_INVALIDDATA;
- if (info->pix_fmt != PIX_FMT_MONOWHITE) {
- pnm_get(f, buf1, sizeof(buf1));
- }
-
- /* more check if YUV420 */
- if (info->pix_fmt == PIX_FMT_YUV420P) {
- if ((info->width & 1) != 0)
- return AVERROR_INVALIDDATA;
- h = (info->height * 2);
- if ((h % 3) != 0)
- return AVERROR_INVALIDDATA;
- h /= 3;
- info->height = h;
- }
-
- ret = alloc_cb(opaque, info);
- if (ret)
- return ret;
-
- switch(info->pix_fmt) {
- default:
- return AVERROR_INVALIDDATA;
- case PIX_FMT_RGB24:
- n = info->width * 3;
- goto do_read;
- case PIX_FMT_GRAY8:
- n = info->width;
- goto do_read;
- case PIX_FMT_MONOWHITE:
- n = (info->width + 7) >> 3;
- do_read:
- ptr = info->pict.data[0];
- linesize = info->pict.linesize[0];
- for(i = 0; i < info->height; i++) {
- get_buffer(f, ptr, n);
- ptr += linesize;
- }
- break;
- case PIX_FMT_YUV420P:
- {
- unsigned char *ptr1, *ptr2;
-
- n = info->width;
- ptr = info->pict.data[0];
- linesize = info->pict.linesize[0];
- for(i = 0; i < info->height; i++) {
- get_buffer(f, ptr, n);
- ptr += linesize;
- }
- ptr1 = info->pict.data[1];
- ptr2 = info->pict.data[2];
- n >>= 1;
- h = info->height >> 1;
- for(i = 0; i < h; i++) {
- get_buffer(f, ptr1, n);
- get_buffer(f, ptr2, n);
- ptr1 += info->pict.linesize[1];
- ptr2 += info->pict.linesize[2];
- }
- }
- break;
- }
- return 0;
-}
-
-static int pnm_read(ByteIOContext *f,
- int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
-{
- return pnm_read1(f, alloc_cb, opaque, 0);
-}
-
-static int pgmyuv_read(ByteIOContext *f,
- int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
-{
- return pnm_read1(f, alloc_cb, opaque, 1);
-}
-
-static int pnm_write(ByteIOContext *pb, AVImageInfo *info)
-{
- int i, h, h1, c, n, linesize;
- char buf[100];
- uint8_t *ptr, *ptr1, *ptr2;
-
- h = info->height;
- h1 = h;
- switch(info->pix_fmt) {
- case PIX_FMT_MONOWHITE:
- c = '4';
- n = (info->width + 7) >> 3;
- break;
- case PIX_FMT_GRAY8:
- c = '5';
- n = info->width;
- break;
- case PIX_FMT_RGB24:
- c = '6';
- n = info->width * 3;
- break;
- case PIX_FMT_YUV420P:
- c = '5';
- n = info->width;
- h1 = (h * 3) / 2;
- break;
- default:
- return AVERROR_INVALIDDATA;
- }
- snprintf(buf, sizeof(buf),
- "P%c\n%d %d\n",
- c, info->width, h1);
- put_buffer(pb, buf, strlen(buf));
- if (info->pix_fmt != PIX_FMT_MONOWHITE) {
- snprintf(buf, sizeof(buf),
- "%d\n", 255);
- put_buffer(pb, buf, strlen(buf));
- }
-
- ptr = info->pict.data[0];
- linesize = info->pict.linesize[0];
- for(i=0;i<h;i++) {
- put_buffer(pb, ptr, n);
- ptr += linesize;
- }
-
- if (info->pix_fmt == PIX_FMT_YUV420P) {
- h >>= 1;
- n >>= 1;
- ptr1 = info->pict.data[1];
- ptr2 = info->pict.data[2];
- for(i=0;i<h;i++) {
- put_buffer(pb, ptr1, n);
- put_buffer(pb, ptr2, n);
- ptr1 += info->pict.linesize[1];
- ptr2 += info->pict.linesize[2];
- }
- }
- put_flush_packet(pb);
- return 0;
-}
-
-static int pam_read(ByteIOContext *f,
- int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
-{
- int i, n, linesize, h, w, depth, maxval;
- char buf1[32], tuple_type[32];
- unsigned char *ptr;
- AVImageInfo info1, *info = &info1;
- int ret;
-
- pnm_get(f, buf1, sizeof(buf1));
- if (strcmp(buf1, "P7") != 0)
- return AVERROR_INVALIDDATA;
- w = -1;
- h = -1;
- maxval = -1;
- depth = -1;
- tuple_type[0] = '\0';
- for(;;) {
- pnm_get(f, buf1, sizeof(buf1));
- if (!strcmp(buf1, "WIDTH")) {
- pnm_get(f, buf1, sizeof(buf1));
- w = strtol(buf1, NULL, 10);
- } else if (!strcmp(buf1, "HEIGHT")) {
- pnm_get(f, buf1, sizeof(buf1));
- h = strtol(buf1, NULL, 10);
- } else if (!strcmp(buf1, "DEPTH")) {
- pnm_get(f, buf1, sizeof(buf1));
- depth = strtol(buf1, NULL, 10);
- } else if (!strcmp(buf1, "MAXVAL")) {
- pnm_get(f, buf1, sizeof(buf1));
- maxval = strtol(buf1, NULL, 10);
- } else if (!strcmp(buf1, "TUPLETYPE")) {
- pnm_get(f, buf1, sizeof(buf1));
- pstrcpy(tuple_type, sizeof(tuple_type), buf1);
- } else if (!strcmp(buf1, "ENDHDR")) {
- break;
- } else {
- return AVERROR_INVALIDDATA;
- }
- }
- /* check that all tags are present */
- if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0')
- return AVERROR_INVALIDDATA;
- info->width = w;
- info->height = h;
- if (depth == 1) {
- if (maxval == 1)
- info->pix_fmt = PIX_FMT_MONOWHITE;
- else
- info->pix_fmt = PIX_FMT_GRAY8;
- } else if (depth == 3) {
- info->pix_fmt = PIX_FMT_RGB24;
- } else if (depth == 4) {
- info->pix_fmt = PIX_FMT_RGBA32;
- } else {
- return AVERROR_INVALIDDATA;
- }
- ret = alloc_cb(opaque, info);
- if (ret)
- return ret;
-
- switch(info->pix_fmt) {
- default:
- return AVERROR_INVALIDDATA;
- case PIX_FMT_RGB24:
- n = info->width * 3;
- goto do_read;
- case PIX_FMT_GRAY8:
- n = info->width;
- goto do_read;
- case PIX_FMT_MONOWHITE:
- n = (info->width + 7) >> 3;
- do_read:
- ptr = info->pict.data[0];
- linesize = info->pict.linesize[0];
- for(i = 0; i < info->height; i++) {
- get_buffer(f, ptr, n);
- ptr += linesize;
- }
- break;
- case PIX_FMT_RGBA32:
- ptr = info->pict.data[0];
- linesize = info->pict.linesize[0];
- for(i = 0; i < info->height; i++) {
- int j, r, g, b, a;
-
- for(j = 0;j < w; j++) {
- r = get_byte(f);
- g = get_byte(f);
- b = get_byte(f);
- a = get_byte(f);
- ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
- }
- ptr += linesize;
- }
- break;
- }
- return 0;
-}
-
-static int pam_write(ByteIOContext *pb, AVImageInfo *info)
-{
- int i, h, w, n, linesize, depth, maxval;
- const char *tuple_type;
- char buf[100];
- uint8_t *ptr;
-
- h = info->height;
- w = info->width;
- switch(info->pix_fmt) {
- case PIX_FMT_MONOWHITE:
- n = (info->width + 7) >> 3;
- depth = 1;
- maxval = 1;
- tuple_type = "BLACKANDWHITE";
- break;
- case PIX_FMT_GRAY8:
- n = info->width;
- depth = 1;
- maxval = 255;
- tuple_type = "GRAYSCALE";
- break;
- case PIX_FMT_RGB24:
- n = info->width * 3;
- depth = 3;
- maxval = 255;
- tuple_type = "RGB";
- break;
- case PIX_FMT_RGBA32:
- n = info->width * 4;
- depth = 4;
- maxval = 255;
- tuple_type = "RGB_ALPHA";
- break;
- default:
- return AVERROR_INVALIDDATA;
- }
- snprintf(buf, sizeof(buf),
- "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
- w, h, depth, maxval, tuple_type);
- put_buffer(pb, buf, strlen(buf));
-
- ptr = info->pict.data[0];
- linesize = info->pict.linesize[0];
-
- if (info->pix_fmt == PIX_FMT_RGBA32) {
- int j;
- unsigned int v;
-
- for(i=0;i<h;i++) {
- for(j=0;j<w;j++) {
- v = ((uint32_t *)ptr)[j];
- put_byte(pb, (v >> 16) & 0xff);
- put_byte(pb, (v >> 8) & 0xff);
- put_byte(pb, (v) & 0xff);
- put_byte(pb, (v >> 24) & 0xff);
- }
- ptr += linesize;
- }
- } else {
- for(i=0;i<h;i++) {
- put_buffer(pb, ptr, n);
- ptr += linesize;
- }
- }
- put_flush_packet(pb);
- return 0;
-}
-
-static int pnm_probe(AVProbeData *pd)
-{
- const char *p = pd->buf;
- if (pd->buf_size >= 8 &&
- p[0] == 'P' &&
- p[1] >= '4' && p[1] <= '6' &&
- pnm_space(p[2]) )
- return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
- else
- return 0;
-}
-
-static int pgmyuv_probe(AVProbeData *pd)
-{
- if (match_ext(pd->filename, "pgmyuv"))
- return AVPROBE_SCORE_MAX;
- else
- return 0;
-}
-
-static int pam_probe(AVProbeData *pd)
-{
- const char *p = pd->buf;
- if (pd->buf_size >= 8 &&
- p[0] == 'P' &&
- p[1] == '7' &&
- p[2] == '\n')
- return AVPROBE_SCORE_MAX;
- else
- return 0;
-}
-
-AVImageFormat pnm_image_format = {
- "pnm",
- NULL,
- pnm_probe,
- pnm_read,
- 0,
- NULL,
-};
-
-AVImageFormat pbm_image_format = {
- "pbm",
- "pbm",
- NULL,
- NULL,
- (1 << PIX_FMT_MONOWHITE),
- pnm_write,
-};
-
-AVImageFormat pgm_image_format = {
- "pgm",
- "pgm",
- NULL,
- NULL,
- (1 << PIX_FMT_GRAY8),
- pnm_write,
-};
-
-AVImageFormat ppm_image_format = {
- "ppm",
- "ppm",
- NULL,
- NULL,
- (1 << PIX_FMT_RGB24),
- pnm_write,
-};
-
-AVImageFormat pam_image_format = {
- "pam",
- "pam",
- pam_probe,
- pam_read,
- (1 << PIX_FMT_MONOWHITE) | (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) |
- (1 << PIX_FMT_RGBA32),
- pam_write,
-};
-
-AVImageFormat pgmyuv_image_format = {
- "pgmyuv",
- "pgmyuv",
- pgmyuv_probe,
- pgmyuv_read,
- (1 << PIX_FMT_YUV420P),
- pnm_write,
-};
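The YUV420 branch in the removed pnm_read1()/pnm_write() above depends on the pgmyuv layout: the file is written as a single P5 plane whose stored height is 3/2 of the picture height, because the half-resolution U and V planes are appended below the luma plane; that is why the reader rejects odd widths and stored heights whose doubled value is not divisible by 3. A minimal standalone sketch of that bookkeeping follows; pgmyuv_image_height() is a name made up for the illustration, not part of the code above.

/* pgmyuv stores Y, then U and V (each width/2 x height/2) in one P5 plane,
 * so stored_height = image_height * 3 / 2.  Recovering the image height
 * therefore needs stored_height * 2 to be divisible by 3 and the width to
 * be even, mirroring the checks in pnm_read1() above. */
#include <stdio.h>

static int pgmyuv_image_height(int width, int stored_height, int *image_height)
{
    int h;
    if (width & 1)               /* chroma is subsampled 2x horizontally */
        return -1;
    h = stored_height * 2;
    if (h % 3)                   /* stored height must be a multiple of 3/2 */
        return -1;
    *image_height = h / 3;
    return 0;
}

int main(void)
{
    int h;
    if (!pgmyuv_image_height(352, 432, &h))  /* 432 = 288 * 3 / 2 */
        printf("image height: %d\n", h);     /* prints 288 */
    return 0;
}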
diff --git a/contrib/ffmpeg/libavformat/psxstr.c b/contrib/ffmpeg/libavformat/psxstr.c
index b03f65750..2f1a3dc73 100644
--- a/contrib/ffmpeg/libavformat/psxstr.c
+++ b/contrib/ffmpeg/libavformat/psxstr.c
@@ -92,8 +92,8 @@ static int str_probe(AVProbeData *p)
if (p->buf_size < 0x38)
return 0;
- if ((LE_32(&p->buf[0]) == RIFF_TAG) &&
- (LE_32(&p->buf[8]) == CDXA_TAG)) {
+ if ((AV_RL32(&p->buf[0]) == RIFF_TAG) &&
+ (AV_RL32(&p->buf[8]) == CDXA_TAG)) {
/* RIFF header seen; skip 0x2C bytes */
start = RIFF_HEADER_SIZE;
@@ -143,7 +143,7 @@ static int str_read_header(AVFormatContext *s,
/* skip over any RIFF header */
if (get_buffer(pb, sector, RIFF_HEADER_SIZE) != RIFF_HEADER_SIZE)
return AVERROR_IO;
- if (LE_32(&sector[0]) == RIFF_TAG)
+ if (AV_RL32(&sector[0]) == RIFF_TAG)
start = RIFF_HEADER_SIZE;
else
start = 0;
@@ -168,12 +168,12 @@ static int str_read_header(AVFormatContext *s,
/* check if this channel gets to be the dominant video channel */
if (str->video_channel == -1) {
/* qualify the magic number */
- if (LE_32(&sector[0x18]) != STR_MAGIC)
+ if (AV_RL32(&sector[0x18]) != STR_MAGIC)
break;
str->video_channel = channel;
str->channels[channel].type = STR_VIDEO;
- str->channels[channel].width = LE_16(&sector[0x28]);
- str->channels[channel].height = LE_16(&sector[0x2A]);
+ str->channels[channel].width = AV_RL16(&sector[0x28]);
+ str->channels[channel].height = AV_RL16(&sector[0x2A]);
/* allocate a new AVStream */
st = av_new_stream(s, 0);
@@ -273,9 +273,9 @@ static int str_read_packet(AVFormatContext *s,
/* check if this the video channel we care about */
if (channel == str->video_channel) {
- int current_sector = LE_16(&sector[0x1C]);
- int sector_count = LE_16(&sector[0x1E]);
- int frame_size = LE_32(&sector[0x24]);
+ int current_sector = AV_RL16(&sector[0x1C]);
+ int sector_count = AV_RL16(&sector[0x1E]);
+ int frame_size = AV_RL32(&sector[0x24]);
int bytes_to_copy;
// printf("%d %d %d\n",current_sector,sector_count,frame_size);
/* if this is the first sector of the frame, allocate a pkt */
diff --git a/contrib/ffmpeg/libavformat/qtpalette.h b/contrib/ffmpeg/libavformat/qtpalette.h
index ef4ccfa91..183600fde 100644
--- a/contrib/ffmpeg/libavformat/qtpalette.h
+++ b/contrib/ffmpeg/libavformat/qtpalette.h
@@ -2,6 +2,22 @@
* Default Palettes for Quicktime Files
* Automatically generated from a utility derived from XAnim:
* http://xanim.va.pubnix.com/home.html
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef QTPALETTE_H
diff --git a/contrib/ffmpeg/libavformat/raw.c b/contrib/ffmpeg/libavformat/raw.c
index e1ccbcd6d..73a20379a 100644
--- a/contrib/ffmpeg/libavformat/raw.c
+++ b/contrib/ffmpeg/libavformat/raw.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
+#include "ac3.h"
#ifdef CONFIG_MUXERS
/* simple formats */
@@ -336,9 +337,9 @@ static int mpegvideo_probe(AVProbeData *p)
case PICTURE_START_CODE: pic++; break;
case SLICE_START_CODE: slice++; break;
case PACK_START_CODE: pspack++; break;
- case VIDEO_ID:
- case AUDIO_ID: pes++; break;
}
+ if ((code & 0x1f0) == VIDEO_ID) pes++;
+ else if((code & 0x1e0) == AUDIO_ID) pes++;
}
}
if(seq && seq*9<=pic*10 && pic*9<=slice*10 && !pspack && !pes)
@@ -406,6 +407,37 @@ static int h261_probe(AVProbeData *p)
return 0;
}
+static int ac3_probe(AVProbeData *p)
+{
+ int max_frames, first_frames, frames;
+ uint8_t *buf, *buf2, *end;
+ AC3HeaderInfo hdr;
+
+ if(p->buf_size < 7)
+ return 0;
+
+ max_frames = 0;
+ buf = p->buf;
+ end = buf + FFMIN(4096, p->buf_size - 7);
+
+ for(; buf < end; buf++) {
+ buf2 = buf;
+
+ for(frames = 0; buf2 < end; frames++) {
+ if(ff_ac3_parse_header(buf2, &hdr) < 0)
+ break;
+ buf2 += hdr.frame_size;
+ }
+ max_frames = FFMAX(max_frames, frames);
+ if(buf == p->buf)
+ first_frames = frames;
+ }
+ if (first_frames>=3) return AVPROBE_SCORE_MAX * 3 / 4;
+ else if(max_frames>=3) return AVPROBE_SCORE_MAX / 2;
+ else if(max_frames>=1) return 1;
+ else return 0;
+}
+
AVInputFormat shorten_demuxer = {
"shn",
"raw shorten",
@@ -414,6 +446,7 @@ AVInputFormat shorten_demuxer = {
shorten_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "shn",
};
@@ -425,6 +458,7 @@ AVInputFormat flac_demuxer = {
flac_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "flac",
};
@@ -448,10 +482,11 @@ AVInputFormat ac3_demuxer = {
"ac3",
"raw ac3",
0,
- NULL,
+ ac3_probe,
ac3_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "ac3",
};
@@ -479,6 +514,7 @@ AVInputFormat dts_demuxer = {
dts_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "dts",
};
@@ -490,6 +526,7 @@ AVInputFormat aac_demuxer = {
aac_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "aac",
};
@@ -501,6 +538,7 @@ AVInputFormat h261_demuxer = {
video_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "h261",
.value = CODEC_ID_H261,
};
@@ -529,6 +567,7 @@ AVInputFormat h263_demuxer = {
video_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
// .extensions = "h263", //FIXME remove after writing mpeg4_probe
.value = CODEC_ID_H263,
};
@@ -557,6 +596,7 @@ AVInputFormat m4v_demuxer = {
video_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "m4v", //FIXME remove after writing mpeg4_probe
.value = CODEC_ID_MPEG4,
};
@@ -585,6 +625,7 @@ AVInputFormat h264_demuxer = {
video_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "h26l,h264,264", //FIXME remove after writing mpeg4_probe
.value = CODEC_ID_H264,
};
@@ -613,6 +654,7 @@ AVInputFormat mpegvideo_demuxer = {
video_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.value = CODEC_ID_MPEG1VIDEO,
};
@@ -656,6 +698,7 @@ AVInputFormat mjpeg_demuxer = {
video_read_header,
raw_read_partial_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "mjpg,mjpeg",
.value = CODEC_ID_MJPEG,
};
@@ -668,6 +711,7 @@ AVInputFormat ingenient_demuxer = {
video_read_header,
ingenient_read_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "cgi", // FIXME
.value = CODEC_ID_MJPEG,
};
@@ -688,6 +732,18 @@ AVOutputFormat mjpeg_muxer = {
};
#endif //CONFIG_MUXERS
+AVInputFormat vc1_demuxer = {
+ "vc1",
+ "raw vc1",
+ 0,
+ NULL /* vc1_probe */,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "vc1",
+ .value = CODEC_ID_VC1,
+};
+
/* pcm formats */
#define PCMINPUTDEF(name, long_name, ext, codec) \
@@ -700,6 +756,7 @@ AVInputFormat pcm_ ## name ## _demuxer = {\
raw_read_packet,\
raw_read_close,\
pcm_read_seek,\
+ .flags= AVFMT_GENERIC_INDEX,\
.extensions = ext,\
.value = codec,\
};
@@ -797,6 +854,7 @@ AVInputFormat rawvideo_demuxer = {
raw_read_header,
rawvideo_read_packet,
raw_read_close,
+ .flags= AVFMT_GENERIC_INDEX,
.extensions = "yuv,cif,qcif",
.value = CODEC_ID_RAWVIDEO,
};
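The ac3_probe() function added to raw.c above scores the input by how many raw AC-3 frame headers can be chained back to back from some offset, preferring chains that start at offset 0. Below is a self-contained sketch of the same scoring heuristic; parse_frame_size() is a hypothetical stand-in for ff_ac3_parse_header() that only checks the 0x0B 0x77 sync word and returns a placeholder frame size, so it illustrates the scoring, not real AC-3 parsing.

#include <stddef.h>
#include <stdio.h>

#define SCORE_MAX 100   /* plays the role of AVPROBE_SCORE_MAX */

/* hypothetical stand-in for ff_ac3_parse_header() */
static int parse_frame_size(const unsigned char *buf, size_t len)
{
    if (len < 7 || buf[0] != 0x0B || buf[1] != 0x77)
        return -1;      /* no sync word: not a frame header */
    return 768;         /* placeholder; the real parser derives this from the header */
}

static int probe_score(const unsigned char *buf, size_t size)
{
    int max_frames = 0, first_frames = 0;
    size_t start;

    if (size < 7)
        return 0;
    for (start = 0; start + 7 <= size; start++) {
        size_t pos = start;
        int frames = 0, frame_size;

        /* chain as many consecutive frame headers as possible from here */
        while (pos + 7 <= size &&
               (frame_size = parse_frame_size(buf + pos, size - pos)) > 0) {
            pos += frame_size;
            frames++;
        }
        if (start == 0)
            first_frames = frames;
        if (frames > max_frames)
            max_frames = frames;
    }
    if (first_frames >= 3) return SCORE_MAX * 3 / 4;  /* frames chain from offset 0 */
    if (max_frames >= 3)   return SCORE_MAX / 2;      /* chain found at a later offset */
    if (max_frames >= 1)   return 1;                  /* single plausible header only */
    return 0;
}

int main(void)
{
    unsigned char buf[4096] = {0};
    size_t i;

    for (i = 0; i + 768 <= sizeof(buf); i += 768) {   /* fake a run of frames */
        buf[i]     = 0x0B;
        buf[i + 1] = 0x77;
    }
    printf("probe score: %d\n", probe_score(buf, sizeof(buf)));
    return 0;
}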
diff --git a/contrib/ffmpeg/libavformat/riff.c b/contrib/ffmpeg/libavformat/riff.c
index d315c66af..4a5553fa4 100644
--- a/contrib/ffmpeg/libavformat/riff.c
+++ b/contrib/ffmpeg/libavformat/riff.c
@@ -22,10 +22,11 @@
#include "avformat.h"
#include "avcodec.h"
#include "riff.h"
+#include "allformats.h" // for asf_muxer
/* Note: when encoding, the first matching tag is used, so order is
important if multiple tags possible for a given codec. */
-const CodecTag codec_bmp_tags[] = {
+const AVCodecTag codec_bmp_tags[] = {
{ CODEC_ID_H264, MKTAG('H', '2', '6', '4') },
{ CODEC_ID_H264, MKTAG('h', '2', '6', '4') },
{ CODEC_ID_H264, MKTAG('X', '2', '6', '4') },
@@ -42,10 +43,10 @@ const CodecTag codec_bmp_tags[] = {
{ CODEC_ID_H263P, MKTAG('U', '2', '6', '3') },
{ CODEC_ID_H263P, MKTAG('v', 'i', 'v', '1') },
- { CODEC_ID_MPEG4, MKTAG('F', 'M', 'P', '4')},
- { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X'), .invalid_asf = 1 },
- { CODEC_ID_MPEG4, MKTAG('D', 'X', '5', '0'), .invalid_asf = 1 },
- { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D'), .invalid_asf = 1 },
+ { CODEC_ID_MPEG4, MKTAG('F', 'M', 'P', '4') },
+ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') },
+ { CODEC_ID_MPEG4, MKTAG('D', 'X', '5', '0') },
+ { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
{ CODEC_ID_MPEG4, MKTAG('M', 'P', '4', 'S') },
{ CODEC_ID_MPEG4, MKTAG('M', '4', 'S', '2') },
{ CODEC_ID_MPEG4, MKTAG(0x04, 0, 0, 0) }, /* some broken avi use this */
@@ -60,7 +61,7 @@ const CodecTag codec_bmp_tags[] = {
{ CODEC_ID_MPEG4, MKTAG('R', 'M', 'P', '4') },
- { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3'), .invalid_asf = 1 }, /* default signature when using MSMPEG4 */
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3') }, /* default signature when using MSMPEG4 */
{ CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
/* added based on MPlayer */
@@ -101,6 +102,9 @@ const CodecTag codec_bmp_tags[] = {
{ CODEC_ID_LJPEG, MKTAG('L', 'J', 'P', 'G') },
{ CODEC_ID_MJPEG, MKTAG('J', 'P', 'G', 'L') }, /* Pegasus lossless JPEG */
{ CODEC_ID_MJPEG, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - decoder */
+ { CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') },
+ { CODEC_ID_MJPEG, MKTAG('I', 'J', 'P', 'G') },
+ { CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') },
{ CODEC_ID_JPEGLS, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - encoder */
{ CODEC_ID_HUFFYUV, MKTAG('H', 'F', 'Y', 'U') },
{ CODEC_ID_FFVHUFF, MKTAG('F', 'F', 'V', 'H') },
@@ -118,6 +122,8 @@ const CodecTag codec_bmp_tags[] = {
{ CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
{ CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
{ CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
{ CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
{ CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') },
{ CODEC_ID_ASV2, MKTAG('A', 'S', 'V', '2') },
@@ -150,6 +156,7 @@ const CodecTag codec_bmp_tags[] = {
{ CODEC_ID_QPEG, MKTAG('Q', '1', '.', '1') },
{ CODEC_ID_WMV3, MKTAG('W', 'M', 'V', '3') },
{ CODEC_ID_VC1, MKTAG('W', 'V', 'C', '1') },
+ { CODEC_ID_VC1, MKTAG('W', 'M', 'V', 'A') },
{ CODEC_ID_LOCO, MKTAG('L', 'O', 'C', 'O') },
{ CODEC_ID_WNV1, MKTAG('W', 'N', 'V', '1') },
{ CODEC_ID_AASC, MKTAG('A', 'A', 'S', 'C') },
@@ -163,10 +170,11 @@ const CodecTag codec_bmp_tags[] = {
{ CODEC_ID_CAVS, MKTAG('C', 'A', 'V', 'S') },
{ CODEC_ID_JPEG2000, MKTAG('M', 'J', '2', 'C') },
{ CODEC_ID_VMNC, MKTAG('V', 'M', 'n', 'c') },
+ { CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') },
{ CODEC_ID_NONE, 0 },
};
-const CodecTag codec_wav_tags[] = {
+const AVCodecTag codec_wav_tags[] = {
{ CODEC_ID_MP2, 0x50 },
{ CODEC_ID_MP3, 0x55 },
{ CODEC_ID_AC3, 0x2000 },
@@ -195,6 +203,7 @@ const CodecTag codec_wav_tags[] = {
{ CODEC_ID_TRUESPEECH, 0x22 },
{ CODEC_ID_FLAC, 0xF1AC },
{ CODEC_ID_IMC, 0x401 },
+ { CODEC_ID_GSM_MS, 0x31 },
/* FIXME: All of the IDs below are not 16 bit and thus illegal. */
// for NuppelVideo (nuv.c)
@@ -204,7 +213,7 @@ const CodecTag codec_wav_tags[] = {
{ 0, 0 },
};
-unsigned int codec_get_tag(const CodecTag *tags, int id)
+unsigned int codec_get_tag(const AVCodecTag *tags, int id)
{
while (tags->id != CODEC_ID_NONE) {
if (tags->id == id)
@@ -214,17 +223,7 @@ unsigned int codec_get_tag(const CodecTag *tags, int id)
return 0;
}
-unsigned int codec_get_asf_tag(const CodecTag *tags, unsigned int id)
-{
- while (tags->id != CODEC_ID_NONE) {
- if (!tags->invalid_asf && tags->id == id)
- return tags->tag;
- tags++;
- }
- return 0;
-}
-
-enum CodecID codec_get_id(const CodecTag *tags, unsigned int tag)
+enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
while (tags->id != CODEC_ID_NONE) {
if( toupper((tag >> 0)&0xFF) == toupper((tags->tag >> 0)&0xFF)
@@ -237,6 +236,26 @@ enum CodecID codec_get_id(const CodecTag *tags, unsigned int tag)
return CODEC_ID_NONE;
}
+unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
+{
+ int i;
+ for(i=0; tags && tags[i]; i++){
+ int tag= codec_get_tag(tags[i], id);
+ if(tag) return tag;
+ }
+ return 0;
+}
+
+enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
+{
+ int i;
+ for(i=0; tags && tags[i]; i++){
+ enum CodecID id= codec_get_id(tags[i], tag);
+ if(id!=CODEC_ID_NONE) return id;
+ }
+ return CODEC_ID_NONE;
+}
+
unsigned int codec_get_bmp_tag(int id)
{
return codec_get_tag(codec_bmp_tags, id);
@@ -283,8 +302,6 @@ int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
int hdrsize = 18;
if(!enc->codec_tag || enc->codec_tag > 0xffff)
- enc->codec_tag = codec_get_tag(codec_wav_tags, enc->codec_id);
- if(!enc->codec_tag || enc->codec_tag > 0xffff)
return -1;
put_le16(pb, enc->codec_tag);
@@ -294,7 +311,7 @@ int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
enc->codec_id == CODEC_ID_PCM_ALAW ||
enc->codec_id == CODEC_ID_PCM_MULAW) {
bps = 8;
- } else if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
+ } else if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3 || enc->codec_id == CODEC_ID_GSM_MS) {
bps = 0;
} else if (enc->codec_id == CODEC_ID_ADPCM_IMA_WAV || enc->codec_id == CODEC_ID_ADPCM_MS || enc->codec_id == CODEC_ID_ADPCM_G726 || enc->codec_id == CODEC_ID_ADPCM_YAMAHA) { //
bps = 4;
@@ -305,8 +322,11 @@ int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
} else {
bps = 16;
}
+ if(bps != enc->bits_per_sample && enc->bits_per_sample){
+ av_log(enc, AV_LOG_WARNING, "requested bits_per_sample (%d) and actually stored (%d) differ\n", enc->bits_per_sample, bps);
+ }
- if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
+ if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3 || enc->codec_id == CODEC_ID_GSM_MS) {
blkalign = enc->frame_size; //this is wrong, but it seems many demuxers don't work if this is set correctly
//blkalign = 144 * enc->bit_rate/enc->sample_rate;
} else if (enc->codec_id == CODEC_ID_ADPCM_G726) { //
@@ -345,10 +365,14 @@ int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
put_le16(pb, 16); /* fwHeadFlags */
put_le32(pb, 0); /* dwPTSLow */
put_le32(pb, 0); /* dwPTSHigh */
+ } else if (enc->codec_id == CODEC_ID_GSM_MS) {
+ put_le16(pb, 2); /* wav_extra_size */
+ hdrsize += 2;
+ put_le16(pb, enc->frame_size); /* wSamplesPerBlock */
} else if (enc->codec_id == CODEC_ID_ADPCM_IMA_WAV) {
put_le16(pb, 2); /* wav_extra_size */
hdrsize += 2;
- put_le16(pb, ((enc->block_align - 4 * enc->channels) / (4 * enc->channels)) * 8 + 1); /* wSamplesPerBlock */
+ put_le16(pb, enc->frame_size); /* wSamplesPerBlock */
} else if(enc->extradata_size){
put_le16(pb, enc->extradata_size);
put_buffer(pb, enc->extradata, enc->extradata_size);
@@ -365,7 +389,7 @@ int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
}
/* BITMAPINFOHEADER header */
-void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags, int for_asf)
+void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf)
{
put_le32(pb, 40 + enc->extradata_size); /* size */
put_le32(pb, enc->width);
@@ -374,7 +398,7 @@ void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags
put_le16(pb, enc->bits_per_sample ? enc->bits_per_sample : 24); /* depth */
/* compression type */
- put_le32(pb, for_asf ? (enc->codec_tag ? enc->codec_tag : codec_get_asf_tag(tags, enc->codec_id)) : enc->codec_tag); //
+ put_le32(pb, enc->codec_tag);
put_le32(pb, enc->width * enc->height * 3);
put_le32(pb, 0);
put_le32(pb, 0);
@@ -411,22 +435,30 @@ void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size)
codec->bits_per_sample = 8;
}else
codec->bits_per_sample = get_le16(pb);
- codec->codec_id = wav_codec_get_id(id, codec->bits_per_sample);
-
- if (size > 16) { /* We're obviously dealing with WAVEFORMATEX */
- codec->extradata_size = get_le16(pb);
- if (codec->extradata_size > 0) {
- if (codec->extradata_size > size - 18)
- codec->extradata_size = size - 18;
+ if (size >= 18) { /* We're obviously dealing with WAVEFORMATEX */
+ int cbSize = get_le16(pb); /* cbSize */
+ size -= 18;
+ cbSize = FFMIN(size, cbSize);
+ if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */
+ codec->bits_per_sample = get_le16(pb);
+ get_le32(pb); /* dwChannelMask */
+ id = get_le32(pb); /* 4 first bytes of GUID */
+ url_fskip(pb, 12); /* skip end of GUID */
+ cbSize -= 22;
+ size -= 22;
+ }
+ codec->extradata_size = cbSize;
+ if (cbSize > 0) {
codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
get_buffer(pb, codec->extradata, codec->extradata_size);
- } else
- codec->extradata_size = 0;
+ size -= cbSize;
+ }
/* It is possible for the chunk to contain garbage at the end */
- if (size - codec->extradata_size - 18 > 0)
- url_fskip(pb, size - codec->extradata_size - 18);
+ if (size > 0)
+ url_fskip(pb, size);
}
+ codec->codec_id = wav_codec_get_id(id, codec->bits_per_sample);
}
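The av_codec_get_tag()/av_codec_get_id() helpers introduced in riff.c above generalize the old per-format lookups: a (de)muxer hands over a NULL-terminated list of AVCodecTag tables, the first table that yields a match wins, and fourcc comparison stays case-insensitive. A minimal self-contained sketch of that scheme follows; demo_tags[] and lookup_id() are toy stand-ins for the real codec_bmp_tags[] and av_codec_get_id().

#include <stdio.h>
#include <ctype.h>

enum CodecID { CODEC_ID_NONE = 0, CODEC_ID_H264, CODEC_ID_MPEG4 };

typedef struct AVCodecTag {
    int id;
    unsigned int tag;
} AVCodecTag;

#define MKTAG(a, b, c, d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))

static const AVCodecTag demo_tags[] = {    /* toy table, not codec_bmp_tags[] */
    { CODEC_ID_H264,  MKTAG('H', '2', '6', '4') },
    { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
    { CODEC_ID_NONE, 0 },
};

/* search one table: id -> tag */
static unsigned int codec_get_tag(const AVCodecTag *tags, int id)
{
    for (; tags->id != CODEC_ID_NONE; tags++)
        if (tags->id == id)
            return tags->tag;
    return 0;
}

/* search one table: tag -> id, comparing the fourcc case-insensitively */
static enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
    for (; tags->id != CODEC_ID_NONE; tags++) {
        int i, match = 1;
        for (i = 0; i < 4; i++)
            if (toupper((tags->tag >> (8 * i)) & 0xFF) != toupper((tag >> (8 * i)) & 0xFF))
                match = 0;
        if (match)
            return tags->id;
    }
    return CODEC_ID_NONE;
}

/* walk a NULL-terminated list of tables, as av_codec_get_id() does */
static enum CodecID lookup_id(const AVCodecTag *const *tables, unsigned int tag)
{
    int i;
    for (i = 0; tables && tables[i]; i++) {
        enum CodecID id = codec_get_id(tables[i], tag);
        if (id != CODEC_ID_NONE)
            return id;
    }
    return CODEC_ID_NONE;
}

int main(void)
{
    const AVCodecTag *const tables[] = { demo_tags, NULL };
    printf("H264 tag: 0x%08X\n", codec_get_tag(demo_tags, CODEC_ID_H264));
    printf("xvid -> codec id %d\n", lookup_id(tables, MKTAG('x', 'v', 'i', 'd')));
    return 0;
}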
diff --git a/contrib/ffmpeg/libavformat/riff.h b/contrib/ffmpeg/libavformat/riff.h
index 240855a8b..45c72dde6 100644
--- a/contrib/ffmpeg/libavformat/riff.h
+++ b/contrib/ffmpeg/libavformat/riff.h
@@ -19,33 +19,37 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/**
+ * @file riff.h
+ * internal header for RIFF based (de)muxers
+ * do NOT include this in end user applications
+ */
+
#ifndef FF_RIFF_H
#define FF_RIFF_H
offset_t start_tag(ByteIOContext *pb, const char *tag);
void end_tag(ByteIOContext *pb, offset_t start);
-typedef struct CodecTag {
+typedef struct AVCodecTag {
int id;
unsigned int tag;
- unsigned int invalid_asf : 1;
-} CodecTag;
+} AVCodecTag;
-void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags, int for_asf);
+void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const AVCodecTag *tags, int for_asf);
int put_wav_header(ByteIOContext *pb, AVCodecContext *enc);
int wav_codec_get_id(unsigned int tag, int bps);
void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size);
-extern const CodecTag codec_bmp_tags[];
-extern const CodecTag codec_wav_tags[];
+extern const AVCodecTag codec_bmp_tags[];
+extern const AVCodecTag codec_wav_tags[];
-unsigned int codec_get_tag(const CodecTag *tags, int id);
-enum CodecID codec_get_id(const CodecTag *tags, unsigned int tag);
-unsigned int codec_get_bmp_tag(int id);
-unsigned int codec_get_wav_tag(int id);
-enum CodecID codec_get_bmp_id(unsigned int tag);
-enum CodecID codec_get_wav_id(unsigned int tag);
-unsigned int codec_get_asf_tag(const CodecTag *tags, unsigned int id);
+unsigned int codec_get_tag(const AVCodecTag *tags, int id);
+enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag);
+unsigned int codec_get_bmp_tag(int id) attribute_deprecated; //use av_codec_get_tag
+unsigned int codec_get_wav_tag(int id) attribute_deprecated; //use av_codec_get_tag
+enum CodecID codec_get_bmp_id(unsigned int tag) attribute_deprecated; //use av_codec_get_id
+enum CodecID codec_get_wav_id(unsigned int tag) attribute_deprecated; //use av_codec_get_id
void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale);
#endif
diff --git a/contrib/ffmpeg/libavformat/rm.c b/contrib/ffmpeg/libavformat/rm.c
index b4ddf1b02..ad2f5ff27 100644
--- a/contrib/ffmpeg/libavformat/rm.c
+++ b/contrib/ffmpeg/libavformat/rm.c
@@ -954,9 +954,8 @@ resync:
len=len2;
rm->remaining_len-= len;
av_get_packet(pb, pkt, len);
- }
- if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ } else if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
if ((st->codec->codec_id == CODEC_ID_RA_288) ||
(st->codec->codec_id == CODEC_ID_COOK)) {
int x;
@@ -1008,7 +1007,9 @@ resync:
}
} else
av_get_packet(pb, pkt, len);
- }
+
+ } else
+ av_get_packet(pb, pkt, len);
if( (st->discard >= AVDISCARD_NONKEY && !(flags&2))
|| st->discard >= AVDISCARD_ALL){
diff --git a/contrib/ffmpeg/libavformat/rtp.c b/contrib/ffmpeg/libavformat/rtp.c
index 37a286289..493a89cf3 100644
--- a/contrib/ffmpeg/libavformat/rtp.c
+++ b/contrib/ffmpeg/libavformat/rtp.c
@@ -23,15 +23,7 @@
#include "bitstream.h"
#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#ifndef __BEOS__
-# include <arpa/inet.h>
-#else
-# include "barpainet.h"
-#endif
-#include <netdb.h>
+#include "network.h"
#include "rtp_internal.h"
#include "rtp_h264.h"
@@ -215,7 +207,6 @@ int rtp_get_codec_info(AVCodecContext *codec, int payload_type)
return -1;
}
-/* return < 0 if unknown payload type */
int rtp_get_payload_type(AVCodecContext *codec)
{
int i, payload_type;
@@ -344,11 +335,6 @@ static void rtcp_update_jitter(RTPStatistics *s, uint32_t sent_timestamp, uint32
}
#endif
-/**
- * some rtp servers assume client is dead if they don't hear from them...
- * so we send a Receiver Report to the provided ByteIO context
- * (we don't have access to the rtcp handle from here)
- */
int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
{
ByteIOContext pb;
@@ -508,7 +494,7 @@ static int rtp_parse_mp4_au(RTPDemuxContext *s, const uint8_t *buf)
/* decode the first 2 bytes where are stored the AUHeader sections
length in bits */
- au_headers_length = BE_16(buf);
+ au_headers_length = AV_RB16(buf);
if (au_headers_length > RTP_MAX_PACKET_LENGTH)
return -1;
diff --git a/contrib/ffmpeg/libavformat/rtp.h b/contrib/ffmpeg/libavformat/rtp.h
index 60ccc50ee..fec763051 100644
--- a/contrib/ffmpeg/libavformat/rtp.h
+++ b/contrib/ffmpeg/libavformat/rtp.h
@@ -26,6 +26,8 @@
int rtp_init(void);
int rtp_get_codec_info(AVCodecContext *codec, int payload_type);
+
+/** return < 0 if unknown payload type */
int rtp_get_payload_type(AVCodecContext *codec);
typedef struct RTPDemuxContext RTPDemuxContext;
@@ -42,17 +44,24 @@ int rtp_get_local_port(URLContext *h);
int rtp_set_remote_url(URLContext *h, const char *uri);
void rtp_get_file_handles(URLContext *h, int *prtp_fd, int *prtcp_fd);
+/**
+ * some rtp servers assume client is dead if they don't hear from them...
+ * so we send a Receiver Report to the provided ByteIO context
+ * (we don't have access to the rtcp handle from here)
+ */
+int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count);
+
extern URLProtocol rtp_protocol;
#define RTP_PT_PRIVATE 96
#define RTP_VERSION 2
-#define RTP_MAX_SDES 256 /* maximum text length for SDES */
+#define RTP_MAX_SDES 256 /**< maximum text length for SDES */
/* RTCP packets use 0.5 % of the bandwidth */
#define RTCP_TX_RATIO_NUM 5
#define RTCP_TX_RATIO_DEN 1000
-/* Structure listing usefull vars to parse RTP packet payload*/
+/** Structure listing useful vars to parse RTP packet payload*/
typedef struct rtp_payload_data_s
{
int sizelength;
@@ -63,7 +72,7 @@ typedef struct rtp_payload_data_s
int objecttype;
char *mode;
- /* mpeg 4 AU headers */
+ /** mpeg 4 AU headers */
struct AUHeaders {
int size;
int index;
diff --git a/contrib/ffmpeg/libavformat/rtp_h264.c b/contrib/ffmpeg/libavformat/rtp_h264.c
index 2568e9ea5..d38e8780d 100644
--- a/contrib/ffmpeg/libavformat/rtp_h264.c
+++ b/contrib/ffmpeg/libavformat/rtp_h264.c
@@ -41,16 +41,8 @@
#include "bitstream.h"
#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
+#include "network.h"
#include <assert.h>
-#ifndef __BEOS__
-# include <arpa/inet.h>
-#else
-# include "barpainet.h"
-#endif
-#include <netdb.h>
#include "rtp_internal.h"
#include "rtp_h264.h"
@@ -172,7 +164,9 @@ static int h264_handle_packet(RTPDemuxContext * s,
const uint8_t * buf,
int len)
{
-// h264_rtp_extra_data *data = s->dynamic_protocol_context;
+#ifdef DEBUG
+ h264_rtp_extra_data *data = s->dynamic_protocol_context;
+#endif
uint8_t nal = buf[0];
uint8_t type = (nal & 0x1f);
int result= 0;
@@ -213,7 +207,7 @@ static int h264_handle_packet(RTPDemuxContext * s,
int src_len= len;
do {
- uint16_t nal_size = BE_16(src); // this going to be a problem if unaligned (can it be?)
+ uint16_t nal_size = AV_RB16(src); // this is going to be a problem if unaligned (can it be?)
// consume the length of the aggregate...
src += 2;
diff --git a/contrib/ffmpeg/libavformat/rtpproto.c b/contrib/ffmpeg/libavformat/rtpproto.c
index d31c509c2..4d32e667d 100644
--- a/contrib/ffmpeg/libavformat/rtpproto.c
+++ b/contrib/ffmpeg/libavformat/rtpproto.c
@@ -22,15 +22,7 @@
#include <unistd.h>
#include <stdarg.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#ifndef __BEOS__
-# include <arpa/inet.h>
-#else
-# include "barpainet.h"
-#endif
-#include <netdb.h>
+#include "network.h"
#include <fcntl.h>
#define RTP_TX_BUF_SIZE (64 * 1024)
@@ -96,7 +88,7 @@ static void build_udp_url(char *buf, int buf_size,
if (local_port >= 0)
url_add_option(buf, buf_size, "localport=%d", local_port);
if (multicast)
- url_add_option(buf, buf_size, "multicast=1", multicast);
+ url_add_option(buf, buf_size, "multicast=1");
if (ttl >= 0)
url_add_option(buf, buf_size, "ttl=%d", ttl);
}
@@ -121,7 +113,7 @@ static int rtp_open(URLContext *h, const char *uri, int flags)
s = av_mallocz(sizeof(RTPContext));
if (!s)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
h->priv_data = s;
url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
diff --git a/contrib/ffmpeg/libavformat/rtsp.c b/contrib/ffmpeg/libavformat/rtsp.c
index 787cdd685..7d4c6bf78 100644
--- a/contrib/ffmpeg/libavformat/rtsp.c
+++ b/contrib/ffmpeg/libavformat/rtsp.c
@@ -20,15 +20,9 @@
*/
#include "avformat.h"
-#include <unistd.h> /* for select() prototype */
#include <sys/time.h>
-#include <netinet/in.h>
-#include <sys/socket.h>
-#ifndef __BEOS__
-# include <arpa/inet.h>
-#else
-# include "barpainet.h"
-#endif
+#include <unistd.h> /* for select() prototype */
+#include "network.h"
#include "rtp_internal.h"
@@ -823,7 +817,6 @@ static void rtsp_send_cmd(AVFormatContext *s,
*content_ptr = content;
}
-/* useful for modules: set RTSP callback function */
void rtsp_set_callback(FFRTSPCallback *rtsp_cb)
{
diff --git a/contrib/ffmpeg/libavformat/rtsp.h b/contrib/ffmpeg/libavformat/rtsp.h
index c08aaa6ac..481e2ba49 100644
--- a/contrib/ffmpeg/libavformat/rtsp.h
+++ b/contrib/ffmpeg/libavformat/rtsp.h
@@ -21,12 +21,7 @@
#ifndef RTSP_H
#define RTSP_H
-/* RTSP handling */
-enum RTSPStatusCode {
-#define DEF(n, c, s) c = n,
#include "rtspcodes.h"
-#undef DEF
-};
enum RTSPProtocol {
RTSP_PROTOCOL_RTP_UDP = 0,
@@ -43,27 +38,27 @@ enum RTSPProtocol {
#define RTSP_RTP_PORT_MAX 10000
typedef struct RTSPTransportField {
- int interleaved_min, interleaved_max; /* interleave ids, if TCP transport */
- int port_min, port_max; /* RTP ports */
- int client_port_min, client_port_max; /* RTP ports */
- int server_port_min, server_port_max; /* RTP ports */
- int ttl; /* ttl value */
- uint32_t destination; /* destination IP address */
+ int interleaved_min, interleaved_max; /**< interleave ids, if TCP transport */
+ int port_min, port_max; /**< RTP ports */
+ int client_port_min, client_port_max; /**< RTP ports */
+ int server_port_min, server_port_max; /**< RTP ports */
+ int ttl; /**< ttl value */
+ uint32_t destination; /**< destination IP address */
enum RTSPProtocol protocol;
} RTSPTransportField;
typedef struct RTSPHeader {
int content_length;
- enum RTSPStatusCode status_code; /* response code from server */
+ enum RTSPStatusCode status_code; /**< response code from server */
int nb_transports;
- /* in AV_TIME_BASE unit, AV_NOPTS_VALUE if not used */
+ /** in AV_TIME_BASE unit, AV_NOPTS_VALUE if not used */
int64_t range_start, range_end;
RTSPTransportField transports[RTSP_MAX_TRANSPORTS];
- int seq; /* sequence number */
+ int seq; /**< sequence number */
char session_id[512];
} RTSPHeader;
-/* the callback can be used to extend the connection setup/teardown step */
+/** the callback can be used to extend the connection setup/teardown step */
enum RTSPCallbackAction {
RTSP_ACTION_SERVER_SETUP,
RTSP_ACTION_SERVER_TEARDOWN,
@@ -81,6 +76,7 @@ typedef int FFRTSPCallback(enum RTSPCallbackAction action,
char *buf, int buf_size,
void *arg);
+/** useful for modules: set RTSP callback function */
void rtsp_set_callback(FFRTSPCallback *rtsp_cb);
int rtsp_init(void);
diff --git a/contrib/ffmpeg/libavformat/rtspcodes.h b/contrib/ffmpeg/libavformat/rtspcodes.h
index f7aab31c9..74cfb5d5b 100644
--- a/contrib/ffmpeg/libavformat/rtspcodes.h
+++ b/contrib/ffmpeg/libavformat/rtspcodes.h
@@ -18,14 +18,19 @@
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-DEF(200, RTSP_STATUS_OK, "OK")
-DEF(405, RTSP_STATUS_METHOD, "Method Not Allowed")
-DEF(453, RTSP_STATUS_BANDWIDTH, "Not Enough Bandwidth")
-DEF(454, RTSP_STATUS_SESSION, "Session Not Found")
-DEF(455, RTSP_STATUS_STATE, "Method Not Valid in This State")
-DEF(459, RTSP_STATUS_AGGREGATE, "Aggregate operation not allowed")
-DEF(460, RTSP_STATUS_ONLY_AGGREGATE, "Only aggregate operation allowed")
-DEF(461, RTSP_STATUS_TRANSPORT, "Unsupported transport")
-DEF(500, RTSP_STATUS_INTERNAL, "Internal Server Error")
-DEF(503, RTSP_STATUS_SERVICE, "Service Unavailable")
-DEF(505, RTSP_STATUS_VERSION, "RTSP Version not supported")
+
+/** RTSP handling */
+enum RTSPStatusCode {
+RTSP_STATUS_OK =200, /**< OK */
+RTSP_STATUS_METHOD =405, /**< Method Not Allowed */
+RTSP_STATUS_BANDWIDTH =453, /**< Not Enough Bandwidth */
+RTSP_STATUS_SESSION =454, /**< Session Not Found */
+RTSP_STATUS_STATE =455, /**< Method Not Valid in This State */
+RTSP_STATUS_AGGREGATE =459, /**< Aggregate operation not allowed */
+RTSP_STATUS_ONLY_AGGREGATE =460, /**< Only aggregate operation allowed */
+RTSP_STATUS_TRANSPORT =461, /**< Unsupported transport */
+RTSP_STATUS_INTERNAL =500, /**< Internal Server Error */
+RTSP_STATUS_SERVICE =503, /**< Service Unavailable */
+RTSP_STATUS_VERSION =505, /**< RTSP Version not supported */
+};
+
diff --git a/contrib/ffmpeg/libavformat/segafilm.c b/contrib/ffmpeg/libavformat/segafilm.c
index 4feb97262..b5375ccf7 100644
--- a/contrib/ffmpeg/libavformat/segafilm.c
+++ b/contrib/ffmpeg/libavformat/segafilm.c
@@ -58,8 +58,6 @@ typedef struct FilmDemuxContext {
unsigned int base_clock;
unsigned int version;
- int cvid_extra_bytes; /* the number of bytes thrown into the Cinepak
- * chunk header to throw off decoders */
/* buffer used for interleaving stereo PCM data */
unsigned char *stereo_buffer;
@@ -71,7 +69,7 @@ static int film_probe(AVProbeData *p)
if (p->buf_size < 4)
return 0;
- if (BE_32(&p->buf[0]) != FILM_TAG)
+ if (AV_RB32(&p->buf[0]) != FILM_TAG)
return 0;
return AVPROBE_SCORE_MAX;
@@ -95,8 +93,8 @@ static int film_read_header(AVFormatContext *s,
/* load the main FILM header */
if (get_buffer(pb, scratch, 16) != 16)
return AVERROR_IO;
- data_offset = BE_32(&scratch[4]);
- film->version = BE_32(&scratch[8]);
+ data_offset = AV_RB32(&scratch[4]);
+ film->version = AV_RB32(&scratch[8]);
/* load the FDSC chunk */
if (film->version == 0) {
@@ -112,7 +110,7 @@ static int film_read_header(AVFormatContext *s,
/* normal Saturn .cpk files; 32-byte header */
if (get_buffer(pb, scratch, 32) != 32)
return AVERROR_IO;
- film->audio_samplerate = BE_16(&scratch[24]);;
+ film->audio_samplerate = AV_RB16(&scratch[24]);;
film->audio_channels = scratch[21];
film->audio_bits = scratch[22];
if (film->audio_bits == 8)
@@ -123,16 +121,11 @@ static int film_read_header(AVFormatContext *s,
film->audio_type = 0;
}
- if (BE_32(&scratch[0]) != FDSC_TAG)
+ if (AV_RB32(&scratch[0]) != FDSC_TAG)
return AVERROR_INVALIDDATA;
- film->cvid_extra_bytes = 0;
- if (BE_32(&scratch[8]) == CVID_TAG) {
+ if (AV_RB32(&scratch[8]) == CVID_TAG) {
film->video_type = CODEC_ID_CINEPAK;
- if (film->version)
- film->cvid_extra_bytes = 2;
- else
- film->cvid_extra_bytes = 6; /* Lemmings 3DO case */
} else
film->video_type = 0;
@@ -145,8 +138,8 @@ static int film_read_header(AVFormatContext *s,
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = film->video_type;
st->codec->codec_tag = 0; /* no fourcc */
- st->codec->width = BE_32(&scratch[16]);
- st->codec->height = BE_32(&scratch[12]);
+ st->codec->width = AV_RB32(&scratch[16]);
+ st->codec->height = AV_RB32(&scratch[12]);
}
if (film->audio_type) {
@@ -169,10 +162,10 @@ static int film_read_header(AVFormatContext *s,
/* load the sample table */
if (get_buffer(pb, scratch, 16) != 16)
return AVERROR_IO;
- if (BE_32(&scratch[0]) != STAB_TAG)
+ if (AV_RB32(&scratch[0]) != STAB_TAG)
return AVERROR_INVALIDDATA;
- film->base_clock = BE_32(&scratch[8]);
- film->sample_count = BE_32(&scratch[12]);
+ film->base_clock = AV_RB32(&scratch[8]);
+ film->sample_count = AV_RB32(&scratch[12]);
if(film->sample_count >= UINT_MAX / sizeof(film_sample_t))
return -1;
film->sample_table = av_malloc(film->sample_count * sizeof(film_sample_t));
@@ -188,9 +181,9 @@ static int film_read_header(AVFormatContext *s,
return AVERROR_IO;
}
film->sample_table[i].sample_offset =
- data_offset + BE_32(&scratch[0]);
- film->sample_table[i].sample_size = BE_32(&scratch[4]);
- if (BE_32(&scratch[8]) == 0xFFFFFFFF) {
+ data_offset + AV_RB32(&scratch[0]);
+ film->sample_table[i].sample_size = AV_RB32(&scratch[4]);
+ if (AV_RB32(&scratch[8]) == 0xFFFFFFFF) {
film->sample_table[i].stream = film->audio_stream_index;
film->sample_table[i].pts = audio_frame_counter;
film->sample_table[i].pts *= film->base_clock;
@@ -200,7 +193,7 @@ static int film_read_header(AVFormatContext *s,
(film->audio_channels * film->audio_bits / 8));
} else {
film->sample_table[i].stream = film->video_stream_index;
- film->sample_table[i].pts = BE_32(&scratch[8]) & 0x7FFFFFFF;
+ film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
}
}
@@ -231,18 +224,10 @@ static int film_read_packet(AVFormatContext *s,
/* do a special song and dance when loading FILM Cinepak chunks */
if ((sample->stream == film->video_stream_index) &&
(film->video_type == CODEC_ID_CINEPAK)) {
- if (av_new_packet(pkt, sample->sample_size - film->cvid_extra_bytes))
- return AVERROR_NOMEM;
- if(pkt->size < 10)
- return -1;
pkt->pos= url_ftell(pb);
- ret = get_buffer(pb, pkt->data, 10);
- /* skip the non-spec CVID bytes */
- url_fseek(pb, film->cvid_extra_bytes, SEEK_CUR);
- ret += get_buffer(pb, pkt->data + 10,
- sample->sample_size - 10 - film->cvid_extra_bytes);
- if (ret != sample->sample_size - film->cvid_extra_bytes)
- ret = AVERROR_IO;
+ if (av_new_packet(pkt, sample->sample_size))
+ return AVERROR_NOMEM;
+ get_buffer(pb, pkt->data, sample->sample_size);
} else if ((sample->stream == film->audio_stream_index) &&
(film->audio_channels == 2)) {
/* stereo PCM needs to be interleaved */
diff --git a/contrib/ffmpeg/libavformat/sgi.c b/contrib/ffmpeg/libavformat/sgi.c
deleted file mode 100644
index bf0297e81..000000000
--- a/contrib/ffmpeg/libavformat/sgi.c
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * SGI image format
- * Todd Kirby <doubleshot@pacbell.net>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "avformat.h"
-#include "avio.h"
-
-/* #define DEBUG */
-
-/* sgi image file signature */
-#define SGI_MAGIC 474
-
-#define SGI_HEADER_SIZE 512
-
-#define SGI_GRAYSCALE 1
-#define SGI_RGB 3
-#define SGI_RGBA 4
-
-#define SGI_SINGLE_CHAN 2
-#define SGI_MULTI_CHAN 3
-
-typedef struct SGIInfo{
- short magic;
- char rle;
- char bytes_per_channel;
- unsigned short dimension;
- unsigned short xsize;
- unsigned short ysize;
- unsigned short zsize;
-} SGIInfo;
-
-
-static int sgi_probe(AVProbeData *pd)
-{
- /* test for sgi magic */
- if (pd->buf_size >= 2 && BE_16(&pd->buf[0]) == SGI_MAGIC) {
- return AVPROBE_SCORE_MAX;
- } else {
- return 0;
- }
-}
-
-/* read sgi header fields */
-static void read_sgi_header(ByteIOContext *f, SGIInfo *info)
-{
- info->magic = (unsigned short) get_be16(f);
- info->rle = get_byte(f);
- info->bytes_per_channel = get_byte(f);
- info->dimension = (unsigned short)get_be16(f);
- info->xsize = (unsigned short) get_be16(f);
- info->ysize = (unsigned short) get_be16(f);
- info->zsize = (unsigned short) get_be16(f);
-
- if(info->zsize > 4096)
- info->zsize= 0;
-
-#ifdef DEBUG
- printf("sgi header fields:\n");
- printf(" magic: %d\n", info->magic);
- printf(" rle: %d\n", info->rle);
- printf(" bpc: %d\n", info->bytes_per_channel);
- printf(" dim: %d\n", info->dimension);
- printf(" xsize: %d\n", info->xsize);
- printf(" ysize: %d\n", info->ysize);
- printf(" zsize: %d\n", info->zsize);
-#endif
-
- return;
-}
-
-
-/* read an uncompressed sgi image */
-static int read_uncompressed_sgi(const SGIInfo *si,
- AVPicture *pict, ByteIOContext *f)
-{
- int x, y, z, chan_offset, ret = 0;
- uint8_t *dest_row;
-
- /* skip header */
- url_fseek(f, SGI_HEADER_SIZE, SEEK_SET);
-
- pict->linesize[0] = si->xsize;
-
- for (z = 0; z < si->zsize; z++) {
-
-#ifndef WORDS_BIGENDIAN
- /* rgba -> bgra for rgba32 on little endian cpus */
- if (si->zsize == 4 && z != 3)
- chan_offset = 2 - z;
- else
-#endif
- chan_offset = z;
-
- for (y = si->ysize - 1; y >= 0; y--) {
- dest_row = pict->data[0] + (y * si->xsize * si->zsize);
-
- for (x = 0; x < si->xsize; x++) {
- dest_row[chan_offset] = get_byte(f);
- dest_row += si->zsize;
- }
- }
- }
-
- return ret;
-}
-
-
-/* expand an rle row into a channel */
-static int expand_rle_row(ByteIOContext *f, unsigned char *optr,
- int chan_offset, int pixelstride)
-{
- unsigned char pixel, count;
- int length = 0;
-
-#ifndef WORDS_BIGENDIAN
- /* rgba -> bgra for rgba32 on little endian cpus */
- if (pixelstride == 4 && chan_offset != 3) {
- chan_offset = 2 - chan_offset;
- }
-#endif
-
- optr += chan_offset;
-
- while (1) {
- pixel = get_byte(f);
-
- if (!(count = (pixel & 0x7f))) {
- return length;
- }
- if (pixel & 0x80) {
- while (count--) {
- *optr = get_byte(f);
- length++;
- optr += pixelstride;
- }
- } else {
- pixel = get_byte(f);
-
- while (count--) {
- *optr = pixel;
- length++;
- optr += pixelstride;
- }
- }
- }
-}
-
-
-/* read a run length encoded sgi image */
-static int read_rle_sgi(const SGIInfo *sgi_info,
- AVPicture *pict, ByteIOContext *f)
-{
- uint8_t *dest_row;
- unsigned long *start_table;
- int y, z, xsize, ysize, zsize, tablen;
- long start_offset;
- int ret = 0;
-
- xsize = sgi_info->xsize;
- ysize = sgi_info->ysize;
- zsize = sgi_info->zsize;
-
- /* skip header */
- url_fseek(f, SGI_HEADER_SIZE, SEEK_SET);
-
- /* size of rle offset and length tables */
- tablen = ysize * zsize * sizeof(long);
-
- start_table = (unsigned long *)av_malloc(tablen);
-
- if (!get_buffer(f, (uint8_t *)start_table, tablen)) {
- ret = AVERROR_IO;
- goto fail;
- }
-
- /* skip run length table */
- url_fseek(f, tablen, SEEK_CUR);
-
- for (z = 0; z < zsize; z++) {
- for (y = 0; y < ysize; y++) {
- dest_row = pict->data[0] + (ysize - 1 - y) * (xsize * zsize);
-
- start_offset = BE_32(&start_table[y + z * ysize]);
-
- /* don't seek if already at the next rle start offset */
- if (url_ftell(f) != start_offset) {
- url_fseek(f, start_offset, SEEK_SET);
- }
-
- if (expand_rle_row(f, dest_row, z, zsize) != xsize) {
- ret = AVERROR_INVALIDDATA;
- goto fail;
- }
- }
- }
-
-fail:
- av_free(start_table);
-
- return ret;
-}
-
-
-static int sgi_read(ByteIOContext *f,
- int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
-{
- SGIInfo sgi_info, *s = &sgi_info;
- AVImageInfo info1, *info = &info1;
- int ret;
-
- read_sgi_header(f, s);
-
- if (s->bytes_per_channel != 1) {
- return AVERROR_INVALIDDATA;
- }
-
- /* check for supported image dimensions */
- if (s->dimension != 2 && s->dimension != 3) {
- return AVERROR_INVALIDDATA;
- }
-
- if (s->zsize == SGI_GRAYSCALE) {
- info->pix_fmt = PIX_FMT_GRAY8;
- } else if (s->zsize == SGI_RGB) {
- info->pix_fmt = PIX_FMT_RGB24;
- } else if (s->zsize == SGI_RGBA) {
- info->pix_fmt = PIX_FMT_RGBA32;
- } else {
- return AVERROR_INVALIDDATA;
- }
-
- info->width = s->xsize;
- info->height = s->ysize;
-
- ret = alloc_cb(opaque, info);
- if (ret)
- return ret;
-
- if (s->rle) {
- return read_rle_sgi(s, &info->pict, f);
- } else {
- return read_uncompressed_sgi(s, &info->pict, f);
- }
-
- return 0; /* not reached */
-}
-
-#ifdef CONFIG_MUXERS
-static void write_sgi_header(ByteIOContext *f, const SGIInfo *info)
-{
- int i;
-
- put_be16(f, SGI_MAGIC);
- put_byte(f, info->rle);
- put_byte(f, info->bytes_per_channel);
- put_be16(f, info->dimension);
- put_be16(f, info->xsize);
- put_be16(f, info->ysize);
- put_be16(f, info->zsize);
-
- /* The rest are constant in this implementation */
- put_be32(f, 0L); /* pixmin */
- put_be32(f, 255L); /* pixmax */
- put_be32(f, 0L); /* dummy */
-
- /* name */
- for (i = 0; i < 80; i++) {
- put_byte(f, 0);
- }
-
- put_be32(f, 0L); /* colormap */
-
- /* The rest of the 512 byte header is unused. */
- for (i = 0; i < 404; i++) {
- put_byte(f, 0);
- }
-}
-
-
-static int rle_row(ByteIOContext *f, char *row, int stride, int rowsize)
-{
- int length, count, i, x;
- char *start, repeat = 0;
-
- for (x = rowsize, length = 0; x > 0;) {
- start = row;
- row += (2 * stride);
- x -= 2;
-
- while (x > 0 && (row[-2 * stride] != row[-1 * stride] ||
- row[-1 * stride] != row[0])) {
- row += stride;
- x--;
- };
-
- row -= (2 * stride);
- x += 2;
-
- count = (row - start) / stride;
- while (count > 0) {
- i = count > 126 ? 126 : count;
- count -= i;
-
- put_byte(f, 0x80 | i);
- length++;
-
- while (i > 0) {
- put_byte(f, *start);
- start += stride;
- i--;
- length++;
- };
- };
-
- if (x <= 0) {
- break;
- }
-
- start = row;
- repeat = row[0];
-
- row += stride;
- x--;
-
- while (x > 0 && *row == repeat) {
- row += stride;
- x--;
- };
-
- count = (row - start) / stride;
- while (count > 0) {
- i = count > 126 ? 126 : count;
- count -= i;
-
- put_byte(f, i);
- length++;
-
- put_byte(f, repeat);
- length++;
- };
- };
-
- length++;
-
- put_byte(f, 0);
- return (length);
-}
-
-
-static int sgi_write(ByteIOContext *pb, AVImageInfo *info)
-{
- SGIInfo sgi_info, *si = &sgi_info;
- long *offsettab, *lengthtab;
- int i, y, z;
- int tablesize, chan_offset;
- uint8_t *srcrow;
-
- si->xsize = info->width;
- si->ysize = info->height;
- si->rle = 1;
- si->bytes_per_channel = 1;
-
- switch(info->pix_fmt) {
- case PIX_FMT_GRAY8:
- si->dimension = SGI_SINGLE_CHAN;
- si->zsize = SGI_GRAYSCALE;
- break;
- case PIX_FMT_RGB24:
- si->dimension = SGI_MULTI_CHAN;
- si->zsize = SGI_RGB;
- break;
- case PIX_FMT_RGBA32:
- si->dimension = SGI_MULTI_CHAN;
- si->zsize = SGI_RGBA;
- break;
- default:
- return AVERROR_INVALIDDATA;
- }
-
- write_sgi_header(pb, si);
-
- tablesize = si->zsize * si->ysize * sizeof(long);
-
- /* skip rle offset and length tables, write them at the end. */
- url_fseek(pb, tablesize * 2, SEEK_CUR);
- put_flush_packet(pb);
-
- lengthtab = av_malloc(tablesize);
- offsettab = av_malloc(tablesize);
-
- for (z = 0; z < si->zsize; z++) {
-
-#ifndef WORDS_BIGENDIAN
- /* rgba -> bgra for rgba32 on little endian cpus */
- if (si->zsize == 4 && z != 3)
- chan_offset = 2 - z;
- else
-#endif
- chan_offset = z;
-
- srcrow = info->pict.data[0] + chan_offset;
-
- for (y = si->ysize -1; y >= 0; y--) {
- offsettab[(z * si->ysize) + y] = url_ftell(pb);
- lengthtab[(z * si->ysize) + y] = rle_row(pb, srcrow,
- si->zsize, si->xsize);
- srcrow += info->pict.linesize[0];
- }
- }
-
- url_fseek(pb, 512, SEEK_SET);
-
- /* write offset table */
- for (i = 0; i < (si->ysize * si->zsize); i++) {
- put_be32(pb, offsettab[i]);
- }
-
- /* write length table */
- for (i = 0; i < (si->ysize * si->zsize); i++) {
- put_be32(pb, lengthtab[i]);
- }
-
- put_flush_packet(pb);
-
- av_free(lengthtab);
- av_free(offsettab);
-
- return 0;
-}
-#endif // CONFIG_MUXERS
-
-AVImageFormat sgi_image_format = {
- "sgi",
- "sgi,rgb,rgba,bw",
- sgi_probe,
- sgi_read,
- (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_RGBA32),
-#ifdef CONFIG_MUXERS
- sgi_write,
-#else
- NULL,
-#endif // CONFIG_MUXERS
-};
diff --git a/contrib/ffmpeg/libavformat/sierravmd.c b/contrib/ffmpeg/libavformat/sierravmd.c
index 92dbce91d..3e1c8597d 100644
--- a/contrib/ffmpeg/libavformat/sierravmd.c
+++ b/contrib/ffmpeg/libavformat/sierravmd.c
@@ -64,7 +64,7 @@ static int vmd_probe(AVProbeData *p)
/* check if the first 2 bytes of the file contain the appropriate size
* of a VMD header chunk */
- if (LE_16(&p->buf[0]) != VMD_HEADER_SIZE - 2)
+ if (AV_RL16(&p->buf[0]) != VMD_HEADER_SIZE - 2)
return 0;
/* only return half certainty since this check is a bit sketchy */
@@ -103,14 +103,14 @@ static int vmd_read_header(AVFormatContext *s,
vst->codec->codec_type = CODEC_TYPE_VIDEO;
vst->codec->codec_id = CODEC_ID_VMDVIDEO;
vst->codec->codec_tag = 0; /* no fourcc */
- vst->codec->width = LE_16(&vmd->vmd_header[12]);
- vst->codec->height = LE_16(&vmd->vmd_header[14]);
+ vst->codec->width = AV_RL16(&vmd->vmd_header[12]);
+ vst->codec->height = AV_RL16(&vmd->vmd_header[14]);
vst->codec->extradata_size = VMD_HEADER_SIZE;
vst->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE);
/* if sample rate is 0, assume no audio */
- vmd->sample_rate = LE_16(&vmd->vmd_header[804]);
+ vmd->sample_rate = AV_RL16(&vmd->vmd_header[804]);
if (vmd->sample_rate) {
st = av_new_stream(s, 0);
if (!st)
@@ -121,7 +121,7 @@ static int vmd_read_header(AVFormatContext *s,
st->codec->codec_tag = 0; /* no fourcc */
st->codec->channels = (vmd->vmd_header[811] & 0x80) ? 2 : 1;
st->codec->sample_rate = vmd->sample_rate;
- st->codec->block_align = LE_16(&vmd->vmd_header[806]);
+ st->codec->block_align = AV_RL16(&vmd->vmd_header[806]);
if (st->codec->block_align & 0x8000) {
st->codec->bits_per_sample = 16;
st->codec->block_align = -(st->codec->block_align - 0x10000);
@@ -140,14 +140,14 @@ static int vmd_read_header(AVFormatContext *s,
pts_inc = num;
}
- toc_offset = LE_32(&vmd->vmd_header[812]);
- vmd->frame_count = LE_16(&vmd->vmd_header[6]);
- vmd->frames_per_block = LE_16(&vmd->vmd_header[18]);
+ toc_offset = AV_RL32(&vmd->vmd_header[812]);
+ vmd->frame_count = AV_RL16(&vmd->vmd_header[6]);
+ vmd->frames_per_block = AV_RL16(&vmd->vmd_header[18]);
url_fseek(pb, toc_offset, SEEK_SET);
raw_frame_table = NULL;
vmd->frame_table = NULL;
- sound_buffers = LE_16(&vmd->vmd_header[808]);
+ sound_buffers = AV_RL16(&vmd->vmd_header[808]);
raw_frame_table_size = vmd->frame_count * 6;
raw_frame_table = av_malloc(raw_frame_table_size);
if(vmd->frame_count * vmd->frames_per_block >= UINT_MAX / sizeof(vmd_frame_t)){
@@ -170,7 +170,7 @@ static int vmd_read_header(AVFormatContext *s,
total_frames = 0;
for (i = 0; i < vmd->frame_count; i++) {
- current_offset = LE_32(&raw_frame_table[6 * i + 2]);
+ current_offset = AV_RL32(&raw_frame_table[6 * i + 2]);
/* handle each entry in index block */
for (j = 0; j < vmd->frames_per_block; j++) {
@@ -179,7 +179,7 @@ static int vmd_read_header(AVFormatContext *s,
get_buffer(pb, chunk, BYTES_PER_FRAME_RECORD);
type = chunk[0];
- size = LE_32(&chunk[2]);
+ size = AV_RL32(&chunk[2]);
if(!size)
continue;
switch(type) {
diff --git a/contrib/ffmpeg/libavformat/smacker.c b/contrib/ffmpeg/libavformat/smacker.c
index a08bd2d9f..04fde3d03 100644
--- a/contrib/ffmpeg/libavformat/smacker.c
+++ b/contrib/ffmpeg/libavformat/smacker.c
@@ -226,7 +226,7 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
int pos;
if (url_feof(&s->pb) || smk->cur_frame >= smk->frames)
- return -EIO;
+ return AVERROR(EIO);
/* if we demuxed all streams, pass another frame */
if(smk->curstream < 0) {
@@ -311,7 +311,7 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
pkt->size = smk->buf_sizes[smk->curstream];
pkt->stream_index = smk->stream_id[smk->curstream];
pkt->pts = smk->aud_pts[smk->curstream];
- smk->aud_pts[smk->curstream] += LE_32(pkt->data);
+ smk->aud_pts[smk->curstream] += AV_RL32(pkt->data);
smk->curstream--;
}
diff --git a/contrib/ffmpeg/libavformat/sol.c b/contrib/ffmpeg/libavformat/sol.c
index 20e45f75d..951ec6eb9 100644
--- a/contrib/ffmpeg/libavformat/sol.c
+++ b/contrib/ffmpeg/libavformat/sol.c
@@ -29,7 +29,7 @@
#include "bswap.h"
/* if we don't know the size in advance */
-#define AU_UNKOWN_SIZE ((uint32_t)(~0))
+#define AU_UNKNOWN_SIZE ((uint32_t)(~0))
static int sol_probe(AVProbeData *p)
{
@@ -133,7 +133,7 @@ static int sol_read_packet(AVFormatContext *s,
int ret;
if (url_feof(&s->pb))
- return -EIO;
+ return AVERROR(EIO);
ret= av_get_packet(&s->pb, pkt, MAX_SIZE);
pkt->stream_index = 0;
diff --git a/contrib/ffmpeg/libavformat/swf.c b/contrib/ffmpeg/libavformat/swf.c
index 6029e3678..7d889af7d 100644
--- a/contrib/ffmpeg/libavformat/swf.c
+++ b/contrib/ffmpeg/libavformat/swf.c
@@ -59,102 +59,39 @@
#include <assert.h>
typedef struct {
-
+ int audio_stream_index;
offset_t duration_pos;
offset_t tag_pos;
int samples_per_frame;
int sound_samples;
- int video_samples;
int swf_frame_number;
int video_frame_number;
- int ms_per_frame;
- int ch_id;
+ int frame_rate;
int tag;
- uint8_t *audio_fifo;
+ uint8_t audio_fifo[AUDIO_FIFO_SIZE];
int audio_in_pos;
- int audio_out_pos;
- int audio_size;
int video_type;
int audio_type;
} SWFContext;
-static const CodecTag swf_codec_tags[] = {
+static const AVCodecTag swf_codec_tags[] = {
{CODEC_ID_FLV1, 0x02},
{CODEC_ID_VP6F, 0x04},
{0, 0},
};
-static const int sSampleRates[3][4] = {
- {44100, 48000, 32000, 0},
- {22050, 24000, 16000, 0},
- {11025, 12000, 8000, 0},
-};
-
-static const int sBitRates[2][3][15] = {
- { { 0, 32, 64, 96,128,160,192,224,256,288,320,352,384,416,448},
- { 0, 32, 48, 56, 64, 80, 96,112,128,160,192,224,256,320,384},
- { 0, 32, 40, 48, 56, 64, 80, 96,112,128,160,192,224,256,320}
- },
- { { 0, 32, 48, 56, 64, 80, 96,112,128,144,160,176,192,224,256},
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160},
- { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160}
- },
-};
-
-static const int sSamplesPerFrame[3][3] =
-{
- { 384, 1152, 1152 },
- { 384, 1152, 576 },
- { 384, 1152, 576 }
-};
-
-static const int sBitsPerSlot[3] = {
- 32,
- 8,
- 8
+static const AVCodecTag swf_audio_codec_tags[] = {
+ {CODEC_ID_PCM_S16LE, 0x00},
+ {CODEC_ID_ADPCM_SWF, 0x01},
+ {CODEC_ID_MP3, 0x02},
+ {CODEC_ID_PCM_S16LE, 0x03},
+ //{CODEC_ID_NELLYMOSER, 0x06},
+ {0, 0},
};
-static int swf_mp3_info(void *data, int *byteSize, int *samplesPerFrame, int *sampleRate, int *isMono )
-{
- uint8_t *dataTmp = (uint8_t *)data;
- uint32_t header = ( (uint32_t)dataTmp[0] << 24 ) | ( (uint32_t)dataTmp[1] << 16 ) | ( (uint32_t)dataTmp[2] << 8 ) | (uint32_t)dataTmp[3];
- int layerID = 3 - ((header >> 17) & 0x03);
- int bitRateID = ((header >> 12) & 0x0f);
- int sampleRateID = ((header >> 10) & 0x03);
- int bitRate = 0;
- int bitsPerSlot = sBitsPerSlot[layerID];
- int isPadded = ((header >> 9) & 0x01);
-
- if ( (( header >> 21 ) & 0x7ff) != 0x7ff ) {
- return 0;
- }
-
- *isMono = ((header >> 6) & 0x03) == 0x03;
-
- if ( (header >> 19 ) & 0x01 ) {
- *sampleRate = sSampleRates[0][sampleRateID];
- bitRate = sBitRates[0][layerID][bitRateID] * 1000;
- *samplesPerFrame = sSamplesPerFrame[0][layerID];
- } else {
- if ( (header >> 20) & 0x01 ) {
- *sampleRate = sSampleRates[1][sampleRateID];
- bitRate = sBitRates[1][layerID][bitRateID] * 1000;
- *samplesPerFrame = sSamplesPerFrame[1][layerID];
- } else {
- *sampleRate = sSampleRates[2][sampleRateID];
- bitRate = sBitRates[1][layerID][bitRateID] * 1000;
- *samplesPerFrame = sSamplesPerFrame[2][layerID];
- }
- }
-
- *byteSize = ( ( ( ( *samplesPerFrame * (bitRate / bitsPerSlot) ) / *sampleRate ) + isPadded ) );
-
- return 1;
-}
-
#ifdef CONFIG_MUXERS
static void put_swf_tag(AVFormatContext *s, int tag)
{
@@ -306,25 +243,15 @@ static void put_swf_matrix(ByteIOContext *pb,
/* */
static int swf_write_header(AVFormatContext *s)
{
- SWFContext *swf;
+ SWFContext *swf = s->priv_data;
ByteIOContext *pb = &s->pb;
AVCodecContext *enc, *audio_enc, *video_enc;
PutBitContext p;
uint8_t buf1[256];
int i, width, height, rate, rate_base;
- swf = av_malloc(sizeof(SWFContext));
- if (!swf)
- return -1;
- s->priv_data = swf;
-
- swf->ch_id = -1;
swf->audio_in_pos = 0;
- swf->audio_out_pos = 0;
- swf->audio_size = 0;
- swf->audio_fifo = av_malloc(AUDIO_FIFO_SIZE);
swf->sound_samples = 0;
- swf->video_samples = 0;
swf->swf_frame_number = 0;
swf->video_frame_number = 0;
@@ -332,15 +259,24 @@ static int swf_write_header(AVFormatContext *s)
audio_enc = NULL;
for(i=0;i<s->nb_streams;i++) {
enc = s->streams[i]->codec;
- if (enc->codec_type == CODEC_TYPE_AUDIO)
- audio_enc = enc;
- else {
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ if (enc->codec_id == CODEC_ID_MP3) {
+ if (!enc->frame_size) {
+ av_log(s, AV_LOG_ERROR, "audio frame size not set\n");
+ return -1;
+ }
+ audio_enc = enc;
+ } else {
+ av_log(s, AV_LOG_ERROR, "SWF muxer only supports MP3\n");
+ return -1;
+ }
+ } else {
if ( enc->codec_id == CODEC_ID_VP6F ||
enc->codec_id == CODEC_ID_FLV1 ||
enc->codec_id == CODEC_ID_MJPEG ) {
video_enc = enc;
} else {
- av_log(enc, AV_LOG_ERROR, "SWF only supports VP6, FLV1 and MJPEG\n");
+ av_log(s, AV_LOG_ERROR, "SWF muxer only supports VP6, FLV1 and MJPEG\n");
return -1;
}
}
@@ -451,8 +387,6 @@ static int swf_write_header(AVFormatContext *s)
default:
/* not supported */
av_log(s, AV_LOG_ERROR, "swf doesnt support that sample rate, choose from (44100, 22050, 11025)\n");
- av_free(swf->audio_fifo);
- av_free(swf);
return -1;
}
v |= 0x02; /* 16 bit playback */
@@ -476,55 +410,12 @@ static int swf_write_video(AVFormatContext *s,
{
SWFContext *swf = s->priv_data;
ByteIOContext *pb = &s->pb;
- int c = 0;
- int outSize = 0;
- int outSamples = 0;
/* Flash Player limit */
if ( swf->swf_frame_number == 16000 ) {
av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");
}
- if ( swf->audio_type ) {
- /* Prescan audio data for this swf frame */
-retry_swf_audio_packet:
- if ( ( swf->audio_size-outSize ) >= 4 ) {
- int mp3FrameSize = 0;
- int mp3SampleRate = 0;
- int mp3IsMono = 0;
- int mp3SamplesPerFrame = 0;
-
- /* copy out mp3 header from ring buffer */
- uint8_t header[4];
- for (c=0; c<4; c++) {
- header[c] = swf->audio_fifo[(swf->audio_in_pos+outSize+c) % AUDIO_FIFO_SIZE];
- }
-
- if ( swf_mp3_info(header,&mp3FrameSize,&mp3SamplesPerFrame,&mp3SampleRate,&mp3IsMono) ) {
- if ( ( swf->audio_size-outSize ) >= mp3FrameSize ) {
- outSize += mp3FrameSize;
- outSamples += mp3SamplesPerFrame;
- if ( ( swf->sound_samples + outSamples + swf->samples_per_frame ) < swf->video_samples ) {
- goto retry_swf_audio_packet;
- }
- }
- } else {
- /* invalid mp3 data, skip forward
- we need to do this since the Flash Player
- does not like custom headers */
- swf->audio_in_pos ++;
- swf->audio_size --;
- swf->audio_in_pos %= AUDIO_FIFO_SIZE;
- goto retry_swf_audio_packet;
- }
- }
-
- /* audio stream is behind video stream, bail */
- if ( ( swf->sound_samples + outSamples + swf->samples_per_frame ) < swf->video_samples ) {
- return 0;
- }
- }
-
if ( swf->video_type == CODEC_ID_VP6F ||
swf->video_type == CODEC_ID_FLV1 ) {
if ( swf->video_frame_number == 0 ) {
@@ -608,23 +499,17 @@ retry_swf_audio_packet:
swf->swf_frame_number ++;
- swf->video_samples += swf->samples_per_frame;
-
/* streaming sound always should be placed just before showframe tags */
- if ( outSize > 0 ) {
+ if (swf->audio_type && swf->audio_in_pos) {
put_swf_tag(s, TAG_STREAMBLOCK | TAG_LONG);
- put_le16(pb, outSamples);
- put_le16(pb, 0);
- for (c=0; c<outSize; c++) {
- put_byte(pb,swf->audio_fifo[(swf->audio_in_pos+c) % AUDIO_FIFO_SIZE]);
- }
+ put_le16(pb, swf->sound_samples);
+ put_le16(pb, 0); // seek samples
+ put_buffer(pb, swf->audio_fifo, swf->audio_in_pos);
put_swf_end_tag(s);
/* update FIFO */
- swf->sound_samples += outSamples;
- swf->audio_in_pos += outSize;
- swf->audio_size -= outSize;
- swf->audio_in_pos %= AUDIO_FIFO_SIZE;
+ swf->sound_samples = 0;
+ swf->audio_in_pos = 0;
}
/* output the frame */
@@ -640,22 +525,21 @@ static int swf_write_audio(AVFormatContext *s,
AVCodecContext *enc, const uint8_t *buf, int size)
{
SWFContext *swf = s->priv_data;
- int c = 0;
/* Flash Player limit */
if ( swf->swf_frame_number == 16000 ) {
av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");
}
- if (enc->codec_id == CODEC_ID_MP3 ) {
- for (c=0; c<size; c++) {
- swf->audio_fifo[(swf->audio_out_pos+c)%AUDIO_FIFO_SIZE] = buf[c];
- }
- swf->audio_size += size;
- swf->audio_out_pos += size;
- swf->audio_out_pos %= AUDIO_FIFO_SIZE;
+ if (swf->audio_in_pos + size >= AUDIO_FIFO_SIZE) {
+ av_log(s, AV_LOG_ERROR, "audio fifo too small to mux audio essence\n");
+ return -1;
}
+ memcpy(swf->audio_fifo + swf->audio_in_pos, buf, size);
+ swf->audio_in_pos += size;
+ swf->sound_samples += enc->frame_size;
+
/* if audio only stream make sure we add swf frames */
if ( swf->video_type == 0 ) {
swf_write_video(s, enc, 0, 0);
@@ -699,10 +583,8 @@ static int swf_write_trailer(AVFormatContext *s)
put_le32(pb, file_size);
url_fseek(pb, swf->duration_pos, SEEK_SET);
put_le16(pb, video_enc->frame_number);
+ url_fseek(pb, file_size, SEEK_SET);
}
-
- av_free(swf->audio_fifo);
-
return 0;
}
#endif //CONFIG_MUXERS
@@ -747,18 +629,13 @@ static int swf_probe(AVProbeData *p)
static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
- SWFContext *swf = 0;
+ SWFContext *swf = s->priv_data;
ByteIOContext *pb = &s->pb;
- int nbits, len, frame_rate, tag, v;
- offset_t firstTagOff;
+ int nbits, len, tag, v;
+ offset_t frame_offset = -1;
AVStream *ast = 0;
AVStream *vst = 0;
- swf = av_malloc(sizeof(SWFContext));
- if (!swf)
- return -1;
- s->priv_data = swf;
-
tag = get_be32(pb) & 0xffffff00;
if (tag == MKBETAG('C', 'W', 'S', 0))
@@ -773,96 +650,60 @@ static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
nbits = get_byte(pb) >> 3;
len = (4 * nbits - 3 + 7) / 8;
url_fskip(pb, len);
- frame_rate = get_le16(pb);
+ swf->frame_rate = get_le16(pb); /* 8.8 fixed */
get_le16(pb); /* frame count */
- /* The Flash Player converts 8.8 frame rates
- to milliseconds internally. Do the same to get
- a correct framerate */
- swf->ms_per_frame = ( 1000 * 256 ) / frame_rate;
swf->samples_per_frame = 0;
- swf->ch_id = -1;
- firstTagOff = url_ftell(pb);
for(;;) {
+ offset_t tag_offset = url_ftell(pb);
tag = get_swf_tag(pb, &len);
- if (tag < 0) {
- if ( ast || vst ) {
- if ( vst && ast ) {
- vst->codec->time_base.den = ast->codec->sample_rate / swf->samples_per_frame;
- vst->codec->time_base.num = 1;
- }
- break;
- }
- av_log(s, AV_LOG_ERROR, "No media found in SWF\n");
- return AVERROR_IO;
+ if (tag < 0 || tag == TAG_VIDEOFRAME || tag == TAG_STREAMBLOCK) {
+ url_fseek(pb, frame_offset == -1 ? tag_offset : frame_offset, SEEK_SET);
+ break;
}
if ( tag == TAG_VIDEOSTREAM && !vst) {
- int codec_id;
- swf->ch_id = get_le16(pb);
+ int ch_id = get_le16(pb);
get_le16(pb);
get_le16(pb);
get_le16(pb);
get_byte(pb);
/* Check for FLV1 */
- codec_id = codec_get_id(swf_codec_tags, get_byte(pb));
- if ( codec_id ) {
- vst = av_new_stream(s, 0);
- av_set_pts_info(vst, 24, 1, 1000); /* 24 bit pts in ms */
-
- vst->codec->codec_type = CODEC_TYPE_VIDEO;
- vst->codec->codec_id = codec_id;
- if ( swf->samples_per_frame ) {
- vst->codec->time_base.den = 1000. / swf->ms_per_frame;
- vst->codec->time_base.num = 1;
- }
- }
+ vst = av_new_stream(s, ch_id);
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = codec_get_id(swf_codec_tags, get_byte(pb));
} else if ( ( tag == TAG_STREAMHEAD || tag == TAG_STREAMHEAD2 ) && !ast) {
/* streaming found */
+ int sample_rate_code;
get_byte(pb);
v = get_byte(pb);
swf->samples_per_frame = get_le16(pb);
- if (len!=4)
+ ast = av_new_stream(s, -1); /* -1 to avoid clash with video stream ch_id */
+ swf->audio_stream_index = ast->index;
+ ast->codec->channels = 1 + (v&1);
+ ast->codec->codec_type = CODEC_TYPE_AUDIO;
+ ast->codec->codec_id = codec_get_id(swf_audio_codec_tags, (v>>4) & 15);
+ ast->need_parsing = 1;
+ sample_rate_code= (v>>2) & 3;
+ if (!sample_rate_code)
+ return AVERROR_IO;
+ ast->codec->sample_rate = 11025 << (sample_rate_code-1);
+ av_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
+ if (len > 4)
url_fskip(pb,len-4);
- /* if mp3 streaming found, OK */
- if ((v & 0x20) != 0) {
- if ( tag == TAG_STREAMHEAD2 ) {
- get_le16(pb);
- }
- ast = av_new_stream(s, 1);
- if (!ast)
- return -ENOMEM;
- av_set_pts_info(ast, 24, 1, 1000); /* 24 bit pts in ms */
-
- if (v & 0x01)
- ast->codec->channels = 2;
- else
- ast->codec->channels = 1;
-
- switch((v>> 2) & 0x03) {
- case 1:
- ast->codec->sample_rate = 11025;
- break;
- case 2:
- ast->codec->sample_rate = 22050;
- break;
- case 3:
- ast->codec->sample_rate = 44100;
- break;
- default:
- av_free(ast);
- return AVERROR_IO;
- }
- ast->codec->codec_type = CODEC_TYPE_AUDIO;
- ast->codec->codec_id = CODEC_ID_MP3;
- ast->need_parsing = 1;
- }
+
+ } else if (tag == TAG_JPEG2 && !vst) {
+ vst = av_new_stream(s, -2); /* -2 to avoid clash with video stream and audio stream */
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = CODEC_ID_MJPEG;
+ url_fskip(pb, len);
+ frame_offset = tag_offset;
} else {
url_fskip(pb, len);
}
}
- url_fseek(pb, firstTagOff, SEEK_SET);
-
+ if (vst)
+ av_set_pts_info(vst, 64, 256, swf->frame_rate);
return 0;
}
@@ -878,36 +719,48 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
if (tag < 0)
return AVERROR_IO;
if (tag == TAG_VIDEOFRAME) {
+ int ch_id = get_le16(pb);
+ len -= 2;
for( i=0; i<s->nb_streams; i++ ) {
st = s->streams[i];
- if (st->id == 0) {
- if ( get_le16(pb) == swf->ch_id ) {
- frame = get_le16(pb);
- av_get_packet(pb, pkt, len-4);
- pkt->pts = frame * swf->ms_per_frame;
- pkt->stream_index = st->index;
- return pkt->size;
- } else {
- url_fskip(pb, len-2);
- continue;
- }
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO && st->id == ch_id) {
+ frame = get_le16(pb);
+ av_get_packet(pb, pkt, len-2);
+ pkt->pts = frame;
+ pkt->stream_index = st->index;
+ return pkt->size;
}
}
- url_fskip(pb, len);
} else if (tag == TAG_STREAMBLOCK) {
- for( i=0; i<s->nb_streams; i++ ) {
+ st = s->streams[swf->audio_stream_index];
+ if (st->codec->codec_id == CODEC_ID_MP3) {
+ url_fskip(pb, 4);
+ av_get_packet(pb, pkt, len-4);
+ } else { // ADPCM, PCM
+ av_get_packet(pb, pkt, len);
+ }
+ pkt->stream_index = st->index;
+ return pkt->size;
+ } else if (tag == TAG_JPEG2) {
+ for (i=0; i<s->nb_streams; i++) {
st = s->streams[i];
- if (st->id == 1) {
- url_fskip(pb, 4);
- av_get_packet(pb, pkt, len-4);
+ if (st->id == -2) {
+ get_le16(pb); /* BITMAP_ID */
+ av_new_packet(pkt, len-2);
+ get_buffer(pb, pkt->data, 4);
+ if (AV_RB32(pkt->data) == 0xffd8ffd9) {
+ /* old SWF files containing SOI/EOI as data start */
+ pkt->size -= 4;
+ get_buffer(pb, pkt->data, pkt->size);
+ } else {
+ get_buffer(pb, pkt->data + 4, pkt->size - 4);
+ }
pkt->stream_index = st->index;
return pkt->size;
}
}
- url_fskip(pb, len);
- } else {
- url_fskip(pb, len);
}
+ url_fskip(pb, len);
}
return 0;
}
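In the reworked swf.c demuxer above, the frame rate read from the header is kept as the raw SWF 8.8 fixed-point value and the video time base is set to 256/frame_rate, so a pts that simply counts frames is already in the right units. A small sketch with an illustrative value:

    /* Sketch: SWF stores fps as 8.8 fixed point, i.e. frame_rate = fps * 256. */
    static AVRational swf_time_base_sketch(void)
    {
        int frame_rate = 30 << 8;               /* 30.0 fps encoded as 7680 */
        AVRational tb  = { 256, frame_rate };   /* 256/7680 = 1/30 s per tick */
        return tb;                              /* fps = tb.den / tb.num = 30 */
    }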
diff --git a/contrib/ffmpeg/libavformat/tcp.c b/contrib/ffmpeg/libavformat/tcp.c
index 93755c497..a5539be4c 100644
--- a/contrib/ffmpeg/libavformat/tcp.c
+++ b/contrib/ffmpeg/libavformat/tcp.c
@@ -20,18 +20,7 @@
*/
#include "avformat.h"
#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#if defined(__BEOS__) || defined(__INNOTEK_LIBC__)
-typedef int socklen_t;
-#endif
-#ifndef __BEOS__
-# include <arpa/inet.h>
-#else
-# include "barpainet.h"
-#endif
-#include <netdb.h>
+#include "network.h"
#include <sys/time.h>
#include <fcntl.h>
@@ -73,7 +62,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
s = av_malloc(sizeof(TCPContext));
if (!s)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
h->priv_data = s;
if (port <= 0 || port >= 65536)
@@ -84,7 +73,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
if (resolve_host(&dest_addr.sin_addr, hostname) < 0)
goto fail;
- fd = socket(PF_INET, SOCK_STREAM, 0);
+ fd = socket(AF_INET, SOCK_STREAM, 0);
if (fd < 0)
goto fail;
fcntl(fd, F_SETFL, O_NONBLOCK);
@@ -101,7 +90,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
/* wait until we are connected or until abort */
for(;;) {
if (url_interrupt_cb()) {
- ret = -EINTR;
+ ret = AVERROR(EINTR);
goto fail1;
}
fd_max = fd;
@@ -127,7 +116,7 @@ static int tcp_open(URLContext *h, const char *uri, int flags)
ret = AVERROR_IO;
fail1:
if (fd >= 0)
- close(fd);
+ closesocket(fd);
av_free(s);
return ret;
}
@@ -141,7 +130,7 @@ static int tcp_read(URLContext *h, uint8_t *buf, int size)
for (;;) {
if (url_interrupt_cb())
- return -EINTR;
+ return AVERROR(EINTR);
fd_max = s->fd;
FD_ZERO(&rfds);
FD_SET(s->fd, &rfds);
@@ -149,18 +138,10 @@ static int tcp_read(URLContext *h, uint8_t *buf, int size)
tv.tv_usec = 100 * 1000;
ret = select(fd_max + 1, &rfds, NULL, NULL, &tv);
if (ret > 0 && FD_ISSET(s->fd, &rfds)) {
-#ifdef __BEOS__
len = recv(s->fd, buf, size, 0);
-#else
- len = read(s->fd, buf, size);
-#endif
if (len < 0) {
if (errno != EINTR && errno != EAGAIN)
-#ifdef __BEOS__
- return errno;
-#else
- return -errno;
-#endif
+ return AVERROR(errno);
} else return len;
} else if (ret < 0) {
return -1;
@@ -178,7 +159,7 @@ static int tcp_write(URLContext *h, uint8_t *buf, int size)
size1 = size;
while (size > 0) {
if (url_interrupt_cb())
- return -EINTR;
+ return AVERROR(EINTR);
fd_max = s->fd;
FD_ZERO(&wfds);
FD_SET(s->fd, &wfds);
@@ -186,19 +167,10 @@ static int tcp_write(URLContext *h, uint8_t *buf, int size)
tv.tv_usec = 100 * 1000;
ret = select(fd_max + 1, NULL, &wfds, NULL, &tv);
if (ret > 0 && FD_ISSET(s->fd, &wfds)) {
-#ifdef __BEOS__
len = send(s->fd, buf, size, 0);
-#else
- len = write(s->fd, buf, size);
-#endif
if (len < 0) {
- if (errno != EINTR && errno != EAGAIN) {
-#ifdef __BEOS__
- return errno;
-#else
- return -errno;
-#endif
- }
+ if (errno != EINTR && errno != EAGAIN)
+ return AVERROR(errno);
continue;
}
size -= len;
@@ -213,11 +185,7 @@ static int tcp_write(URLContext *h, uint8_t *buf, int size)
static int tcp_close(URLContext *h)
{
TCPContext *s = h->priv_data;
-#ifdef CONFIG_BEOS_NETSERVER
closesocket(s->fd);
-#else
- close(s->fd);
-#endif
av_free(s);
return 0;
}
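tcp.c (and udp.c below) drop their per-platform socket includes in favour of a single "network.h". The following is only an assumption about the kind of shims such a header centralizes, not the actual FFmpeg file:

    /* Hypothetical sketch of a networking portability header. */
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <netdb.h>

    #ifndef closesocket
    #define closesocket(fd) close(fd)   /* assumption: plain POSIX fallback */
    #endif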
diff --git a/contrib/ffmpeg/libavformat/thp.c b/contrib/ffmpeg/libavformat/thp.c
new file mode 100644
index 000000000..d0d80428c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/thp.c
@@ -0,0 +1,170 @@
+/*
+ * THP Demuxer
+ * Copyright (c) 2007 Marco Gerards.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "avformat.h"
+#include "allformats.h"
+
+typedef struct ThpDemuxContext {
+ int version;
+ int first_frame;
+ int first_framesz;
+ int last_frame;
+ int compoff;
+ int framecnt;
+ AVRational fps;
+ int frame;
+ int next_frame;
+ int next_framesz;
+ int video_stream_index;
+ int compcount;
+ unsigned char components[16];
+ AVStream* vst;
+ int has_audio;
+} ThpDemuxContext;
+
+
+static int thp_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size < 4)
+ return 0;
+
+ if (AV_RL32(p->buf) == MKTAG('T', 'H', 'P', '\0'))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int thp_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ThpDemuxContext *thp = s->priv_data;
+ AVStream *st;
+ ByteIOContext *pb = &s->pb;
+ int i;
+
+ /* Read the file header. */
+
+ get_be32(pb); /* Skip Magic. */
+ thp->version = get_be32(pb);
+
+ get_be32(pb); /* Max buf size. */
+ get_be32(pb); /* Max samples. */
+
+ thp->fps = av_d2q(av_int2flt(get_be32(pb)), INT_MAX);
+ thp->framecnt = get_be32(pb);
+ thp->first_framesz = get_be32(pb);
+ get_be32(pb); /* Data size. */
+
+ thp->compoff = get_be32(pb);
+ get_be32(pb); /* offsetDataOffset. */
+ thp->first_frame = get_be32(pb);
+ thp->last_frame = get_be32(pb);
+
+ thp->next_framesz = thp->first_framesz;
+ thp->next_frame = thp->first_frame;
+
+ /* Read the component structure. */
+ url_fseek (pb, thp->compoff, SEEK_SET);
+ thp->compcount = get_be32(pb);
+
+ /* Read the list of component types. */
+ get_buffer(pb, thp->components, 16);
+
+ for (i = 0; i < thp->compcount; i++) {
+ if (thp->components[i] == 0) {
+ if (thp->vst != 0)
+ break;
+
+ /* Video component. */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ /* The denominator and numerator are switched because 1/fps
+ is required. */
+ av_set_pts_info(st, 64, thp->fps.den, thp->fps.num);
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_THP;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = get_be32(pb);
+ st->codec->height = get_be32(pb);
+ st->codec->sample_rate = av_q2d(thp->fps);
+ thp->vst = st;
+ thp->video_stream_index = st->index;
+
+ if (thp->version == 0x11000)
+ get_be32(pb); /* Unknown. */
+ }
+ else if (thp->components[i] == 1) {
+ /* XXX: Required for audio playback. */
+ thp->has_audio = 1;
+ }
+ }
+
+ return 0;
+}
+
+static int thp_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ ThpDemuxContext *thp = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int size;
+ int ret;
+
+ /* Terminate when last frame is reached. */
+ if (thp->frame >= thp->framecnt)
+ return AVERROR_IO;
+
+ url_fseek(pb, thp->next_frame, SEEK_SET);
+
+ /* Locate the next frame and read out its size. */
+ thp->next_frame += thp->next_framesz;
+ thp->next_framesz = get_be32(pb);
+
+ get_be32(pb); /* Previous total size. */
+ size = get_be32(pb); /* Total size of this frame. */
+
+ if (thp->has_audio)
+ get_be32(pb); /* Audio size. */
+
+ ret = av_get_packet(pb, pkt, size);
+ if (ret != size) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+
+ pkt->stream_index = thp->video_stream_index;
+ thp->frame++;
+
+ return 0;
+}
+
+AVInputFormat thp_demuxer = {
+ "thp",
+ "THP",
+ sizeof(ThpDemuxContext),
+ thp_probe,
+ thp_read_header,
+ thp_read_packet
+};
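The new thp.c demuxer converts the big-endian float frame rate into an AVRational and then passes den and num swapped to av_set_pts_info(), because the stream time base has to be the reciprocal of the frame rate. With an illustrative NTSC value:

    /* Sketch: the stream time base must be 1/fps. */
    static AVRational thp_time_base_sketch(AVRational fps)  /* e.g. {30000, 1001} */
    {
        AVRational tb = { fps.den, fps.num };    /* 1001/30000 s per frame */
        return tb;
    }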
diff --git a/contrib/ffmpeg/libavformat/tiertexseq.c b/contrib/ffmpeg/libavformat/tiertexseq.c
index b1a39bf76..8f565824a 100644
--- a/contrib/ffmpeg/libavformat/tiertexseq.c
+++ b/contrib/ffmpeg/libavformat/tiertexseq.c
@@ -62,7 +62,7 @@ static int seq_probe(AVProbeData *p)
{
int i;
- if (p->buf_size < 256)
+ if (p->buf_size < 258)
return 0;
/* there's no real header in a .seq file, the only thing they have in common */
@@ -71,6 +71,9 @@ static int seq_probe(AVProbeData *p)
if (p->buf[i] != 0)
return 0;
+ if(p->buf[256]==0 && p->buf[257]==0)
+ return 0;
+
/* only one fourth of the score since the previous check is too naive */
return AVPROBE_SCORE_MAX / 4;
}
diff --git a/contrib/ffmpeg/libavformat/tta.c b/contrib/ffmpeg/libavformat/tta.c
index a513d9d38..a3709437e 100644
--- a/contrib/ffmpeg/libavformat/tta.c
+++ b/contrib/ffmpeg/libavformat/tta.c
@@ -40,9 +40,7 @@ static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
TTAContext *c = s->priv_data;
AVStream *st;
- int i, channels, bps, samplerate, datalen, framelen, start;
-
- start = url_ftell(&s->pb);
+ int i, channels, bps, samplerate, datalen, framelen;
if (get_le32(&s->pb) != ff_get_fourcc("TTA1"))
return -1; // not tta file
@@ -64,7 +62,7 @@ static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
url_fskip(&s->pb, 4); // header crc
- framelen = 1.04489795918367346939 * samplerate;
+ framelen = samplerate*256/245;
c->totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);
c->currentframe = 0;
@@ -77,7 +75,7 @@ static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
return AVERROR_NOMEM;
for (i = 0; i < c->totalframes; i++)
- c->seektable[i] = get_le32(&s->pb);
+ c->seektable[i] = get_le32(&s->pb);
url_fskip(&s->pb, 4); // seektable crc
st = av_new_stream(s, 0);
@@ -90,14 +88,14 @@ static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec->sample_rate = samplerate;
st->codec->bits_per_sample = bps;
- st->codec->extradata_size = url_ftell(&s->pb) - start;
+ st->codec->extradata_size = url_ftell(&s->pb);
if(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
//this check is redundant as get_buffer should fail
av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
return -1;
}
st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
- url_fseek(&s->pb, start, SEEK_SET); // or SEEK_CUR and -size ? :)
+ url_fseek(&s->pb, 0, SEEK_SET);
get_buffer(&s->pb, st->codec->extradata, st->codec->extradata_size);
return 0;
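The tta.c change replaces the magic float 1.04489795918367346939 with exact integer arithmetic: a TTA frame holds samplerate * 256 / 245 samples, and 256/245 is precisely that constant. For example:

    /* 256.0 / 245.0 = 1.04489795918..., so integer math matches the old float. */
    static int tta_framelen_sketch(int samplerate)   /* e.g. 44100 */
    {
        return samplerate * 256 / 245;               /* 44100 -> 46080 samples */
    }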
diff --git a/contrib/ffmpeg/libavformat/udp.c b/contrib/ffmpeg/libavformat/udp.c
index 96fa4e152..bbf8ca2ec 100644
--- a/contrib/ffmpeg/libavformat/udp.c
+++ b/contrib/ffmpeg/libavformat/udp.c
@@ -20,15 +20,7 @@
*/
#include "avformat.h"
#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#ifndef __BEOS__
-# include <arpa/inet.h>
-#else
-# include "barpainet.h"
-#endif
-#include <netdb.h>
+#include "network.h"
#ifndef IPV6_ADD_MEMBERSHIP
#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
@@ -214,11 +206,7 @@ static int udp_ipv6_set_local(URLContext *h) {
fail:
if (udp_fd >= 0)
-#ifdef CONFIG_BEOS_NETSERVER
closesocket(udp_fd);
-#else
- close(udp_fd);
-#endif
if(res0)
freeaddrinfo(res0);
return -1;
@@ -307,7 +295,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
s = av_malloc(sizeof(UDPContext));
if (!s)
- return -ENOMEM;
+ return AVERROR(ENOMEM);
h->priv_data = s;
s->ttl = 16;
@@ -342,7 +330,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
}
#ifndef CONFIG_IPV6
- udp_fd = socket(PF_INET, SOCK_DGRAM, 0);
+ udp_fd = socket(AF_INET, SOCK_DGRAM, 0);
if (udp_fd < 0)
goto fail;
@@ -367,7 +355,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
getsockname(udp_fd, (struct sockaddr *)&my_addr1, &len);
s->local_port = ntohs(my_addr1.sin_port);
-#ifndef CONFIG_BEOS_NETSERVER
+#ifdef IP_MULTICAST_TTL
if (s->is_multicast) {
if (h->flags & URL_WRONLY) {
/* output */
@@ -395,7 +383,6 @@ static int udp_open(URLContext *h, const char *uri, int flags)
udp_fd = udp_ipv6_set_local(h);
if (udp_fd < 0)
goto fail;
-#ifndef CONFIG_BEOS_NETSERVER
if (s->is_multicast) {
if (h->flags & URL_WRONLY) {
if (udp_ipv6_set_multicast_ttl(udp_fd, s->ttl, (struct sockaddr *)&s->dest_addr) < 0)
@@ -406,7 +393,6 @@ static int udp_open(URLContext *h, const char *uri, int flags)
}
}
#endif
-#endif
if (is_output) {
/* limit the tx buf size to limit latency */
@@ -421,11 +407,7 @@ static int udp_open(URLContext *h, const char *uri, int flags)
return 0;
fail:
if (udp_fd >= 0)
-#ifdef CONFIG_BEOS_NETSERVER
closesocket(udp_fd);
-#else
- close(udp_fd);
-#endif
av_free(s);
return AVERROR_IO;
}
@@ -482,22 +464,20 @@ static int udp_close(URLContext *h)
{
UDPContext *s = h->priv_data;
-#ifndef CONFIG_BEOS_NETSERVER
#ifndef CONFIG_IPV6
+#ifdef IP_DROP_MEMBERSHIP
if (s->is_multicast && !(h->flags & URL_WRONLY)) {
if (setsockopt(s->udp_fd, IPPROTO_IP, IP_DROP_MEMBERSHIP,
&s->mreq, sizeof(s->mreq)) < 0) {
perror("IP_DROP_MEMBERSHIP");
}
}
+#endif
#else
if (s->is_multicast && !(h->flags & URL_WRONLY))
udp_ipv6_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);
#endif
- close(s->udp_fd);
-#else
closesocket(s->udp_fd);
-#endif
av_free(s);
return 0;
}
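udp.c now guards the multicast setup with the socket-option macros themselves (IP_MULTICAST_TTL, IP_DROP_MEMBERSHIP) rather than a BeOS-specific config define. A minimal sketch of that pattern, with an illustrative TTL value and the usual socket headers assumed:

    /* Sketch: guard multicast options by the macros that define them. */
    #ifdef IP_MULTICAST_TTL
    static void set_mcast_ttl_sketch(int fd)
    {
        int ttl = 16;                                /* illustrative TTL */
        if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL,
                       &ttl, sizeof(ttl)) < 0)
            perror("IP_MULTICAST_TTL");
    }
    #endif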
diff --git a/contrib/ffmpeg/libavformat/utils.c b/contrib/ffmpeg/libavformat/utils.c
index eaeeb7c16..36cb269b5 100644
--- a/contrib/ffmpeg/libavformat/utils.c
+++ b/contrib/ffmpeg/libavformat/utils.c
@@ -32,7 +32,6 @@
static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
static void av_frac_add(AVFrac *f, int64_t incr);
-static void av_frac_set(AVFrac *f, int64_t val);
/** head of registered input format linked list. */
AVInputFormat *first_iformat = NULL;
@@ -140,9 +139,6 @@ AVOutputFormat *guess_stream_format(const char *short_name, const char *filename
return fmt;
}
-/**
- * Guesses the codec id based upon muxer and filename.
- */
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
const char *filename, const char *mime_type, enum CodecType type){
if(type == CODEC_TYPE_VIDEO){
@@ -162,9 +158,6 @@ enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
return CODEC_ID_NONE;
}
-/**
- * finds AVInputFormat based on input format's short name.
- */
AVInputFormat *av_find_input_format(const char *short_name)
{
AVInputFormat *fmt;
@@ -177,22 +170,12 @@ AVInputFormat *av_find_input_format(const char *short_name)
/* memory handling */
-/**
- * Default packet destructor.
- */
void av_destruct_packet(AVPacket *pkt)
{
av_free(pkt->data);
pkt->data = NULL; pkt->size = 0;
}
-/**
- * Allocate the payload of a packet and intialized its fields to default values.
- *
- * @param pkt packet
- * @param size wanted payload size
- * @return 0 if OK. AVERROR_xxx otherwise.
- */
int av_new_packet(AVPacket *pkt, int size)
{
uint8_t *data;
@@ -210,13 +193,6 @@ int av_new_packet(AVPacket *pkt, int size)
return 0;
}
-/**
- * Allocate and read the payload of a packet and intialized its fields to default values.
- *
- * @param pkt packet
- * @param size wanted payload size
- * @return >0 (read size) if OK. AVERROR_xxx otherwise.
- */
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
{
int ret= av_new_packet(pkt, size);
@@ -235,8 +211,6 @@ int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
return ret;
}
-/* This is a hack - the packet memory allocation stuff is broken. The
- packet is allocated if it was not really allocated */
int av_dup_packet(AVPacket *pkt)
{
if (pkt->destruct != av_destruct_packet) {
@@ -257,30 +231,20 @@ int av_dup_packet(AVPacket *pkt)
return 0;
}
-/**
- * Allocate the payload of a packet and intialized its fields to default values.
- *
- * @param filename possible numbered sequence string
- * @return 1 if a valid numbered sequence string, 0 otherwise.
- */
int av_filename_number_test(const char *filename)
{
char buf[1024];
return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
-/**
- * Guess file format.
- */
-AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
+static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
{
AVInputFormat *fmt1, *fmt;
- int score, score_max;
+ int score;
fmt = NULL;
- score_max = 0;
for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
- if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
+ if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
continue;
score = 0;
if (fmt1->read_probe) {
@@ -290,14 +254,19 @@ AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
score = 50;
}
}
- if (score > score_max) {
- score_max = score;
+ if (score > *score_max) {
+ *score_max = score;
fmt = fmt1;
}
}
return fmt;
}
+AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
+ int score=0;
+ return av_probe_input_format2(pd, is_opened, &score);
+}
+
/************************************************************/
/* input media file */
@@ -327,6 +296,7 @@ static const AVOption options[]={
{"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
{"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
{"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
+{"analyzeduration", NULL, OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
{NULL},
};
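The options table above gains an "analyzeduration" AVOption backed by the new max_analyze_duration field (in AV_TIME_BASE units, defaulting to three seconds here). A hedged sketch of how a caller might raise it before av_find_stream_info():

    /* Sketch only: set the field directly on an already opened context. */
    static void raise_analyze_window(AVFormatContext *ic)
    {
        ic->max_analyze_duration = 5 * AV_TIME_BASE;   /* analyse up to 5 s */
    }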
@@ -336,10 +306,8 @@ static const AVOption options[]={
static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
-#if LIBAVFORMAT_VERSION_INT >= ((51<<16)+(0<<8)+0)
-static
-#endif
-void avformat_get_context_defaults(AVFormatContext *s){
+static void avformat_get_context_defaults(AVFormatContext *s)
+{
memset(s, 0, sizeof(AVFormatContext));
s->av_class = &av_format_context_class;
@@ -357,10 +325,6 @@ AVFormatContext *av_alloc_format_context(void)
return ic;
}
-/**
- * Allocates all the structures needed to read an input stream.
- * This does not open the needed codecs for decoding the stream[s].
- */
int av_open_input_stream(AVFormatContext **ic_ptr,
ByteIOContext *pb, const char *filename,
AVInputFormat *fmt, AVFormatParameters *ap)
@@ -422,17 +386,6 @@ int av_open_input_stream(AVFormatContext **ic_ptr,
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)
-/**
- * Open a media file as input. The codec are not opened. Only the file
- * header (if present) is read.
- *
- * @param ic_ptr the opened media file handle is put here
- * @param filename filename to open.
- * @param fmt if non NULL, force the file format to use
- * @param buf_size optional buffer size (zero if default is OK)
- * @param ap additionnal parameters needed when opening the file (NULL if default)
- * @return 0 if OK. AVERROR_xxx otherwise.
- */
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
AVInputFormat *fmt,
int buf_size,
@@ -474,10 +427,11 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
}
for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
+ int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
/* read probe data */
pd->buf= av_realloc(pd->buf, probe_size);
pd->buf_size = get_buffer(pb, pd->buf, probe_size);
- if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
+ if (url_fseek(pb, 0, SEEK_SET) < 0) {
url_fclose(pb);
if (url_fopen(pb, filename, URL_RDONLY) < 0) {
file_opened = 0;
@@ -486,7 +440,7 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
}
}
/* guess file format */
- fmt = av_probe_input_format(pd, 1);
+ fmt = av_probe_input_format2(pd, 1, &score);
}
av_freep(&pd->buf);
}
@@ -528,16 +482,6 @@ int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
/*******************************************************/
-/**
- * Read a transport packet from a media file.
- *
- * This function is absolete and should never be used.
- * Use av_read_frame() instead.
- *
- * @param s media file handle
- * @param pkt is filled
- * @return 0 if OK. AVERROR_xxx if error.
- */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
return s->iformat->read_packet(s, pkt);
@@ -639,7 +583,7 @@ static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
AVCodecParserContext *pc, AVPacket *pkt)
{
- int num, den, presentation_delayed;
+ int num, den, presentation_delayed, delay, i;
/* handle wrapping */
if(st->cur_dts != AV_NOPTS_VALUE){
if(pkt->pts != AV_NOPTS_VALUE)
@@ -659,91 +603,81 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
pkt->flags |= PKT_FLAG_KEY;
/* do we have a video B frame ? */
+ delay= st->codec->has_b_frames;
presentation_delayed = 0;
- if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
- /* XXX: need has_b_frame, but cannot get it if the codec is
- not initialized */
- if (( st->codec->codec_id == CODEC_ID_H264
- || st->codec->has_b_frames) &&
- pc && pc->pict_type != FF_B_TYPE)
- presentation_delayed = 1;
- /* this may be redundant, but it shouldnt hurt */
- if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
- presentation_delayed = 1;
- }
+ /* XXX: need has_b_frame, but cannot get it if the codec is
+ not initialized */
+ if (delay &&
+ pc && pc->pict_type != FF_B_TYPE)
+ presentation_delayed = 1;
+ /* this may be redundant, but it shouldnt hurt */
+ if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
+ presentation_delayed = 1;
if(st->cur_dts == AV_NOPTS_VALUE){
- if(presentation_delayed) st->cur_dts = -pkt->duration;
- else st->cur_dts = 0;
+ st->cur_dts = -delay * pkt->duration;
}
// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
/* interpolate PTS and DTS if they are not present */
- if (presentation_delayed) {
- /* DTS = decompression time stamp */
- /* PTS = presentation time stamp */
- if (pkt->dts == AV_NOPTS_VALUE) {
- /* if we know the last pts, use it */
- if(st->last_IP_pts != AV_NOPTS_VALUE)
- st->cur_dts = pkt->dts = st->last_IP_pts;
- else
+ if(delay <=1){
+ if (presentation_delayed) {
+ /* DTS = decompression time stamp */
+ /* PTS = presentation time stamp */
+ if (pkt->dts == AV_NOPTS_VALUE)
+ pkt->dts = st->last_IP_pts;
+ if (pkt->dts == AV_NOPTS_VALUE)
pkt->dts = st->cur_dts;
- } else {
- st->cur_dts = pkt->dts;
- }
- /* this is tricky: the dts must be incremented by the duration
- of the frame we are displaying, i.e. the last I or P frame */
- if (st->last_IP_duration == 0)
- st->cur_dts += pkt->duration;
- else
- st->cur_dts += st->last_IP_duration;
- st->last_IP_duration = pkt->duration;
- st->last_IP_pts= pkt->pts;
- /* cannot compute PTS if not present (we can compute it only
- by knowing the futur */
- } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
- if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
- int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
- int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
- if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
- pkt->pts += pkt->duration;
-// av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
- }
- }
- /* presentation is not delayed : PTS and DTS are the same */
- if (pkt->pts == AV_NOPTS_VALUE) {
- if (pkt->dts == AV_NOPTS_VALUE) {
- pkt->pts = st->cur_dts;
- pkt->dts = st->cur_dts;
+ /* this is tricky: the dts must be incremented by the duration
+ of the frame we are displaying, i.e. the last I or P frame */
+ if (st->last_IP_duration == 0)
+ st->last_IP_duration = pkt->duration;
+ st->cur_dts = pkt->dts + st->last_IP_duration;
+ st->last_IP_duration = pkt->duration;
+ st->last_IP_pts= pkt->pts;
+ /* cannot compute PTS if not present (we can compute it only
+ by knowing the futur */
+ } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
+ if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
+ int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
+ int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
+ if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
+ pkt->pts += pkt->duration;
+ // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
+ }
}
- else {
- st->cur_dts = pkt->dts;
+
+ /* presentation is not delayed : PTS and DTS are the same */
+ if(pkt->pts == AV_NOPTS_VALUE)
pkt->pts = pkt->dts;
- }
- } else {
- st->cur_dts = pkt->pts;
+ if(pkt->pts == AV_NOPTS_VALUE)
+ pkt->pts = st->cur_dts;
pkt->dts = pkt->pts;
+ st->cur_dts = pkt->pts + pkt->duration;
}
- st->cur_dts += pkt->duration;
}
-// av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
+
+ if(pkt->pts != AV_NOPTS_VALUE){
+ st->pts_buffer[0]= pkt->pts;
+ for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
+ st->pts_buffer[i]= (i-delay-1) * pkt->duration;
+ for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
+ FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
+ if(pkt->dts == AV_NOPTS_VALUE)
+ pkt->dts= st->pts_buffer[0];
+ if(pkt->dts > st->cur_dts)
+ st->cur_dts = pkt->dts;
+ }
+
+// av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* update flags */
if (pc) {
pkt->flags = 0;
/* key frame computation */
- switch(st->codec->codec_type) {
- case CODEC_TYPE_VIDEO:
if (pc->pict_type == FF_I_TYPE)
pkt->flags |= PKT_FLAG_KEY;
- break;
- case CODEC_TYPE_AUDIO:
- pkt->flags |= PKT_FLAG_KEY;
- break;
- default:
- break;
- }
}
}
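The compute_pkt_fields() rework above derives missing dts values from a small pts reorder buffer: with a B-frame delay of delay, the dts of the current packet is the smallest of the last delay+1 pts values. A standalone sketch of that bubble step, assuming the buffer already holds the earlier pts values:

    #include <stdint.h>

    /* Sketch: keep the last delay+1 pts values partially sorted so that
     * pts_buffer[0] ends up smallest; that is the dts candidate. */
    static int64_t dts_from_pts(int64_t *pts_buffer, int delay, int64_t new_pts)
    {
        int i;
        pts_buffer[0] = new_pts;
        for (i = 0; i < delay && pts_buffer[i] > pts_buffer[i + 1]; i++) {
            int64_t tmp       = pts_buffer[i];
            pts_buffer[i]     = pts_buffer[i + 1];
            pts_buffer[i + 1] = tmp;
        }
        return pts_buffer[0];
    }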
@@ -787,6 +721,12 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
pkt->dts = st->parser->dts;
pkt->destruct = av_destruct_packet_nofree;
compute_pkt_fields(s, st, st->parser, pkt);
+
+ if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
+ av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
+ 0, 0, AVINDEX_KEYFRAME);
+ }
+
break;
}
} else {
@@ -798,7 +738,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
/* read next packet */
ret = av_read_packet(s, &s->cur_pkt);
if (ret < 0) {
- if (ret == -EAGAIN)
+ if (ret == AVERROR(EAGAIN))
return ret;
/* return the last frames, if any */
for(i = 0; i < s->nb_streams; i++) {
@@ -835,6 +775,10 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
}else if(st->need_parsing == 2){
st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
}
+ if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
+ st->parser->last_frame_offset=
+ st->parser->cur_offset= s->cur_pkt.pos;
+ }
}
}
}
@@ -848,25 +792,6 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
return 0;
}
-/**
- * Return the next frame of a stream.
- *
- * The returned packet is valid
- * until the next av_read_frame() or until av_close_input_file() and
- * must be freed with av_free_packet. For video, the packet contains
- * exactly one frame. For audio, it contains an integer number of
- * frames if each frame has a known fixed size (e.g. PCM or ADPCM
- * data). If the audio frames have a variable size (e.g. MPEG audio),
- * then it contains one frame.
- *
- * pkt->pts, pkt->dts and pkt->duration are always set to correct
- * values in AV_TIME_BASE unit (and guessed if the format cannot
- * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format
- * has B frames, so it is better to rely on pkt->dts if you do not
- * decompress the payload.
- *
- * @return 0 if OK, < 0 if error or end of file.
- */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
AVPacketList *pktl;
@@ -905,7 +830,7 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
AVPacketList **plast_pktl= &s->packet_buffer;
int ret= av_read_frame_internal(s, pkt);
if(ret<0){
- if(pktl && ret != -EAGAIN){
+ if(pktl && ret != AVERROR(EAGAIN)){
eof=1;
continue;
}else
@@ -995,18 +920,10 @@ static void av_read_frame_flush(AVFormatContext *s)
st->parser = NULL;
}
st->last_IP_pts = AV_NOPTS_VALUE;
- st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
+ st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
}
}
-/**
- * Updates cur_dts of all streams based on given timestamp and AVStream.
- *
- * Stream ref_st unchanged, others set cur_dts in their native timebase
- * only needed for timestamp wrapping or if (dts not set and pts!=dts)
- * @param timestamp new dts expressed in time_base of param ref_st
- * @param ref_st reference stream giving time_base of param timestamp
- */
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
int i;
@@ -1019,11 +936,6 @@ void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
}
}
-/**
- * Add a index entry into a sorted list updateing if it is already there.
- *
- * @param timestamp timestamp in the timebase of the given stream
- */
int av_add_index_entry(AVStream *st,
int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
@@ -1111,14 +1023,6 @@ static int is_raw_stream(AVFormatContext *s)
return 1;
}
-/**
- * Gets the index for a specific timestamp.
- * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
- * the timestamp which is <= the requested one, if backward is 0
- * then it will be >=
- * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
- * @return < 0 if no such timestamp could be found
- */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
int flags)
{
@@ -1153,12 +1057,6 @@ int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
#define DEBUG_SEEK
-/**
- * Does a binary search using av_index_search_timestamp() and AVCodec.read_timestamp().
- * this isnt supposed to be called directly by a user application, but by demuxers
- * @param target_ts target timestamp in the time base of the given stream
- * @param stream_index stream number
- */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
AVInputFormat *avif= s->iformat;
int64_t pos_min, pos_max, pos, pos_limit;
@@ -1223,12 +1121,6 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
return 0;
}
-/**
- * Does a binary search using read_timestamp().
- * this isnt supposed to be called directly by a user application, but by demuxers
- * @param target_ts target timestamp in the time base of the given stream
- * @param stream_index stream number
- */
int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
int64_t pos, ts;
int64_t start_pos, filesize;
@@ -1369,23 +1261,42 @@ static int av_seek_frame_generic(AVFormatContext *s,
AVStream *st;
AVIndexEntry *ie;
- if (!s->index_built) {
- if (is_raw_stream(s)) {
- av_build_index_raw(s);
- } else {
- return -1;
- }
- s->index_built = 1;
- }
-
st = s->streams[stream_index];
+
index = av_index_search_timestamp(st, timestamp, flags);
+
+ if(index < 0){
+ int i;
+ AVPacket pkt;
+
+ if(st->index_entries && st->nb_index_entries){
+ ie= &st->index_entries[st->nb_index_entries-1];
+ url_fseek(&s->pb, ie->pos, SEEK_SET);
+ av_update_cur_dts(s, st, ie->timestamp);
+ }else
+ url_fseek(&s->pb, 0, SEEK_SET);
+
+ for(i=0;; i++) {
+ int ret = av_read_frame(s, &pkt);
+ if(ret<0)
+ break;
+ av_free_packet(&pkt);
+ if(stream_index == pkt.stream_index){
+ if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
+ break;
+ }
+ }
+ index = av_index_search_timestamp(st, timestamp, flags);
+ }
if (index < 0)
return -1;
- /* now we have found the index, we can seek */
- ie = &st->index_entries[index];
av_read_frame_flush(s);
+ if (s->iformat->read_seek){
+ if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
+ return 0;
+ }
+ ie = &st->index_entries[index];
url_fseek(&s->pb, ie->pos, SEEK_SET);
av_update_cur_dts(s, st, ie->timestamp);
@@ -1393,17 +1304,6 @@ static int av_seek_frame_generic(AVFormatContext *s,
return 0;
}
-/**
- * Seek to the key frame at timestamp.
- * 'timestamp' in 'stream_index'.
- * @param stream_index If stream_index is (-1), a default
- * stream is selected, and timestamp is automatically converted
- * from AV_TIME_BASE units to the stream specific time_base.
- * @param timestamp timestamp in AVStream.time_base units
- * or if there is no stream specified then in AV_TIME_BASE units
- * @param flags flags which select direction and seeking mode
- * @return >= 0 on success
- */
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
int ret;
@@ -1472,8 +1372,8 @@ static void av_update_stream_timings(AVFormatContext *ic)
int i;
AVStream *st;
- start_time = MAXINT64;
- end_time = MININT64;
+ start_time = INT64_MAX;
+ end_time = INT64_MIN;
for(i = 0;i < ic->nb_streams; i++) {
st = ic->streams[i];
if (st->start_time != AV_NOPTS_VALUE) {
@@ -1488,9 +1388,9 @@ static void av_update_stream_timings(AVFormatContext *ic)
}
}
}
- if (start_time != MAXINT64) {
+ if (start_time != INT64_MAX) {
ic->start_time = start_time;
- if (end_time != MININT64) {
+ if (end_time != INT64_MIN) {
ic->duration = end_time - start_time;
if (ic->file_size > 0) {
/* compute the bit rate */
@@ -1557,7 +1457,7 @@ static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
#define DURATION_MAX_READ_SIZE 250000
/* only usable for MPEG-PS streams */
-static void av_estimate_timings_from_pts(AVFormatContext *ic)
+static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
{
AVPacket pkt1, *pkt = &pkt1;
AVStream *st;
@@ -1565,21 +1465,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic)
int64_t end_time;
int64_t filesize, offset, duration;
- /* free previous packet */
- if (ic->cur_st && ic->cur_st->parser)
- av_free_packet(&ic->cur_pkt);
- ic->cur_st = NULL;
-
- /* flush packet queue */
- flush_packet_queue(ic);
-
- for(i=0;i<ic->nb_streams;i++) {
- st = ic->streams[i];
- if (st->parser) {
- av_parser_close(st->parser);
- st->parser= NULL;
- }
- }
+ av_read_frame_flush(ic);
/* we read the first packets to get the first PTS (not fully
accurate, but it is enough now) */
@@ -1649,10 +1535,10 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic)
fill_all_stream_timings(ic);
- url_fseek(&ic->pb, 0, SEEK_SET);
+ url_fseek(&ic->pb, old_offset, SEEK_SET);
}
-static void av_estimate_timings(AVFormatContext *ic)
+static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
{
int64_t file_size;
@@ -1670,7 +1556,7 @@ static void av_estimate_timings(AVFormatContext *ic)
!strcmp(ic->iformat->name, "mpegts")) &&
file_size && !ic->pb.is_streamed) {
/* get accurate estimate from the PTSes */
- av_estimate_timings_from_pts(ic);
+ av_estimate_timings_from_pts(ic, old_offset);
} else if (av_has_timings(ic)) {
/* at least one components has timings - we use them for all
the components */
@@ -1720,7 +1606,7 @@ static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
{
int16_t *samples;
AVCodec *codec;
- int got_picture, ret=0;
+ int got_picture, data_size, ret=0;
AVFrame picture;
if(!st->codec->codec){
@@ -1739,11 +1625,12 @@ static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
&got_picture, (uint8_t *)data, size);
break;
case CODEC_TYPE_AUDIO:
- samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ samples = av_malloc(data_size);
if (!samples)
goto fail;
- ret = avcodec_decode_audio(st->codec, samples,
- &got_picture, (uint8_t *)data, size);
+ ret = avcodec_decode_audio2(st->codec, samples,
+ &data_size, (uint8_t *)data, size);
av_free(samples);
break;
default:
@@ -1757,19 +1644,12 @@ static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
/* absolute maximum size we read until we abort */
#define MAX_READ_SIZE 5000000
-/* maximum duration until we stop analysing the stream */
-#define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 3.0))
+#define MAX_STD_TIMEBASES (60*12+5)
+static int get_std_framerate(int i){
+ if(i<60*12) return i*1001;
+ else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
+}
-/**
- * Read the beginning of a media file to get stream information. This
- * is useful for file formats with no headers such as MPEG. This
- * function also compute the real frame rate in case of mpeg2 repeat
- * frame mode.
- *
- * @param ic media file handle
- * @return >=0 if OK. AVERROR_xxx if error.
- * @todo let user decide somehow what information is needed so we dont waste time geting stuff the user doesnt need
- */
int av_find_stream_info(AVFormatContext *ic)
{
int i, count, ret, read_size, j;
@@ -1777,8 +1657,14 @@ int av_find_stream_info(AVFormatContext *ic)
AVPacket pkt1, *pkt;
AVPacketList *pktl=NULL, **ppktl;
int64_t last_dts[MAX_STREAMS];
- int64_t duration_sum[MAX_STREAMS];
int duration_count[MAX_STREAMS]={0};
+ double (*duration_error)[MAX_STD_TIMEBASES];
+ offset_t old_offset = url_ftell(&ic->pb);
+ int64_t codec_info_duration[MAX_STREAMS]={0};
+ int codec_info_nb_frames[MAX_STREAMS]={0};
+
+ duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
+ if (!duration_error) return AVERROR_NOMEM;
for(i=0;i<ic->nb_streams;i++) {
st = ic->streams[i];
@@ -1799,7 +1685,6 @@ int av_find_stream_info(AVFormatContext *ic)
for(i=0;i<MAX_STREAMS;i++){
last_dts[i]= AV_NOPTS_VALUE;
- duration_sum[i]= INT64_MAX;
}
count = 0;
@@ -1812,7 +1697,7 @@ int av_find_stream_info(AVFormatContext *ic)
if (!has_codec_parameters(st->codec))
break;
/* variable fps and no guess at the real fps */
- if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
+ if( (st->codec->time_base.den >= 101LL*st->codec->time_base.num || st->codec->codec_id == CODEC_ID_MPEG2VIDEO)
&& duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
break;
if(st->parser && st->parser->parser->split && !st->codec->extradata)
@@ -1875,9 +1760,10 @@ int av_find_stream_info(AVFormatContext *ic)
read_size += pkt->size;
st = ic->streams[pkt->stream_index];
- st->codec_info_duration += pkt->duration;
+ if(codec_info_nb_frames[st->index]>1)
+ codec_info_duration[st->index] += pkt->duration;
if (pkt->duration != 0)
- st->codec_info_nb_frames++;
+ codec_info_nb_frames[st->index]++;
{
int index= pkt->stream_index;
@@ -1885,21 +1771,22 @@ int av_find_stream_info(AVFormatContext *ic)
int64_t duration= pkt->dts - last;
if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
- if(duration*duration_count[index]*10/9 < duration_sum[index]){
- duration_sum[index]= duration;
- duration_count[index]=1;
- }else{
- int factor= av_rescale(2*duration, duration_count[index], duration_sum[index]);
- if(factor==3)
- duration_count[index] *= 2;
- factor= av_rescale(duration, duration_count[index], duration_sum[index]);
- duration_sum[index] += duration;
- duration_count[index]+= factor;
+ double dur= duration * av_q2d(st->time_base);
+
+// if(st->codec->codec_type == CODEC_TYPE_VIDEO)
+// av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
+ if(duration_count[index] < 2)
+ memset(duration_error, 0, MAX_STREAMS * sizeof(*duration_error));
+ for(i=1; i<MAX_STD_TIMEBASES; i++){
+ int framerate= get_std_framerate(i);
+ int ticks= lrintf(dur*framerate/(1001*12));
+ double error= dur - ticks*1001*12/(double)framerate;
+ duration_error[index][i] += error*error;
}
- if(st->codec_info_nb_frames == 0 && 0)
- st->codec_info_duration += duration;
+ duration_count[index]++;
}
- last_dts[pkt->stream_index]= pkt->dts;
+ if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
+ last_dts[pkt->stream_index]= pkt->dts;
}
if(st->parser && st->parser->parser->split && !st->codec->extradata){
int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
@@ -1932,7 +1819,7 @@ int av_find_stream_info(AVFormatContext *ic)
(st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
try_decode_frame(st, pkt->data, pkt->size);
- if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
+ if (av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
break;
}
count++;
@@ -1951,28 +1838,19 @@ int av_find_stream_info(AVFormatContext *ic)
st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
if(duration_count[i]
- && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
+ && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
//FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
- st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den){
- int64_t num, den, error, best_error;
-
- num= st->time_base.den*duration_count[i];
- den= st->time_base.num*duration_sum[i];
-
- best_error= INT64_MAX;
- for(j=1; j<60*12; j++){
- error= FFABS(1001*12*num - 1001*j*den);
+ st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
+ double best_error= 2*av_q2d(st->time_base);
+ best_error= best_error*best_error*duration_count[i]*1000*12*30;
+
+ for(j=1; j<MAX_STD_TIMEBASES; j++){
+ double error= duration_error[i][j] * get_std_framerate(j);
+// if(st->codec->codec_type == CODEC_TYPE_VIDEO)
+// av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
if(error < best_error){
best_error= error;
- av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j, 12, INT_MAX);
- }
- }
- for(j=0; j<3; j++){
- static const int ticks[]= {24,30,60};
- error= FFABS(1001*12*num - 1000*12*den * ticks[j]);
- if(error < best_error){
- best_error= error;
- av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, ticks[j]*1000, 1001, INT_MAX);
+ av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
}
}
}
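In the frame-rate estimation above, get_std_framerate(j) encodes each candidate rate as fps * 12 * 1001, and the candidate with the smallest accumulated squared timestamp error wins. Two worked values, using the helper introduced earlier in this hunk:

    /* Sketch: decoding candidate indices back to frames per second. */
    double fps_pal  = get_std_framerate(25 * 12)     / (12.0 * 1001);  /* = 25.000  */
    double fps_ntsc = get_std_framerate(60 * 12 + 1) / (12.0 * 1001);  /* ~= 29.970 */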
@@ -1987,10 +1865,13 @@ int av_find_stream_info(AVFormatContext *ic)
st->r_frame_rate.den = st->time_base.num;
}
}
+ }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ if(!st->codec->bits_per_sample)
+ st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
}
}
- av_estimate_timings(ic);
+ av_estimate_timings(ic, old_offset);
#if 0
/* correct DTS for b frame streams with no timestamps */
for(i=0;i<ic->nb_streams;i++) {
@@ -2015,15 +1896,14 @@ int av_find_stream_info(AVFormatContext *ic)
}
}
#endif
+
+ av_free(duration_error);
+
return ret;
}
/*******************************************************/
-/**
- * start playing a network based stream (e.g. RTSP stream) at the
- * current position
- */
int av_read_play(AVFormatContext *s)
{
if (!s->iformat->read_play)
@@ -2031,11 +1911,6 @@ int av_read_play(AVFormatContext *s)
return s->iformat->read_play(s);
}
-/**
- * Pause a network based stream (e.g. RTSP stream).
- *
- * Use av_read_play() to resume it.
- */
int av_read_pause(AVFormatContext *s)
{
if (!s->iformat->read_pause)
@@ -2043,11 +1918,6 @@ int av_read_pause(AVFormatContext *s)
return s->iformat->read_pause(s);
}
-/**
- * Close a media file (but not its codecs).
- *
- * @param s media file handle
- */
void av_close_input_file(AVFormatContext *s)
{
int i, must_open_file;
@@ -2082,16 +1952,6 @@ void av_close_input_file(AVFormatContext *s)
av_free(s);
}
-/**
- * Add a new stream to a media file.
- *
- * Can only be called in the read_header() function. If the flag
- * AVFMTCTX_NOHEADER is in the format context, then new streams
- * can be added in read_packet too.
- *
- * @param s media file handle
- * @param id file format dependent stream id
- */
AVStream *av_new_stream(AVFormatContext *s, int id)
{
AVStream *st;
@@ -2147,13 +2007,6 @@ int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
return 0;
}
-/**
- * allocate the stream private data and write the stream header to an
- * output media file
- *
- * @param s media file handle
- * @return 0 if OK. AVERROR_xxx if error.
- */
int av_write_header(AVFormatContext *s)
{
int ret, i;
@@ -2181,6 +2034,23 @@ int av_write_header(AVFormatContext *s)
}
break;
}
+
+ if(s->oformat->codec_tag){
+ if(st->codec->codec_tag){
+ //FIXME
+ //check that tag + id is in the table
+ //if neither is in the table -> ok
+ //if tag is in the table with another id -> FAIL
+ //if id is in the table with another tag -> FAIL unless strict < ?
+ }else
+ st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
+ }
+ }
+
+ if (!s->priv_data && s->oformat->priv_data_size > 0) {
+ s->priv_data = av_mallocz(s->oformat->priv_data_size);
+ if (!s->priv_data)
+ return AVERROR_NOMEM;
}
if(s->oformat->write_header){
@@ -2250,11 +2120,12 @@ static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
}
if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
- av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
+ av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64" st:%d\n", st->cur_dts, pkt->dts, st->index);
return -1;
}
if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
- av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
+ av_log(NULL, AV_LOG_ERROR, "error, pts < dts (%"PRId64" < %"PRId64")\n",
+ pkt->pts, pkt->dts);
return -1;
}
@@ -2288,19 +2159,12 @@ static void truncate_ts(AVStream *st, AVPacket *pkt){
// if(pkt->dts < 0)
// pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
- pkt->pts &= pts_mask;
- pkt->dts &= pts_mask;
+ if (pkt->pts != AV_NOPTS_VALUE)
+ pkt->pts &= pts_mask;
+ if (pkt->dts != AV_NOPTS_VALUE)
+ pkt->dts &= pts_mask;
}
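The AV_NOPTS_VALUE guards added above keep the wrap mask from destroying the "no timestamp" sentinel: masking it down to pts_wrap_bits would turn it into an ordinary small value. A tiny illustration, assuming the usual sentinel bit pattern and the 33-bit MPEG wrap width mentioned elsewhere in this file:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int64_t nopts    = INT64_MIN;        /* assumed AV_NOPTS_VALUE bit pattern */
    const int64_t pts_mask = (1LL << 33) - 1;  /* 33-bit wrap, the MPEG case         */
    /* prints 0: after masking, the sentinel is indistinguishable from a real pts */
    printf("masked sentinel: %" PRId64 "\n", nopts & pts_mask);
    return 0;
}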
-/**
- * Write a packet to an output media file.
- *
- * The packet shall contain one audio or video frame.
- *
- * @param s media file handle
- * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
- * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
- */
int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
int ret;
@@ -2317,20 +2181,6 @@ int av_write_frame(AVFormatContext *s, AVPacket *pkt)
return ret;
}
-/**
- * Interleave a packet per DTS in an output media file.
- *
- * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
- * so they cannot be used after it, note calling av_free_packet() on them is still safe.
- *
- * @param s media file handle
- * @param out the interleaved packet will be output here
- * @param in the input packet
- * @param flush 1 if no further packets are available as input and all
- * remaining packets should be output
- * @return 1 if a packet was output, 0 if no packet could be output,
- * < 0 if an error occured
- */
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
AVPacketList *pktl, **next_point, *this_pktl;
int stream_count=0;
@@ -2400,20 +2250,6 @@ static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in,
return av_interleave_packet_per_dts(s, out, in, flush);
}
-/**
- * Writes a packet to an output media file ensuring correct interleaving.
- *
- * The packet must contain one audio or video frame.
- * If the packets are already correctly interleaved the application should
- * call av_write_frame() instead as its slightly faster, its also important
- * to keep in mind that completly non interleaved input will need huge amounts
- * of memory to interleave with this, so its prefereable to interleave at the
- * demuxer level
- *
- * @param s media file handle
- * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
- * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
- */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
AVStream *st= s->streams[ pkt->stream_index];
@@ -2447,13 +2283,6 @@ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
}
}
-/**
- * @brief Write the stream trailer to an output media file and
- * free the file private data.
- *
- * @param s media file handle
- * @return 0 if OK. AVERROR_xxx if error.
- */
int av_write_trailer(AVFormatContext *s)
{
int ret, i;
@@ -2586,9 +2415,6 @@ static AbvEntry frame_abvs[] = {
{ "4cif", 704, 576, 0, 0 },
};
-/**
- * parses width and height out of string str.
- */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
int i;
@@ -2617,13 +2443,6 @@ int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
return 0;
}
-/**
- * Converts frame rate from string to a fraction.
- *
- * First we try to get an exact integer or fractional frame rate.
- * If this fails we convert the frame rate to a double and return
- * an approximate fraction using the DEFAULT_FRAME_RATE_BASE.
- */
int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
{
int i;
@@ -2661,21 +2480,6 @@ int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
return 0;
}
-/**
- * Converts date string to number of seconds since Jan 1st, 1970.
- *
- * @code
- * Syntax:
- * - If not a duration:
- * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
- * Time is localtime unless Z is suffixed to the end. In this case GMT
- * Return the date in micro seconds since 1970
- *
- * - If a duration:
- * HH[:MM[:SS[.m...]]]
- * S+[.m...]
- * @endcode
- */
#ifndef CONFIG_WINCE
int64_t parse_date(const char *datestr, int duration)
{
@@ -2756,7 +2560,7 @@ int64_t parse_date(const char *datestr, int duration)
if (duration)
return 0;
else
- return now * int64_t_C(1000000);
+ return now * INT64_C(1000000);
}
if (duration) {
@@ -2786,12 +2590,6 @@ int64_t parse_date(const char *datestr, int duration)
}
#endif /* CONFIG_WINCE */
-/**
- * Attempts to find a specific tag in a URL.
- *
- * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
- * Return 1 if found.
- */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
{
const char *p;
@@ -2831,18 +2629,6 @@ int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
return 0;
}
-/**
- * Returns in 'buf' the path with '%d' replaced by number.
-
- * Also handles the '%0nd' format where 'n' is the total number
- * of digits and '%%'.
- *
- * @param buf destination buffer
- * @param buf_size destination buffer size
- * @param path numbered sequence string
- * @number frame number
- * @return 0 if OK, -1 if format error.
- */
int av_get_frame_filename(char *buf, int buf_size,
const char *path, int number)
{
@@ -2898,68 +2684,80 @@ int av_get_frame_filename(char *buf, int buf_size,
return -1;
}
-/**
- * Print nice hexa dump of a buffer
- * @param f stream for output
- * @param buf buffer
- * @param size buffer size
- */
-void av_hex_dump(FILE *f, uint8_t *buf, int size)
+static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
{
int len, i, j, c;
+#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
for(i=0;i<size;i+=16) {
len = size - i;
if (len > 16)
len = 16;
- fprintf(f, "%08x ", i);
+ PRINT("%08x ", i);
for(j=0;j<16;j++) {
if (j < len)
- fprintf(f, " %02x", buf[i+j]);
+ PRINT(" %02x", buf[i+j]);
else
- fprintf(f, " ");
+ PRINT(" ");
}
- fprintf(f, " ");
+ PRINT(" ");
for(j=0;j<len;j++) {
c = buf[i+j];
if (c < ' ' || c > '~')
c = '.';
- fprintf(f, "%c", c);
+ PRINT("%c", c);
}
- fprintf(f, "\n");
+ PRINT("\n");
}
+#undef PRINT
+}
+
+void av_hex_dump(FILE *f, uint8_t *buf, int size)
+{
+ hex_dump_internal(NULL, f, 0, buf, size);
+}
+
+void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
+{
+ hex_dump_internal(avcl, NULL, level, buf, size);
}
-/**
- * Print on 'f' a nice dump of a packet
- * @param f stream for output
- * @param pkt packet to dump
- * @param dump_payload true if the payload must be displayed too
- */
//FIXME needs to know the time_base
-void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
+static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
{
- fprintf(f, "stream #%d:\n", pkt->stream_index);
- fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
- fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
+#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
+ PRINT("stream #%d:\n", pkt->stream_index);
+ PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
+ PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
/* DTS is _always_ valid after av_read_frame() */
- fprintf(f, " dts=");
+ PRINT(" dts=");
if (pkt->dts == AV_NOPTS_VALUE)
- fprintf(f, "N/A");
+ PRINT("N/A");
else
- fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
+ PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
/* PTS may be not known if B frames are present */
- fprintf(f, " pts=");
+ PRINT(" pts=");
if (pkt->pts == AV_NOPTS_VALUE)
- fprintf(f, "N/A");
+ PRINT("N/A");
else
- fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
- fprintf(f, "\n");
- fprintf(f, " size=%d\n", pkt->size);
+ PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
+ PRINT("\n");
+ PRINT(" size=%d\n", pkt->size);
+#undef PRINT
if (dump_payload)
av_hex_dump(f, pkt->data, pkt->size);
}
+void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
+{
+ pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
+}
+
+void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
+{
+ pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
+}
+
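With both dump paths routed through one internal helper, the same output can go either to a FILE * or through av_log() at a caller-chosen level. A hedged usage sketch, assuming an opened AVFormatContext ic and a packet pkt read from it:

static void debug_dump(AVFormatContext *ic, AVPacket *pkt)
{
    av_pkt_dump(stderr, pkt, 0);                 /* classic FILE * variant     */
    av_pkt_dump_log(ic, AV_LOG_DEBUG, pkt, 0);   /* new av_log()-based variant */
    /* hex dump of at most the first 64 payload bytes through av_log() as well */
    av_hex_dump_log(ic, AV_LOG_DEBUG, pkt->data, FFMIN(pkt->size, 64));
}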
void url_split(char *proto, int proto_size,
char *authorization, int authorization_size,
char *hostname, int hostname_size,
@@ -3031,15 +2829,6 @@ void url_split(char *proto, int proto_size,
pstrcpy(path, path_size, p);
}
-/**
- * Set the pts for a given stream.
- *
- * @param s stream
- * @param pts_wrap_bits number of bits effectively used by the pts
- * (used for wrap control, 33 is the value for MPEG)
- * @param pts_num numerator to convert to seconds (MPEG: 1)
- * @param pts_den denominator to convert to seconds (MPEG: 90000)
- */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
int pts_num, int pts_den)
{
@@ -3073,15 +2862,6 @@ static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
}
/**
- * Set f to (val + 0.5).
- */
-static void av_frac_set(AVFrac *f, int64_t val)
-{
- f->val = val;
- f->num = f->den >> 1;
-}
-
-/**
 * Fractional addition to f: f = f + (incr / f->den).
*
* @param f fractional number
diff --git a/contrib/ffmpeg/libavformat/v4l2.c b/contrib/ffmpeg/libavformat/v4l2.c
index 00adccaa8..aeaac3347 100644
--- a/contrib/ffmpeg/libavformat/v4l2.c
+++ b/contrib/ffmpeg/libavformat/v4l2.c
@@ -59,6 +59,11 @@ struct video_data {
unsigned int *buf_len;
};
+struct buff_data {
+ int index;
+ int fd;
+};
+
struct fmt_map {
enum PixelFormat ff_fmt;
int32_t v4l2_fmt;
@@ -74,7 +79,7 @@ static struct fmt_map fmt_conversion_table[] = {
.v4l2_fmt = V4L2_PIX_FMT_YUV422P,
},
{
- .ff_fmt = PIX_FMT_YUV422,
+ .ff_fmt = PIX_FMT_YUYV422,
.v4l2_fmt = V4L2_PIX_FMT_YUYV,
},
{
@@ -99,7 +104,7 @@ static struct fmt_map fmt_conversion_table[] = {
},
/*
{
- .ff_fmt = PIX_FMT_RGBA32,
+ .ff_fmt = PIX_FMT_RGB32,
.v4l2_fmt = V4L2_PIX_FMT_BGR32,
},
*/
@@ -109,16 +114,16 @@ static struct fmt_map fmt_conversion_table[] = {
},
};
-static int device_open(const char *devname, uint32_t *capabilities)
+static int device_open(AVFormatContext *ctx, uint32_t *capabilities)
{
struct v4l2_capability cap;
int fd;
int res;
- fd = open(devname, O_RDWR /*| O_NONBLOCK*/, 0);
+ fd = open(ctx->filename, O_RDWR /*| O_NONBLOCK*/, 0);
if (fd < 0) {
- av_log(NULL, AV_LOG_ERROR, "Cannot open video device %s : %s\n",
- devname, strerror(errno));
+ av_log(ctx, AV_LOG_ERROR, "Cannot open video device %s : %s\n",
+ ctx->filename, strerror(errno));
return -1;
}
@@ -127,20 +132,20 @@ static int device_open(const char *devname, uint32_t *capabilities)
// ENOIOCTLCMD definition only available on __KERNEL__
if (res < 0 && errno == 515)
{
- av_log(NULL, AV_LOG_ERROR, "QUERYCAP not implemented, probably V4L device but not supporting V4L2\n");
+ av_log(ctx, AV_LOG_ERROR, "QUERYCAP not implemented, probably V4L device but not supporting V4L2\n");
close(fd);
return -1;
}
if (res < 0) {
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
strerror(errno));
close(fd);
return -1;
}
if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
- av_log(NULL, AV_LOG_ERROR, "Not a video capture device\n");
+ av_log(ctx, AV_LOG_ERROR, "Not a video capture device\n");
close(fd);
return -1;
@@ -150,8 +155,10 @@ static int device_open(const char *devname, uint32_t *capabilities)
return fd;
}
-static int device_init(int fd, int *width, int *height, int pix_fmt)
+static int device_init(AVFormatContext *ctx, int *width, int *height, int pix_fmt)
{
+ struct video_data *s = ctx->priv_data;
+ int fd = s->fd;
struct v4l2_format fmt;
int res;
@@ -163,7 +170,7 @@ static int device_init(int fd, int *width, int *height, int pix_fmt)
fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
res = ioctl(fd, VIDIOC_S_FMT, &fmt);
if ((*width != fmt.fmt.pix.width) || (*height != fmt.fmt.pix.height)) {
- av_log(NULL, AV_LOG_INFO, "The V4L2 driver changed the video from %dx%d to %dx%d\n", *width, *height, fmt.fmt.pix.width, fmt.fmt.pix.height);
+ av_log(ctx, AV_LOG_INFO, "The V4L2 driver changed the video from %dx%d to %dx%d\n", *width, *height, fmt.fmt.pix.width, fmt.fmt.pix.height);
*width = fmt.fmt.pix.width;
*height = fmt.fmt.pix.height;
}
@@ -213,8 +220,9 @@ static enum PixelFormat fmt_v4l2ff(uint32_t pix_fmt)
return -1;
}
-static int mmap_init(struct video_data *s)
+static int mmap_init(AVFormatContext *ctx)
{
+ struct video_data *s = ctx->priv_data;
struct v4l2_requestbuffers req;
int i, res;
@@ -225,29 +233,29 @@ static int mmap_init(struct video_data *s)
res = ioctl (s->fd, VIDIOC_REQBUFS, &req);
if (res < 0) {
if (errno == EINVAL) {
- av_log(NULL, AV_LOG_ERROR, "Device does not support mmap\n");
+ av_log(ctx, AV_LOG_ERROR, "Device does not support mmap\n");
} else {
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
}
return -1;
}
if (req.count < 2) {
- av_log(NULL, AV_LOG_ERROR, "Insufficient buffer memory\n");
+ av_log(ctx, AV_LOG_ERROR, "Insufficient buffer memory\n");
return -1;
}
s->buffers = req.count;
s->buf_start = av_malloc(sizeof(void *) * s->buffers);
if (s->buf_start == NULL) {
- av_log(NULL, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
+ av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
return -1;
}
s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
if (s->buf_len == NULL) {
- av_log(NULL, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
+ av_log(ctx, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
av_free(s->buf_start);
return -1;
@@ -262,21 +270,21 @@ static int mmap_init(struct video_data *s)
buf.index = i;
res = ioctl (s->fd, VIDIOC_QUERYBUF, &buf);
if (res < 0) {
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
return -1;
}
s->buf_len[i] = buf.length;
if (s->buf_len[i] < s->frame_size) {
- av_log(NULL, AV_LOG_ERROR, "Buffer len [%d] = %d != %d\n", i, s->buf_len[i], s->frame_size);
+ av_log(ctx, AV_LOG_ERROR, "Buffer len [%d] = %d != %d\n", i, s->buf_len[i], s->frame_size);
return -1;
}
s->buf_start[i] = mmap (NULL, buf.length,
PROT_READ | PROT_WRITE, MAP_SHARED, s->fd, buf.m.offset);
if (s->buf_start[i] == MAP_FAILED) {
- av_log(NULL, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
+ av_log(ctx, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
return -1;
}
@@ -285,14 +293,37 @@ static int mmap_init(struct video_data *s)
return 0;
}
-static int read_init(struct video_data *s)
+static int read_init(AVFormatContext *ctx)
{
return -1;
}
-static int mmap_read_frame(struct video_data *s, void *frame, int64_t *ts)
+static void mmap_release_buffer(AVPacket *pkt)
{
struct v4l2_buffer buf;
+ int res, fd;
+ struct buff_data *buf_descriptor = pkt->priv;
+
+ memset(&buf, 0, sizeof(struct v4l2_buffer));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = buf_descriptor->index;
+ fd = buf_descriptor->fd;
+ av_free(buf_descriptor);
+
+ res = ioctl (fd, VIDIOC_QBUF, &buf);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n");
+ }
+ pkt->data = NULL;
+ pkt->size = 0;
+}
+
+static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
+{
+ struct video_data *s = ctx->priv_data;
+ struct v4l2_buffer buf;
+ struct buff_data *buf_descriptor;
int res;
memset(&buf, 0, sizeof(struct v4l2_buffer));
@@ -303,38 +334,47 @@ static int mmap_read_frame(struct video_data *s, void *frame, int64_t *ts)
while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 &&
((errno == EAGAIN) || (errno == EINTR)));
if (res < 0) {
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", strerror(errno));
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", strerror(errno));
return -1;
}
assert (buf.index < s->buffers);
if (buf.bytesused != s->frame_size) {
- av_log(NULL, AV_LOG_ERROR, "The v4l2 frame is %d bytes, but %d bytes are expected\n", buf.bytesused, s->frame_size);
+ av_log(ctx, AV_LOG_ERROR, "The v4l2 frame is %d bytes, but %d bytes are expected\n", buf.bytesused, s->frame_size);
return -1;
}
/* Image is at s->buff_start[buf.index] */
- memcpy(frame, s->buf_start[buf.index], buf.bytesused);
- *ts = buf.timestamp.tv_sec * int64_t_C(1000000) + buf.timestamp.tv_usec;
-
- res = ioctl (s->fd, VIDIOC_QBUF, &buf);
- if (res < 0) {
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n");
+ pkt->data= s->buf_start[buf.index];
+ pkt->size = buf.bytesused;
+ pkt->pts = buf.timestamp.tv_sec * INT64_C(1000000) + buf.timestamp.tv_usec;
+ pkt->destruct = mmap_release_buffer;
+ buf_descriptor = av_malloc(sizeof(struct buff_data));
+ if (buf_descriptor == NULL) {
+ /* Something went wrong... Since av_malloc() failed, we cannot even
+ * allocate a buffer for memcopying into it
+ */
+ av_log(ctx, AV_LOG_ERROR, "Failed to allocate a buffer descriptor\n");
+ res = ioctl (s->fd, VIDIOC_QBUF, &buf);
return -1;
}
+ buf_descriptor->fd = s->fd;
+ buf_descriptor->index = buf.index;
+ pkt->priv = buf_descriptor;
return s->buf_len[buf.index];
}
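mmap_read_frame() no longer copies each dequeued buffer into a fresh packet; the packet points straight at the mmap()ed memory and the destruct callback re-queues the buffer when the packet is freed. The general shape of that zero-copy pattern, sketched with a hypothetical requeue_buffer() helper standing in for the VIDIOC_QBUF ioctl above:

static void release(AVPacket *pkt)
{
    requeue_buffer(pkt->priv);   /* assumed helper: give the buffer back to its owner */
    pkt->data = NULL;            /* neutralize the packet so nothing is freed twice   */
    pkt->size = 0;
}

static void wrap_buffer(AVPacket *pkt, uint8_t *buf, int size, void *owner_state)
{
    av_init_packet(pkt);
    pkt->data     = buf;         /* no memcpy: reference the mmap()ed buffer directly */
    pkt->size     = size;
    pkt->priv     = owner_state; /* whatever release() needs to hand the buffer back  */
    pkt->destruct = release;     /* invoked from av_free_packet()                     */
}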
-static int read_frame(struct video_data *s, void *frame, int64_t *ts)
+static int read_frame(AVFormatContext *ctx, AVPacket *pkt)
{
return -1;
}
-static int mmap_start(struct video_data *s)
+static int mmap_start(AVFormatContext *ctx)
{
+ struct video_data *s = ctx->priv_data;
enum v4l2_buf_type type;
int i, res;
@@ -348,7 +388,7 @@ static int mmap_start(struct video_data *s)
res = ioctl (s->fd, VIDIOC_QBUF, &buf);
if (res < 0) {
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", strerror(errno));
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", strerror(errno));
return -1;
}
@@ -357,7 +397,7 @@ static int mmap_start(struct video_data *s)
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
res = ioctl (s->fd, VIDIOC_STREAMON, &type);
if (res < 0) {
- av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", strerror(errno));
+ av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", strerror(errno));
return -1;
}
@@ -382,6 +422,57 @@ static void mmap_close(struct video_data *s)
av_free(s->buf_len);
}
+static int v4l2_set_parameters( AVFormatContext *s1, AVFormatParameters *ap )
+{
+ struct video_data *s = s1->priv_data;
+ struct v4l2_input input;
+ struct v4l2_standard standard;
+ int i;
+
+ /* set tv video input */
+ memset (&input, 0, sizeof (input));
+ input.index = ap->channel;
+ if(ioctl (s->fd, VIDIOC_ENUMINPUT, &input) < 0) {
+ av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl enum input failed:\n");
+ return AVERROR_IO;
+ }
+
+ av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set input_id: %d, input: %s\n",
+ ap->channel, input.name);
+ if(ioctl (s->fd, VIDIOC_S_INPUT, &input.index) < 0 ) {
+ av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set input(%d) failed\n",
+ ap->channel);
+ return AVERROR_IO;
+ }
+
+ av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s\n",
+ ap->standard );
+ /* set tv standard */
+ memset (&standard, 0, sizeof (standard));
+ for(i=0;;i++) {
+ standard.index = i;
+ if (ioctl(s->fd, VIDIOC_ENUMSTD, &standard) < 0) {
+ av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set standard(%s) failed\n",
+ ap->standard);
+ return AVERROR_IO;
+ }
+
+ if(!strcasecmp(standard.name, ap->standard)) {
+ break;
+ }
+ }
+
+ av_log(s1, AV_LOG_DEBUG, "The V4L2 driver set standard: %s, id: %"PRIu64"\n",
+ ap->standard, standard.id);
+ if (ioctl(s->fd, VIDIOC_S_STD, &standard.id) < 0) {
+ av_log(s1, AV_LOG_ERROR, "The V4L2 driver ioctl set standard(%s) failed\n",
+ ap->standard);
+ return AVERROR_IO;
+ }
+
+ return 0;
+}
+
static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
struct video_data *s = s1->priv_data;
@@ -389,7 +480,6 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
int width, height;
int res, frame_rate, frame_rate_base;
uint32_t desired_format, capabilities;
- const char *video_device;
if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
av_log(s1, AV_LOG_ERROR, "Missing/Wrong parameters\n");
@@ -410,7 +500,7 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
st = av_new_stream(s1, 0);
if (!st) {
- return -ENOMEM;
+ return AVERROR(ENOMEM);
}
av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
@@ -419,12 +509,8 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
s->frame_rate = frame_rate;
s->frame_rate_base = frame_rate_base;
- video_device = ap->device;
- if (!video_device) {
- video_device = "/dev/video";
- }
capabilities = 0;
- s->fd = device_open(video_device, &capabilities);
+ s->fd = device_open(s1, &capabilities);
if (s->fd < 0) {
av_free(st);
@@ -433,13 +519,13 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
av_log(s1, AV_LOG_INFO, "[%d]Capabilities: %x\n", s->fd, capabilities);
desired_format = fmt_ff2v4l(ap->pix_fmt);
- if (desired_format == 0 || (device_init(s->fd, &width, &height, desired_format) < 0)) {
+ if (desired_format == 0 || (device_init(s1, &width, &height, desired_format) < 0)) {
int i, done;
done = 0; i = 0;
while (!done) {
desired_format = fmt_conversion_table[i].v4l2_fmt;
- if (device_init(s->fd, &width, &height, desired_format) < 0) {
+ if (device_init(s1, &width, &height, desired_format) < 0) {
desired_format = 0;
i++;
} else {
@@ -459,17 +545,20 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
}
s->frame_format = desired_format;
+ if( v4l2_set_parameters( s1, ap ) < 0 )
+ return AVERROR_IO;
+
st->codec->pix_fmt = fmt_v4l2ff(desired_format);
s->frame_size = avpicture_get_size(st->codec->pix_fmt, width, height);
if (capabilities & V4L2_CAP_STREAMING) {
s->io_method = io_mmap;
- res = mmap_init(s);
+ res = mmap_init(s1);
if (res == 0) {
- res = mmap_start(s);
+ res = mmap_start(s1);
}
} else {
s->io_method = io_read;
- res = read_init(s);
+ res = read_init(s1);
}
if (res < 0) {
close(s->fd);
@@ -495,13 +584,14 @@ static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt)
struct video_data *s = s1->priv_data;
int res;
- if (av_new_packet(pkt, s->frame_size) < 0)
- return AVERROR_IO;
-
if (s->io_method == io_mmap) {
- res = mmap_read_frame(s, pkt->data, &pkt->pts);
+ av_init_packet(pkt);
+ res = mmap_read_frame(s1, pkt);
} else if (s->io_method == io_read) {
- res = read_frame(s, pkt->data, &pkt->pts);
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR_IO;
+
+ res = read_frame(s1, pkt);
} else {
return AVERROR_IO;
}
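On the caller side, the new v4l2_set_parameters() consumes ap->channel and ap->standard, while the filename now carries the device node instead of ap->device. A hedged sketch of opening the device through the old AVFormatParameters API, assuming the demuxer is registered under the name "video4linux2":

static AVFormatContext *open_v4l2(void)
{
    AVFormatParameters ap;
    AVFormatContext *ic;
    AVInputFormat *fmt = av_find_input_format("video4linux2"); /* assumed name */

    memset(&ap, 0, sizeof(ap));
    ap.channel   = 1;                     /* input index passed to VIDIOC_S_INPUT */
    ap.standard  = "PAL";                 /* name matched against VIDIOC_ENUMSTD  */
    ap.width     = 640;
    ap.height    = 480;
    ap.time_base = (AVRational){1, 25};
    ap.pix_fmt   = PIX_FMT_YUV420P;

    if (!fmt || av_open_input_file(&ic, "/dev/video0", fmt, 0, &ap) < 0)
        return NULL;
    return ic;
}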
diff --git a/contrib/ffmpeg/libavformat/voc.c b/contrib/ffmpeg/libavformat/voc.c
index 329f07739..97b73d163 100644
--- a/contrib/ffmpeg/libavformat/voc.c
+++ b/contrib/ffmpeg/libavformat/voc.c
@@ -23,7 +23,7 @@
const unsigned char voc_magic[21] = "Creative Voice File\x1A";
-const CodecTag voc_codec_tags[] = {
+const AVCodecTag voc_codec_tags[] = {
{CODEC_ID_PCM_U8, 0x00},
{CODEC_ID_ADPCM_SBPRO_4, 0x01},
{CODEC_ID_ADPCM_SBPRO_3, 0x02},
diff --git a/contrib/ffmpeg/libavformat/voc.h b/contrib/ffmpeg/libavformat/voc.h
index 16adb0078..9b2bb8cce 100644
--- a/contrib/ffmpeg/libavformat/voc.h
+++ b/contrib/ffmpeg/libavformat/voc.h
@@ -43,7 +43,7 @@ typedef enum voc_type {
} voc_type_t;
extern const unsigned char voc_magic[21];
-extern const CodecTag voc_codec_tags[];
+extern const AVCodecTag voc_codec_tags[];
int voc_get_packet(AVFormatContext *s, AVPacket *pkt,
AVStream *st, int max_size);
diff --git a/contrib/ffmpeg/libavformat/vocdec.c b/contrib/ffmpeg/libavformat/vocdec.c
index 6a7869227..85d304dff 100644
--- a/contrib/ffmpeg/libavformat/vocdec.c
+++ b/contrib/ffmpeg/libavformat/vocdec.c
@@ -22,7 +22,6 @@
#include "voc.h"
-static const int voc_max_pkt_size = 2048;
static int voc_probe(AVProbeData *p)
@@ -51,7 +50,7 @@ static int voc_read_header(AVFormatContext *s, AVFormatParameters *ap)
url_fskip(pb, 20);
header_size = get_le16(pb) - 22;
if (header_size != 4) {
- av_log(s, AV_LOG_ERROR, "unkown header size: %d\n", header_size);
+ av_log(s, AV_LOG_ERROR, "unknown header size: %d\n", header_size);
return AVERROR_NOTSUPP;
}
url_fskip(pb, header_size);
@@ -128,7 +127,7 @@ voc_get_packet(AVFormatContext *s, AVPacket *pkt, AVStream *st, int max_size)
dec->bit_rate = dec->sample_rate * dec->bits_per_sample;
if (max_size <= 0)
- max_size = voc_max_pkt_size;
+ max_size = 2048;
size = FFMIN(voc->remaining_size, max_size);
voc->remaining_size -= size;
return av_get_packet(pb, pkt, size);
@@ -152,4 +151,5 @@ AVInputFormat voc_demuxer = {
voc_read_header,
voc_read_packet,
voc_read_close,
+ .codec_tag=(const AVCodecTag*[]){voc_codec_tags, 0},
};
diff --git a/contrib/ffmpeg/libavformat/vocenc.c b/contrib/ffmpeg/libavformat/vocenc.c
index ed304883d..6a07c92dd 100644
--- a/contrib/ffmpeg/libavformat/vocenc.c
+++ b/contrib/ffmpeg/libavformat/vocenc.c
@@ -51,28 +51,26 @@ static int voc_write_packet(AVFormatContext *s, AVPacket *pkt)
ByteIOContext *pb = &s->pb;
if (!voc->param_written) {
- int format = codec_get_tag(voc_codec_tags, enc->codec_id);
-
- if (format > 0xFF) {
+ if (enc->codec_tag > 0xFF) {
put_byte(pb, VOC_TYPE_NEW_VOICE_DATA);
put_le24(pb, pkt->size + 12);
put_le32(pb, enc->sample_rate);
put_byte(pb, enc->bits_per_sample);
put_byte(pb, enc->channels);
- put_le16(pb, format);
+ put_le16(pb, enc->codec_tag);
put_le32(pb, 0);
} else {
if (s->streams[0]->codec->channels > 1) {
put_byte(pb, VOC_TYPE_EXTENDED);
put_le24(pb, 4);
put_le16(pb, 65536-256000000/(enc->sample_rate*enc->channels));
- put_byte(pb, format);
+ put_byte(pb, enc->codec_tag);
put_byte(pb, enc->channels - 1);
}
put_byte(pb, VOC_TYPE_VOICE_DATA);
put_le24(pb, pkt->size + 2);
put_byte(pb, 256 - 1000000 / enc->sample_rate);
- put_byte(pb, format);
+ put_byte(pb, enc->codec_tag);
}
voc->param_written = 1;
} else {
@@ -101,4 +99,5 @@ AVOutputFormat voc_muxer = {
voc_write_header,
voc_write_packet,
voc_write_trailer,
+ .codec_tag=(const AVCodecTag*[]){voc_codec_tags, 0},
};
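Both the VOC demuxer and muxer now export voc_codec_tags through .codec_tag, so av_write_header() can resolve codec_tag from codec_id (see the av_write_header() hunk earlier in this patch) and voc_write_packet() simply emits enc->codec_tag. A sketch of the lookup, assuming av_codec_get_tag() keeps the table-array calling convention used above:

/* hypothetical standalone lookup; in practice av_write_header() does this */
unsigned int tag = av_codec_get_tag((const AVCodecTag *[]){ voc_codec_tags, 0 },
                                    CODEC_ID_PCM_U8);   /* 0x00 per the voc.c table */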
diff --git a/contrib/ffmpeg/libavformat/wav.c b/contrib/ffmpeg/libavformat/wav.c
index 7fb982349..0699bec70 100644
--- a/contrib/ffmpeg/libavformat/wav.c
+++ b/contrib/ffmpeg/libavformat/wav.c
@@ -25,6 +25,9 @@
typedef struct {
offset_t data;
offset_t data_end;
+ int64_t minpts;
+ int64_t maxpts;
+ int last_duration;
} WAVContext;
#ifdef CONFIG_MUXERS
@@ -32,7 +35,7 @@ static int wav_write_header(AVFormatContext *s)
{
WAVContext *wav = s->priv_data;
ByteIOContext *pb = &s->pb;
- offset_t fmt;
+ offset_t fmt, fact;
put_tag(pb, "RIFF");
put_le32(pb, 0); /* file length */
@@ -46,7 +49,16 @@ static int wav_write_header(AVFormatContext *s)
}
end_tag(pb, fmt);
+ if(s->streams[0]->codec->codec_tag != 0x01 /* hence for all other than PCM */
+ && !url_is_streamed(&s->pb)) {
+ fact = start_tag(pb, "fact");
+ put_le32(pb, 0);
+ end_tag(pb, fact);
+ }
+
av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
+ wav->maxpts = wav->last_duration = 0;
+ wav->minpts = INT64_MAX;
/* data header */
wav->data = start_tag(pb, "data");
@@ -59,7 +71,14 @@ static int wav_write_header(AVFormatContext *s)
static int wav_write_packet(AVFormatContext *s, AVPacket *pkt)
{
ByteIOContext *pb = &s->pb;
+ WAVContext *wav = s->priv_data;
put_buffer(pb, pkt->data, pkt->size);
+ if(pkt->pts != AV_NOPTS_VALUE) {
+ wav->minpts = FFMIN(wav->minpts, pkt->pts);
+ wav->maxpts = FFMAX(wav->maxpts, pkt->pts);
+ wav->last_duration = pkt->duration;
+ } else
+ av_log(s, AV_LOG_ERROR, "wav_write_packet: NOPTS\n");
return 0;
}
@@ -79,6 +98,18 @@ static int wav_write_trailer(AVFormatContext *s)
url_fseek(pb, file_size, SEEK_SET);
put_flush_packet(pb);
+
+ if(s->streams[0]->codec->codec_tag != 0x01) {
+ /* Update num_samps in fact chunk */
+ int number_of_samples;
+ number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration,
+ s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num,
+ s->streams[0]->time_base.den);
+ url_fseek(pb, wav->data-12, SEEK_SET);
+ put_le32(pb, number_of_samples);
+ url_fseek(pb, file_size, SEEK_SET);
+ put_flush_packet(pb);
+ }
}
return 0;
}
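Because wav_write_header() sets the stream time base to 1/sample_rate, the av_rescale() above reduces to the raw pts span expressed in samples, which is what the "fact" chunk of a non-PCM WAV stores. A small worked example, assuming 10 s of 44.1 kHz audio written in 1024-sample packets:

int64_t sample_rate   = 44100;
int64_t tb_num        = 1, tb_den = 44100;   /* time base set in the header */
int64_t minpts        = 0;
int64_t maxpts        = 441000 - 1024;       /* pts of the last packet      */
int64_t last_duration = 1024;
int64_t nb_samples    = av_rescale(maxpts - minpts + last_duration,
                                   sample_rate * tb_num, tb_den);
/* nb_samples == 441000, the value patched back into the fact chunk */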
@@ -188,13 +219,11 @@ static int wav_read_packet(AVFormatContext *s,
size = (size / st->codec->block_align) * st->codec->block_align;
}
size= FFMIN(size, left);
- if (av_new_packet(pkt, size))
+ ret= av_get_packet(&s->pb, pkt, size);
+ if (ret <= 0)
return AVERROR_IO;
pkt->stream_index = 0;
- ret = get_buffer(&s->pb, pkt->data, pkt->size);
- if (ret < 0)
- av_free_packet(pkt);
/* note: we need to modify the packet size here to handle the last
packet */
pkt->size = ret;
@@ -235,6 +264,8 @@ AVInputFormat wav_demuxer = {
wav_read_packet,
wav_read_close,
wav_read_seek,
+ .flags= AVFMT_GENERIC_INDEX,
+ .codec_tag= (const AVCodecTag*[]){codec_wav_tags, 0},
};
#endif
#ifdef CONFIG_WAV_MUXER
@@ -249,5 +280,6 @@ AVOutputFormat wav_muxer = {
wav_write_header,
wav_write_packet,
wav_write_trailer,
+ .codec_tag= (const AVCodecTag*[]){codec_wav_tags, 0},
};
#endif
diff --git a/contrib/ffmpeg/libavformat/wc3movie.c b/contrib/ffmpeg/libavformat/wc3movie.c
index 6b3242797..3e58a1bba 100644
--- a/contrib/ffmpeg/libavformat/wc3movie.c
+++ b/contrib/ffmpeg/libavformat/wc3movie.c
@@ -115,8 +115,8 @@ static int wc3_probe(AVProbeData *p)
if (p->buf_size < 12)
return 0;
- if ((LE_32(&p->buf[0]) != FORM_TAG) ||
- (LE_32(&p->buf[8]) != MOVE_TAG))
+ if ((AV_RL32(&p->buf[0]) != FORM_TAG) ||
+ (AV_RL32(&p->buf[8]) != MOVE_TAG))
return 0;
return AVPROBE_SCORE_MAX;
@@ -153,8 +153,8 @@ static int wc3_read_header(AVFormatContext *s,
if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
WC3_PREAMBLE_SIZE)
return AVERROR_IO;
- fourcc_tag = LE_32(&preamble[0]);
- size = (BE_32(&preamble[4]) + 1) & (~1);
+ fourcc_tag = AV_RL32(&preamble[0]);
+ size = (AV_RB32(&preamble[4]) + 1) & (~1);
do {
switch (fourcc_tag) {
@@ -170,7 +170,7 @@ static int wc3_read_header(AVFormatContext *s,
url_fseek(pb, 8, SEEK_CUR);
if ((ret = get_buffer(pb, preamble, 4)) != 4)
return AVERROR_IO;
- wc3->palette_count = LE_32(&preamble[0]);
+ wc3->palette_count = AV_RL32(&preamble[0]);
if((unsigned)wc3->palette_count >= UINT_MAX / PALETTE_SIZE){
wc3->palette_count= 0;
return -1;
@@ -193,8 +193,8 @@ static int wc3_read_header(AVFormatContext *s,
if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
WC3_PREAMBLE_SIZE)
return AVERROR_IO;
- wc3->width = LE_32(&preamble[0]);
- wc3->height = LE_32(&preamble[4]);
+ wc3->width = AV_RL32(&preamble[0]);
+ wc3->height = AV_RL32(&preamble[4]);
break;
case PALT_TAG:
@@ -229,9 +229,9 @@ static int wc3_read_header(AVFormatContext *s,
if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
WC3_PREAMBLE_SIZE)
return AVERROR_IO;
- fourcc_tag = LE_32(&preamble[0]);
+ fourcc_tag = AV_RL32(&preamble[0]);
/* chunk sizes are 16-bit aligned */
- size = (BE_32(&preamble[4]) + 1) & (~1);
+ size = (AV_RB32(&preamble[4]) + 1) & (~1);
} while (fourcc_tag != BRCH_TAG);
@@ -291,9 +291,9 @@ static int wc3_read_packet(AVFormatContext *s,
WC3_PREAMBLE_SIZE)
ret = AVERROR_IO;
- fourcc_tag = LE_32(&preamble[0]);
+ fourcc_tag = AV_RL32(&preamble[0]);
/* chunk sizes are 16-bit aligned */
- size = (BE_32(&preamble[4]) + 1) & (~1);
+ size = (AV_RB32(&preamble[4]) + 1) & (~1);
switch (fourcc_tag) {
@@ -305,7 +305,7 @@ static int wc3_read_packet(AVFormatContext *s,
/* load up new palette */
if ((ret = get_buffer(pb, preamble, 4)) != 4)
return AVERROR_IO;
- palette_number = LE_32(&preamble[0]);
+ palette_number = AV_RL32(&preamble[0]);
if (palette_number >= wc3->palette_count)
return AVERROR_INVALIDDATA;
base_palette_index = palette_number * PALETTE_COUNT * 3;
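The LE_*/BE_* byte readers are replaced throughout these demuxers with the AV_RL*/AV_RB* macros; the behaviour is unchanged, only the naming is unified with the rest of the tree. A tiny self-contained sketch of what the little-endian 32-bit read amounts to, with a hand-rolled equivalent for comparison:

#include <stdint.h>
#include <stdio.h>

/* hand-rolled equivalent of a little-endian 32-bit read: least significant byte first */
static uint32_t rl32(const uint8_t *p)
{
    return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    const uint8_t buf[4] = { 'F', 'O', 'R', 'M' };
    /* prints 0x4d524f46 on any host, independent of native endianness */
    printf("0x%08x\n", rl32(buf));
    return 0;
}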
diff --git a/contrib/ffmpeg/libavformat/westwood.c b/contrib/ffmpeg/libavformat/westwood.c
index 5c42e3b55..bed2f0d14 100644
--- a/contrib/ffmpeg/libavformat/westwood.c
+++ b/contrib/ffmpeg/libavformat/westwood.c
@@ -101,7 +101,7 @@ static int wsaud_probe(AVProbeData *p)
return 0;
/* check sample rate */
- field = LE_16(&p->buf[0]);
+ field = AV_RL16(&p->buf[0]);
if ((field < 8000) || (field > 48000))
return 0;
@@ -124,7 +124,7 @@ static int wsaud_read_header(AVFormatContext *s,
if (get_buffer(pb, header, AUD_HEADER_SIZE) != AUD_HEADER_SIZE)
return AVERROR_IO;
- wsaud->audio_samplerate = LE_16(&header[0]);
+ wsaud->audio_samplerate = AV_RL16(&header[0]);
if (header[11] == 99)
wsaud->audio_type = CODEC_ID_ADPCM_IMA_WS;
else
@@ -170,10 +170,10 @@ static int wsaud_read_packet(AVFormatContext *s,
return AVERROR_IO;
/* validate the chunk */
- if (LE_32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
+ if (AV_RL32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
return AVERROR_INVALIDDATA;
- chunk_size = LE_16(&preamble[0]);
+ chunk_size = AV_RL16(&preamble[0]);
ret= av_get_packet(pb, pkt, chunk_size);
if (ret != chunk_size)
return AVERROR_IO;
@@ -202,8 +202,8 @@ static int wsvqa_probe(AVProbeData *p)
return 0;
/* check for the VQA signatures */
- if ((BE_32(&p->buf[0]) != FORM_TAG) ||
- (BE_32(&p->buf[8]) != WVQA_TAG))
+ if ((AV_RB32(&p->buf[0]) != FORM_TAG) ||
+ (AV_RB32(&p->buf[8]) != WVQA_TAG))
return 0;
return AVPROBE_SCORE_MAX;
@@ -224,7 +224,7 @@ static int wsvqa_read_header(AVFormatContext *s,
st = av_new_stream(s, 0);
if (!st)
return AVERROR_NOMEM;
- av_set_pts_info(st, 33, 1, 90000);
+ av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
wsvqa->video_stream_index = st->index;
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_WS_VQA;
@@ -242,25 +242,22 @@ static int wsvqa_read_header(AVFormatContext *s,
av_free(st->codec->extradata);
return AVERROR_IO;
}
- st->codec->width = LE_16(&header[6]);
- st->codec->height = LE_16(&header[8]);
-
- st->codec->time_base.num = 1;
- st->codec->time_base.den = VQA_FRAMERATE;
+ st->codec->width = AV_RL16(&header[6]);
+ st->codec->height = AV_RL16(&header[8]);
/* initialize the audio decoder stream for VQA v1 or nonzero samplerate */
- if (LE_16(&header[24]) || (LE_16(&header[0]) == 1)) {
+ if (AV_RL16(&header[24]) || (AV_RL16(&header[0]) == 1 && AV_RL16(&header[2]) == 1)) {
st = av_new_stream(s, 0);
if (!st)
return AVERROR_NOMEM;
- av_set_pts_info(st, 33, 1, 90000);
+ av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
st->codec->codec_type = CODEC_TYPE_AUDIO;
- if (LE_16(&header[0]) == 1)
+ if (AV_RL16(&header[0]) == 1)
st->codec->codec_id = CODEC_ID_WESTWOOD_SND1;
else
st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS;
st->codec->codec_tag = 0; /* no tag */
- st->codec->sample_rate = LE_16(&header[24]);
+ st->codec->sample_rate = AV_RL16(&header[24]);
if (!st->codec->sample_rate)
st->codec->sample_rate = 22050;
st->codec->channels = header[26];
@@ -284,8 +281,8 @@ static int wsvqa_read_header(AVFormatContext *s,
av_free(st->codec->extradata);
return AVERROR_IO;
}
- chunk_tag = BE_32(&scratch[0]);
- chunk_size = BE_32(&scratch[4]);
+ chunk_tag = AV_RB32(&scratch[0]);
+ chunk_size = AV_RB32(&scratch[4]);
/* catch any unknown header tags, for curiosity */
switch (chunk_tag) {
@@ -326,8 +323,8 @@ static int wsvqa_read_packet(AVFormatContext *s,
int skip_byte;
while (get_buffer(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) {
- chunk_type = BE_32(&preamble[0]);
- chunk_size = BE_32(&preamble[4]);
+ chunk_type = AV_RB32(&preamble[0]);
+ chunk_size = AV_RB32(&preamble[4]);
skip_byte = chunk_size & 0x01;
if ((chunk_type == SND1_TAG) || (chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) {
@@ -342,25 +339,14 @@ static int wsvqa_read_packet(AVFormatContext *s,
if (chunk_type == SND2_TAG) {
pkt->stream_index = wsvqa->audio_stream_index;
-
- pkt->pts = 90000;
- pkt->pts *= wsvqa->audio_frame_counter;
- pkt->pts /= wsvqa->audio_samplerate;
-
/* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
wsvqa->audio_frame_counter += (chunk_size * 2) / wsvqa->audio_channels;
} else if(chunk_type == SND1_TAG) {
pkt->stream_index = wsvqa->audio_stream_index;
-
- pkt->pts = 90000;
- pkt->pts *= wsvqa->audio_frame_counter;
- pkt->pts /= wsvqa->audio_samplerate;
-
/* unpacked size is stored in header */
- wsvqa->audio_frame_counter += LE_16(pkt->data) / wsvqa->audio_channels;
+ wsvqa->audio_frame_counter += AV_RL16(pkt->data) / wsvqa->audio_channels;
} else {
pkt->stream_index = wsvqa->video_stream_index;
- pkt->pts = wsvqa->video_pts;
wsvqa->video_pts += VQA_VIDEO_PTS_INC;
}
/* stay on 16-bit alignment */
diff --git a/contrib/ffmpeg/libavformat/wv.c b/contrib/ffmpeg/libavformat/wv.c
index 2de07fe3f..ed1eefeea 100644
--- a/contrib/ffmpeg/libavformat/wv.c
+++ b/contrib/ffmpeg/libavformat/wv.c
@@ -50,8 +50,10 @@ static const int wv_rates[16] = {
typedef struct{
uint32_t blksize, flags;
int rate, chan, bpp;
+ uint32_t samples, soff;
int block_parsed;
uint8_t extra[WV_EXTRA_SIZE];
+ int64_t pos;
}WVContext;
static int wv_probe(AVProbeData *p)
@@ -73,6 +75,7 @@ static int wv_read_block_header(AVFormatContext *ctx, ByteIOContext *pb)
int size;
int rate, bpp, chan;
+ wc->pos = url_ftell(pb);
tag = get_le32(pb);
if (tag != MKTAG('w', 'v', 'p', 'k'))
return -1;
@@ -89,10 +92,10 @@ static int wv_read_block_header(AVFormatContext *ctx, ByteIOContext *pb)
}
get_byte(pb); // track no
get_byte(pb); // track sub index
- get_le32(pb); // total samples in file
- get_le32(pb); // offset in samples of current block
+ wc->samples = get_le32(pb); // total samples in file
+ wc->soff = get_le32(pb); // offset in samples of current block
get_buffer(pb, wc->extra, WV_EXTRA_SIZE);
- wc->flags = LE_32(wc->extra + 4);
+ wc->flags = AV_RL32(wc->extra + 4);
//parse flags
if(wc->flags & WV_FLOAT){
av_log(ctx, AV_LOG_ERROR, "Floating point data is not supported\n");
@@ -155,6 +158,8 @@ static int wv_read_header(AVFormatContext *s,
st->codec->sample_rate = wc->rate;
st->codec->bits_per_sample = wc->bpp;
av_set_pts_info(st, 64, 1, wc->rate);
+ s->start_time = 0;
+ s->duration = (int64_t)wc->samples * AV_TIME_BASE / st->codec->sample_rate;
return 0;
}
@@ -165,7 +170,7 @@ static int wv_read_packet(AVFormatContext *s,
int ret;
if (url_feof(&s->pb))
- return -EIO;
+ return AVERROR(EIO);
if(wc->block_parsed){
if(wv_read_block_header(s, &s->pb) < 0)
return -1;
@@ -182,7 +187,8 @@ static int wv_read_packet(AVFormatContext *s,
pkt->stream_index = 0;
wc->block_parsed = 1;
pkt->size = ret + WV_EXTRA_SIZE;
-
+ pkt->pts = wc->soff;
+ av_add_index_entry(s->streams[0], wc->pos, pkt->pts, 0, 0, AVINDEX_KEYFRAME);
return 0;
}
@@ -191,6 +197,38 @@ static int wv_read_close(AVFormatContext *s)
return 0;
}
+static int wv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+ AVStream *st = s->streams[stream_index];
+ WVContext *wc = s->priv_data;
+ AVPacket pkt1, *pkt = &pkt1;
+ int ret;
+ int index = av_index_search_timestamp(st, timestamp, flags);
+ int64_t pos, pts;
+
+ /* if found, seek there */
+ if (index >= 0){
+ wc->block_parsed = 1;
+ url_fseek(&s->pb, st->index_entries[index].pos, SEEK_SET);
+ return 0;
+ }
+ /* if timestamp is out of bounds, return error */
+ if(timestamp < 0 || timestamp >= s->duration)
+ return -1;
+
+ pos = url_ftell(&s->pb);
+ do{
+ ret = av_read_frame(s, pkt);
+ if (ret < 0){
+ url_fseek(&s->pb, pos, SEEK_SET);
+ return -1;
+ }
+ pts = pkt->pts;
+ av_free_packet(pkt);
+ }while(pts < timestamp);
+ return 0;
+}
+
AVInputFormat wv_demuxer = {
"wv",
"WavPack",
@@ -199,4 +237,5 @@ AVInputFormat wv_demuxer = {
wv_read_header,
wv_read_packet,
wv_read_close,
+ wv_read_seek,
};
diff --git a/contrib/ffmpeg/libavformat/x11grab.c b/contrib/ffmpeg/libavformat/x11grab.c
new file mode 100644
index 000000000..8916d799a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/x11grab.c
@@ -0,0 +1,529 @@
+/*
+ * X11 video grab interface
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg integration:
+ * Copyright (C) 2006 Clemens Fruhwirth <clemens@endorphin.org>
+ * Edouard Gomez <ed.gomez@free.fr>
+ *
+ * This file contains code from grab.c:
+ * Copyright (c) 2000-2001 Fabrice Bellard
+ *
+ * This file contains code from the xvidcap project:
+ * Copyright (C) 1997-1998 Rasca, Berlin
+ * 2003-2004 Karl H. Beckers, Frankfurt
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file x11grab.c
+ * X11 frame device demuxer by Clemens Fruhwirth <clemens@endorphin.org>
+ * and Edouard Gomez <ed.gomez@free.fr>.
+ */
+
+#include "avformat.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#define _LINUX_TIME_H 1
+#include <time.h>
+#include <X11/X.h>
+#include <X11/Xlib.h>
+#include <X11/Xlibint.h>
+#include <X11/Xproto.h>
+#include <X11/Xutil.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <X11/extensions/XShm.h>
+
+/**
+ * X11 Device Demuxer context
+ */
+typedef struct x11_grab_s
+{
+ int frame_size; /**< Size in bytes of a grabbed frame */
+ AVRational time_base; /**< Time base */
+ int64_t time_frame; /**< Current time */
+
+ int height; /**< Height of the grab frame */
+ int width; /**< Width of the grab frame */
+ int x_off; /**< Horizontal top-left corner coordinate */
+ int y_off; /**< Vertical top-left corner coordinate */
+
+ Display *dpy; /**< X11 display from which x11grab grabs frames */
+ XImage *image; /**< X11 image holding the grab */
+ int use_shm; /**< !0 when using XShm extension */
+ XShmSegmentInfo shminfo; /**< When using XShm, keeps track of XShm infos */
+ int mouse_warning_shown;
+} x11_grab_t;
+
+/**
+ * Initializes the x11 grab device demuxer (public device demuxer API).
+ *
+ * @param s1 Context from avformat core
+ * @param ap Parameters from avformat core
+ * @return <ul>
+ * <li>ENOMEM no memory left</li>
+ * <li>AVERROR_IO other failure case</li>
+ * <li>0 success</li>
+ * </ul>
+ */
+static int
+x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ x11_grab_t *x11grab = s1->priv_data;
+ Display *dpy;
+ AVStream *st = NULL;
+ int input_pixfmt;
+ XImage *image;
+ int x_off = 0;
+ int y_off = 0;
+ int use_shm;
+ char *param, *offset;
+
+ param = av_strdup(s1->filename);
+ offset = strchr(param, '+');
+ if (offset) {
+ sscanf(offset, "%d,%d", &x_off, &y_off);
+ *offset= 0;
+ }
+
+ av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n", s1->filename, param, x_off, y_off, ap->width, ap->height);
+
+ dpy = XOpenDisplay(param);
+ if(!dpy) {
+ av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
+ return AVERROR_IO;
+ }
+
+ if (!ap || ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
+ av_log(s1, AV_LOG_ERROR, "AVParameters don't have any video size. Use -s.\n");
+ return AVERROR_IO;
+ }
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return AVERROR(ENOMEM);
+ }
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ use_shm = XShmQueryExtension(dpy);
+ av_log(s1, AV_LOG_INFO, "shared memory extension %s found\n", use_shm ? "" : "not");
+
+ if(use_shm) {
+ int scr = XDefaultScreen(dpy);
+ image = XShmCreateImage(dpy,
+ DefaultVisual(dpy, scr),
+ DefaultDepth(dpy, scr),
+ ZPixmap,
+ NULL,
+ &x11grab->shminfo,
+ ap->width, ap->height);
+ x11grab->shminfo.shmid = shmget(IPC_PRIVATE,
+ image->bytes_per_line * image->height,
+ IPC_CREAT|0777);
+ if (x11grab->shminfo.shmid == -1) {
+ av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n");
+ return AVERROR(ENOMEM);
+ }
+ x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0);
+ x11grab->shminfo.readOnly = False;
+
+ if (!XShmAttach(dpy, &x11grab->shminfo)) {
+ av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n");
+ /* needs some better error subroutine :) */
+ return AVERROR_IO;
+ }
+ } else {
+ image = XGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)),
+ x_off,y_off,
+ ap->width,ap->height,
+ AllPlanes, ZPixmap);
+ }
+
+ switch (image->bits_per_pixel) {
+ case 8:
+ av_log (s1, AV_LOG_DEBUG, "8 bit palette\n");
+ input_pixfmt = PIX_FMT_PAL8;
+ break;
+ case 16:
+ if ( image->red_mask == 0xf800 &&
+ image->green_mask == 0x07e0 &&
+ image->blue_mask == 0x001f ) {
+ av_log (s1, AV_LOG_DEBUG, "16 bit RGB565\n");
+ input_pixfmt = PIX_FMT_RGB565;
+ } else if (image->red_mask == 0x7c00 &&
+ image->green_mask == 0x03e0 &&
+ image->blue_mask == 0x001f ) {
+ av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n");
+ input_pixfmt = PIX_FMT_RGB555;
+ } else {
+ av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
+ av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
+ return AVERROR_IO;
+ }
+ break;
+ case 24:
+ if ( image->red_mask == 0xff0000 &&
+ image->green_mask == 0x00ff00 &&
+ image->blue_mask == 0x0000ff ) {
+ input_pixfmt = PIX_FMT_BGR24;
+ } else if ( image->red_mask == 0x0000ff &&
+ image->green_mask == 0x00ff00 &&
+ image->blue_mask == 0xff0000 ) {
+ input_pixfmt = PIX_FMT_RGB24;
+ } else {
+ av_log(s1, AV_LOG_ERROR,"rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
+ av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
+ return AVERROR_IO;
+ }
+ break;
+ case 32:
+#if 0
+ GetColorInfo (image, &c_info);
+ if ( c_info.alpha_mask == 0xff000000 && image->green_mask == 0x0000ff00) {
+ /* byte order is relevant here, not endianness
+ * endianness is handled by avcodec, but atm no such thing
+ * as having ABGR, instead of ARGB in a word. Since we
+ * need this for Solaris/SPARC, but need to do the conversion
+ * for every frame we do it outside of this loop, cf. below
+ * this matches both ARGB32 and ABGR32 */
+ input_pixfmt = PIX_FMT_ARGB32;
+ } else {
+ av_log(s1, AV_LOG_ERROR,"image depth %i not supported ... aborting\n", image->bits_per_pixel);
+ return AVERROR_IO;
+ }
+#endif
+ input_pixfmt = PIX_FMT_RGB32;
+ break;
+ default:
+ av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel);
+ return -1;
+ }
+
+ x11grab->frame_size = ap->width * ap->height * image->bits_per_pixel/8;
+ x11grab->dpy = dpy;
+ x11grab->width = ap->width;
+ x11grab->height = ap->height;
+ x11grab->time_base = ap->time_base;
+ x11grab->time_frame = av_gettime() / av_q2d(ap->time_base);
+ x11grab->x_off = x_off;
+ x11grab->y_off = y_off;
+ x11grab->image = image;
+ x11grab->use_shm = use_shm;
+ x11grab->mouse_warning_shown = 0;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = ap->width;
+ st->codec->height = ap->height;
+ st->codec->pix_fmt = input_pixfmt;
+ st->codec->time_base = ap->time_base;
+ st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(ap->time_base) * 8;
+
+ return 0;
+}
+
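Everything the grabber needs comes from the filename and AVFormatParameters: the part before '+' is handed to XOpenDisplay(), the "+x,y" suffix selects the top-left corner, and width/height/time_base arrive through ap (the "Use -s." error above covers the missing-size case). A hedged caller-side sketch for a 640x480 grab at offset 10,20 of display :0.0:

AVFormatParameters ap;
AVFormatContext *ic;

memset(&ap, 0, sizeof(ap));
ap.width     = 640;
ap.height    = 480;
ap.time_base = (AVRational){1, 25};          /* 25 fps */

if (av_open_input_file(&ic, ":0.0+10,20", &x11_grab_device_demuxer, 0, &ap) < 0)
    av_log(NULL, AV_LOG_ERROR, "could not open X11 grab\n");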
+/**
+ * Get pointer coordinates from X11.
+ *
+ * @param x Integer where horizontal coordinate will be returned
+ * @param y Integer where vertical coordinate will be returned
+ * @param dpy X11 display from where pointer coordinates are retrieved
+ * @param s1 Context used for logging errors if necessary
+ */
+static void
+get_pointer_coordinates(int *x, int *y, Display *dpy, AVFormatContext *s1)
+{
+ Window mrootwindow, childwindow;
+ int dummy;
+
+ mrootwindow = DefaultRootWindow(dpy);
+
+ if (XQueryPointer(dpy, mrootwindow, &mrootwindow, &childwindow,
+ x, y, &dummy, &dummy, (unsigned int*)&dummy)) {
+ } else {
+ x11_grab_t *s = s1->priv_data;
+ if (!s->mouse_warning_shown) {
+ av_log(s1, AV_LOG_INFO, "couldn't find mouse pointer\n");
+ s->mouse_warning_shown = 1;
+ }
+ *x = -1;
+ *y = -1;
+ }
+}
+
+/**
+ * Mouse painting helper function that applies an 'and' and 'or' mask pair to
+ * '*dst' pixel. It actually draws a mouse pointer pixel into the grabbed frame.
+ *
+ * @param dst Destination pixel
+ * @param and Part of the mask that must be applied using a bitwise 'and'
+ * operator
+ * @param or Part of the mask that must be applied using a bitwise 'or'
+ * operator
+ * @param bits_per_pixel Bits per pixel used in the grabbed image
+ */
+static void inline
+apply_masks(uint8_t *dst, int and, int or, int bits_per_pixel)
+{
+ switch (bits_per_pixel) {
+ case 32:
+ *(uint32_t*)dst = (*(uint32_t*)dst & and) | or;
+ break;
+ case 16:
+ *(uint16_t*)dst = (*(uint16_t*)dst & and) | or;
+ break;
+ case 8:
+ *dst = !!or;
+ break;
+ }
+}
+
+/**
+ * Paints a mouse pointer in an X11 image.
+ *
+ * @param image Image where to paint the mouse pointer
+ * @param s context used to retrieve original grabbing rectangle
+ * coordinates
+ * @param x Mouse pointer coordinate
+ * @param y Mouse pointer coordinate
+ */
+static void
+paint_mouse_pointer(XImage *image, x11_grab_t *s, int x, int y)
+{
+ /* 16x20x1bpp bitmap for the black channel of the mouse pointer */
+ static const uint16_t const mousePointerBlack[] =
+ {
+ 0x0000, 0x0003, 0x0005, 0x0009, 0x0011,
+ 0x0021, 0x0041, 0x0081, 0x0101, 0x0201,
+ 0x03c1, 0x0049, 0x0095, 0x0093, 0x0120,
+ 0x0120, 0x0240, 0x0240, 0x0380, 0x0000
+ };
+
+ /* 16x20x1bpp bitmap for the white channel of the mouse pointer */
+ static const uint16_t const mousePointerWhite[] =
+ {
+ 0x0000, 0x0000, 0x0002, 0x0006, 0x000e,
+ 0x001e, 0x003e, 0x007e, 0x00fe, 0x01fe,
+ 0x003e, 0x0036, 0x0062, 0x0060, 0x00c0,
+ 0x00c0, 0x0180, 0x0180, 0x0000, 0x0000
+ };
+
+ int x_off = s->x_off;
+ int y_off = s->y_off;
+ int width = s->width;
+ int height = s->height;
+
+ if ( x - x_off >= 0 && x < width + x_off
+ && y - y_off >= 0 && y < height + y_off) {
+ uint8_t *im_data = (uint8_t*)image->data;
+ int bytes_per_pixel;
+ int line;
+ int masks;
+
+ /* Select correct masks and pixel size */
+ if (image->bits_per_pixel == 8) {
+ masks = 1;
+ } else {
+ masks = (image->red_mask|image->green_mask|image->blue_mask);
+ }
+ bytes_per_pixel = image->bits_per_pixel>>3;
+
+ /* Shift to right line */
+ im_data += image->bytes_per_line * (y - y_off);
+ /* Shift to right pixel in the line */
+ im_data += bytes_per_pixel * (x - x_off);
+
+ /* Draw the cursor - proper loop */
+ for (line = 0; line < FFMIN(20, (y_off + height) - y); line++) {
+ uint8_t *cursor = im_data;
+ int column;
+ uint16_t bm_b;
+ uint16_t bm_w;
+
+ bm_b = mousePointerBlack[line];
+ bm_w = mousePointerWhite[line];
+
+ for (column = 0; column < FFMIN(16, (x_off + width) - x); column++) {
+ apply_masks(cursor, ~(masks*(bm_b&1)), masks*(bm_w&1),
+ image->bits_per_pixel);
+ cursor += bytes_per_pixel;
+ bm_b >>= 1;
+ bm_w >>= 1;
+ }
+ im_data += image->bytes_per_line;
+ }
+ }
+}
+
+
+/**
+ * Reads new data in the image structure.
+ *
+ * @param dpy X11 display to grab from
+ * @param d
+ * @param image Image where the grab will be put
+ * @param x Top-Left grabbing rectangle horizontal coordinate
+ * @param y Top-Left grabbing rectangle vertical coordinate
+ * @return 0 if error, !0 if successful
+ */
+static int
+xget_zpixmap(Display *dpy, Drawable d, XImage *image, int x, int y)
+{
+ xGetImageReply rep;
+ xGetImageReq *req;
+ long nbytes;
+
+ if (!image) {
+ return 0;
+ }
+
+ LockDisplay(dpy);
+ GetReq(GetImage, req);
+
+ /* First set up the standard stuff in the request */
+ req->drawable = d;
+ req->x = x;
+ req->y = y;
+ req->width = image->width;
+ req->height = image->height;
+ req->planeMask = (unsigned int)AllPlanes;
+ req->format = ZPixmap;
+
+ if (!_XReply(dpy, (xReply *)&rep, 0, xFalse) || !rep.length) {
+ UnlockDisplay(dpy);
+ SyncHandle();
+ return 0;
+ }
+
+ nbytes = (long)rep.length << 2;
+ _XReadPad(dpy, image->data, nbytes);
+
+ UnlockDisplay(dpy);
+ SyncHandle();
+ return 1;
+}
+
+/**
+ * Grabs a frame from x11 (public device demuxer API).
+ *
+ * @param s1 Context from avformat core
+ * @param pkt Packet holding the grabbed frame
+ * @return frame size in bytes
+ */
+static int
+x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ x11_grab_t *s = s1->priv_data;
+ Display *dpy = s->dpy;
+ XImage *image = s->image;
+ int x_off = s->x_off;
+ int y_off = s->y_off;
+
+ int64_t curtime, delay;
+ struct timespec ts;
+
+ /* Calculate the time of the next frame */
+ s->time_frame += INT64_C(1000000);
+
+ /* wait based on the frame rate */
+ for(;;) {
+ curtime = av_gettime();
+ delay = s->time_frame * av_q2d(s->time_base) - curtime;
+ if (delay <= 0) {
+ if (delay < INT64_C(-1000000) * av_q2d(s->time_base)) {
+ s->time_frame += INT64_C(1000000);
+ }
+ break;
+ }
+ ts.tv_sec = delay / 1000000;
+ ts.tv_nsec = (delay % 1000000) * 1000;
+ nanosleep(&ts, NULL);
+ }
+
+ if (av_new_packet(pkt, s->frame_size) < 0) {
+ return AVERROR_IO;
+ }
+
+ pkt->pts = curtime;
+
+ if(s->use_shm) {
+ if (!XShmGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)), image, x_off, y_off, AllPlanes)) {
+ av_log (s1, AV_LOG_INFO, "XShmGetImage() failed\n");
+ }
+ } else {
+ if (!xget_zpixmap(dpy, RootWindow(dpy, DefaultScreen(dpy)), image, x_off, y_off)) {
+ av_log (s1, AV_LOG_INFO, "XGetZPixmap() failed\n");
+ }
+ }
+
+ {
+ int pointer_x, pointer_y;
+ get_pointer_coordinates(&pointer_x, &pointer_y, dpy, s1);
+ paint_mouse_pointer(image, s, pointer_x, pointer_y);
+ }
+
+
+ /* XXX: avoid memcpy */
+ memcpy(pkt->data, image->data, s->frame_size);
+ return s->frame_size;
+}
+
+/**
+ * Closes x11 frame grabber (public device demuxer API).
+ *
+ * @param s1 Context from avformat core
+ * @return 0 success, !0 failure
+ */
+static int
+x11grab_read_close(AVFormatContext *s1)
+{
+ x11_grab_t *x11grab = s1->priv_data;
+
+ /* Detach cleanly from shared mem */
+ if (x11grab->use_shm) {
+ XShmDetach(x11grab->dpy, &x11grab->shminfo);
+ shmdt(x11grab->shminfo.shmaddr);
+ shmctl(x11grab->shminfo.shmid, IPC_RMID, NULL);
+ }
+
+ /* Destroy X11 image */
+ if (x11grab->image) {
+ XDestroyImage(x11grab->image);
+ x11grab->image = NULL;
+ }
+
+ /* Free X11 display */
+ XCloseDisplay(x11grab->dpy);
+ return 0;
+}
+
+/** x11 grabber device demuxer declaration */
+AVInputFormat x11_grab_device_demuxer =
+{
+ "x11grab",
+ "X11grab",
+ sizeof(x11_grab_t),
+ NULL,
+ x11grab_read_header,
+ x11grab_read_packet,
+ x11grab_read_close,
+ .flags = AVFMT_NOFILE,
+};
diff --git a/contrib/ffmpeg/libavformat/yuv.c b/contrib/ffmpeg/libavformat/yuv.c
deleted file mode 100644
index fe52cdea5..000000000
--- a/contrib/ffmpeg/libavformat/yuv.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * .Y.U.V image format
- * Copyright (c) 2003 Fabrice Bellard.
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "avformat.h"
-
-static int sizes[][2] = {
- { 640, 480 },
- { 720, 480 },
- { 720, 576 },
- { 352, 288 },
- { 352, 240 },
- { 160, 128 },
- { 512, 384 },
- { 640, 352 },
- { 640, 240 },
-};
-
-static int infer_size(int *width_ptr, int *height_ptr, int size)
-{
- int i;
-
- for(i=0;i<sizeof(sizes)/sizeof(sizes[0]);i++) {
- if ((sizes[i][0] * sizes[i][1]) == size) {
- *width_ptr = sizes[i][0];
- *height_ptr = sizes[i][1];
- return 0;
- }
- }
- return -1;
-}
-
-static int yuv_read(ByteIOContext *f,
- int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
-{
- ByteIOContext pb1, *pb = &pb1;
- int img_size, ret;
- char fname[1024], *p;
- int size;
- URLContext *h;
- AVImageInfo info1, *info = &info1;
-
- img_size = url_fsize(f);
-
- /* XXX: hack hack */
- h = url_fileno(f);
- url_get_filename(h, fname, sizeof(fname));
-
- if (infer_size(&info->width, &info->height, img_size) < 0) {
- return AVERROR_IO;
- }
- info->pix_fmt = PIX_FMT_YUV420P;
-
- ret = alloc_cb(opaque, info);
- if (ret)
- return ret;
-
- size = info->width * info->height;
-
- p = strrchr(fname, '.');
- if (!p || p[1] != 'Y')
- return AVERROR_IO;
-
- get_buffer(f, info->pict.data[0], size);
-
- p[1] = 'U';
- if (url_fopen(pb, fname, URL_RDONLY) < 0)
- return AVERROR_IO;
-
- get_buffer(pb, info->pict.data[1], size / 4);
- url_fclose(pb);
-
- p[1] = 'V';
- if (url_fopen(pb, fname, URL_RDONLY) < 0)
- return AVERROR_IO;
-
- get_buffer(pb, info->pict.data[2], size / 4);
- url_fclose(pb);
- return 0;
-}
-
-static int yuv_write(ByteIOContext *pb2, AVImageInfo *info)
-{
- ByteIOContext pb1, *pb;
- char fname[1024], *p;
- int i, j, width, height;
- uint8_t *ptr;
- URLContext *h;
- static const char *ext = "YUV";
-
- /* XXX: hack hack */
- h = url_fileno(pb2);
- url_get_filename(h, fname, sizeof(fname));
-
- p = strrchr(fname, '.');
- if (!p || p[1] != 'Y')
- return AVERROR_IO;
-
- width = info->width;
- height = info->height;
-
- for(i=0;i<3;i++) {
- if (i == 1) {
- width >>= 1;
- height >>= 1;
- }
-
- if (i >= 1) {
- pb = &pb1;
- p[1] = ext[i];
- if (url_fopen(pb, fname, URL_WRONLY) < 0)
- return AVERROR_IO;
- } else {
- pb = pb2;
- }
-
- ptr = info->pict.data[i];
- for(j=0;j<height;j++) {
- put_buffer(pb, ptr, width);
- ptr += info->pict.linesize[i];
- }
- put_flush_packet(pb);
- if (i >= 1) {
- url_fclose(pb);
- }
- }
- return 0;
-}
-
-static int yuv_probe(AVProbeData *pd)
-{
- if (match_ext(pd->filename, "Y"))
- return AVPROBE_SCORE_MAX;
- else
- return 0;
-}
-
-AVImageFormat yuv_image_format = {
- "yuv",
- "Y",
- yuv_probe,
- yuv_read,
- (1 << PIX_FMT_YUV420P),
- yuv_write,
-};