summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Melanson <mike@multimedia.cx>2003-06-21 03:58:57 +0000
committerMike Melanson <mike@multimedia.cx>2003-06-21 03:58:57 +0000
commit2a06f90f75f6e99af8b39d41e78dc6f6850fe68d (patch)
treee215f9e2dbfef6eea251880179c3f73c6765f489
parent7ec65b9aeaded392195543631da79bcf912382ab (diff)
downloadxine-lib-2a06f90f75f6e99af8b39d41e78dc6f6850fe68d.tar.gz
xine-lib-2a06f90f75f6e99af8b39d41e78dc6f6850fe68d.tar.bz2
major purpose of this update is to import the rearranged MC edge
emulation facilities CVS patchset: 5079 CVS date: 2003/06/21 03:58:57
-rw-r--r--src/libffmpeg/libavcodec/Makefile.am1
-rw-r--r--src/libffmpeg/libavcodec/avcodec.h4
-rw-r--r--src/libffmpeg/libavcodec/dsputil.h2
-rw-r--r--src/libffmpeg/libavcodec/h264.c9
-rw-r--r--src/libffmpeg/libavcodec/huffyuv.c4
-rw-r--r--src/libffmpeg/libavcodec/mjpeg.c439
-rw-r--r--src/libffmpeg/libavcodec/mpeg12.c3
-rw-r--r--src/libffmpeg/libavcodec/mpegvideo.c76
-rw-r--r--src/libffmpeg/libavcodec/mpegvideo.h4
-rw-r--r--src/libffmpeg/libavcodec/ra288.c91
-rw-r--r--src/libffmpeg/libavcodec/svq1_cb.h4
-rw-r--r--src/libffmpeg/libavcodec/wmv2.c6
12 files changed, 520 insertions, 123 deletions
diff --git a/src/libffmpeg/libavcodec/Makefile.am b/src/libffmpeg/libavcodec/Makefile.am
index 17307c0c2..a2f814d62 100644
--- a/src/libffmpeg/libavcodec/Makefile.am
+++ b/src/libffmpeg/libavcodec/Makefile.am
@@ -38,6 +38,7 @@ libavcodec_la_SOURCES = \
mace.c \
mem.c \
mjpeg.c \
+ motion_est.c \
mpeg12.c \
mpegaudiodec.c \
mpegvideo.c \
diff --git a/src/libffmpeg/libavcodec/avcodec.h b/src/libffmpeg/libavcodec/avcodec.h
index c2017706b..6daf668c6 100644
--- a/src/libffmpeg/libavcodec/avcodec.h
+++ b/src/libffmpeg/libavcodec/avcodec.h
@@ -31,6 +31,7 @@ enum CodecID {
CODEC_ID_AC3,
CODEC_ID_MJPEG,
CODEC_ID_MJPEGB,
+ CODEC_ID_LJPEG,
CODEC_ID_MPEG4,
CODEC_ID_RAWVIDEO,
CODEC_ID_MSMPEG4V1,
@@ -98,7 +99,7 @@ enum PixelFormat {
PIX_FMT_BGR24, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
PIX_FMT_YUV422P, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
PIX_FMT_YUV444P, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
- PIX_FMT_RGBA32, ///< always stored in cpu endianness
+ PIX_FMT_RGBA32, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
PIX_FMT_YUV410P, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
PIX_FMT_YUV411P, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
PIX_FMT_RGB565, ///< always stored in cpu endianness
@@ -1226,6 +1227,7 @@ extern AVCodec h263_encoder;
extern AVCodec h263p_encoder;
extern AVCodec rv10_encoder;
extern AVCodec mjpeg_encoder;
+extern AVCodec ljpeg_encoder;
extern AVCodec mpeg4_encoder;
extern AVCodec msmpeg4v1_encoder;
extern AVCodec msmpeg4v2_encoder;
diff --git a/src/libffmpeg/libavcodec/dsputil.h b/src/libffmpeg/libavcodec/dsputil.h
index 190ea10f1..5dddfbe3e 100644
--- a/src/libffmpeg/libavcodec/dsputil.h
+++ b/src/libffmpeg/libavcodec/dsputil.h
@@ -167,7 +167,7 @@ typedef struct DSPContext {
/**
* Halfpel motion compensation with rounding (a+b+1)>>1.
- * this is an array[4][4] of motion compensation funcions for 4
+ * this is an array[4][4] of motion compensation functions for 4
* horizontal blocksizes (2,4,8,16) and the 4 halfpel positions<br>
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ]
* @param block destination into which the result is averaged (a+b+1)>>1
diff --git a/src/libffmpeg/libavcodec/h264.c b/src/libffmpeg/libavcodec/h264.c
index 0df229b53..97af5b95c 100644
--- a/src/libffmpeg/libavcodec/h264.c
+++ b/src/libffmpeg/libavcodec/h264.c
@@ -197,9 +197,10 @@ typedef struct H264Context{
int halfpel_flag;
int thirdpel_flag;
+
int unknown_svq3_flag;
int next_slice_index;
-
+
SPS sps_buffer[MAX_SPS_COUNT];
SPS sps; ///< current sps
@@ -1897,7 +1898,7 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square,
|| full_my < 0-extra_height
|| full_mx + 16/*FIXME*/ > s->width + extra_width
|| full_my + 16/*FIXME*/ > s->height + extra_height){
- ff_emulated_edge_mc(s, src_y - 2 - 2*s->linesize, s->linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, s->width, s->height);
+ ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*s->linesize, s->linesize, 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, s->width, s->height);
src_y= s->edge_emu_buffer + 2 + 2*s->linesize;
emu=1;
}
@@ -1910,13 +1911,13 @@ static inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square,
if(s->flags&CODEC_FLAG_GRAY) return;
if(emu){
- ff_emulated_edge_mc(s, src_cb, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
src_cb= s->edge_emu_buffer;
}
chroma_op(dest_cb, src_cb, s->uvlinesize, chroma_height, mx&7, my&7);
if(emu){
- ff_emulated_edge_mc(s, src_cr, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, s->uvlinesize, 9, 9/*FIXME*/, (mx>>3), (my>>3), s->width>>1, s->height>>1);
src_cr= s->edge_emu_buffer;
}
chroma_op(dest_cr, src_cr, s->uvlinesize, chroma_height, mx&7, my&7);
diff --git a/src/libffmpeg/libavcodec/huffyuv.c b/src/libffmpeg/libavcodec/huffyuv.c
index 26058b19e..53c0df0c0 100644
--- a/src/libffmpeg/libavcodec/huffyuv.c
+++ b/src/libffmpeg/libavcodec/huffyuv.c
@@ -1124,14 +1124,14 @@ static int encode_end(AVCodecContext *avctx)
return 0;
}
-#endif //CONFIG_ENCODERS
-
static const AVOption huffyuv_options[] =
{
AVOPTION_CODEC_INT("prediction_method", "prediction_method", prediction_method, 0, 2, 0),
AVOPTION_END()
};
+#endif //CONFIG_ENCODERS
+
AVCodec huffyuv_decoder = {
"huffyuv",
CODEC_TYPE_VIDEO,
diff --git a/src/libffmpeg/libavcodec/mjpeg.c b/src/libffmpeg/libavcodec/mjpeg.c
index 22530c9e4..ec8046446 100644
--- a/src/libffmpeg/libavcodec/mjpeg.c
+++ b/src/libffmpeg/libavcodec/mjpeg.c
@@ -27,6 +27,8 @@
*/
//#define DEBUG
+#include <assert.h>
+
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
@@ -36,7 +38,7 @@
#undef TWOMATRIXES
typedef struct MJpegContext {
- uint8_t huff_size_dc_luminance[12];
+ uint8_t huff_size_dc_luminance[12]; //FIXME use array [3] instead of lumi / chrom, for easier addressing
uint16_t huff_code_dc_luminance[12];
uint8_t huff_size_dc_chrominance[12];
uint16_t huff_code_dc_chrominance[12];
@@ -279,6 +281,17 @@ void mjpeg_close(MpegEncContext *s)
av_free(s->mjpeg_ctx);
}
+#define PREDICT(ret, topleft, top, left, predictor)\
+ switch(predictor){\
+ case 1: ret= left; break;\
+ case 2: ret= top; break;\
+ case 3: ret= topleft; break;\
+ case 4: ret= left + top - topleft; break;\
+ case 5: ret= left + ((top - topleft)>>1); break;\
+ case 6: ret= top + ((left - topleft)>>1); break;\
+ case 7: ret= (left + top)>>1; break;\
+ }
+
static inline void put_marker(PutBitContext *p, int code)
{
put_bits(p, 8, 0xff);
@@ -356,7 +369,7 @@ static void jpeg_put_comments(MpegEncContext *s)
int size;
uint8_t *ptr;
- if (s->aspect_ratio_info)
+ if (s->aspect_ratio_info /* && !lossless */)
{
/* JFIF header */
put_marker(p, APP0);
@@ -405,6 +418,8 @@ static void jpeg_put_comments(MpegEncContext *s)
void mjpeg_picture_header(MpegEncContext *s)
{
+ const int lossless= s->avctx->codec_id == CODEC_ID_LJPEG;
+
put_marker(&s->pb, SOI);
if (!s->mjpeg_data_only_frames)
@@ -413,10 +428,13 @@ void mjpeg_picture_header(MpegEncContext *s)
if (s->mjpeg_write_tables) jpeg_table_header(s);
- put_marker(&s->pb, SOF0);
+ put_marker(&s->pb, lossless ? SOF3 : SOF0);
put_bits(&s->pb, 16, 17);
- put_bits(&s->pb, 8, 8); /* 8 bits/component */
+ if(lossless && s->avctx->pix_fmt == PIX_FMT_RGBA32)
+ put_bits(&s->pb, 8, 9); /* 9 bits/component RCT */
+ else
+ put_bits(&s->pb, 8, 8); /* 8 bits/component */
put_bits(&s->pb, 16, s->height);
put_bits(&s->pb, 16, s->width);
put_bits(&s->pb, 8, 3); /* 3 components */
@@ -432,7 +450,7 @@ void mjpeg_picture_header(MpegEncContext *s)
put_bits(&s->pb, 4, s->mjpeg_hsample[1]); /* H factor */
put_bits(&s->pb, 4, s->mjpeg_vsample[1]); /* V factor */
#ifdef TWOMATRIXES
- put_bits(&s->pb, 8, 1); /* select matrix */
+ put_bits(&s->pb, 8, lossless ? 0 : 1); /* select matrix */
#else
put_bits(&s->pb, 8, 0); /* select matrix */
#endif
@@ -442,7 +460,7 @@ void mjpeg_picture_header(MpegEncContext *s)
put_bits(&s->pb, 4, s->mjpeg_hsample[2]); /* H factor */
put_bits(&s->pb, 4, s->mjpeg_vsample[2]); /* V factor */
#ifdef TWOMATRIXES
- put_bits(&s->pb, 8, 1); /* select matrix */
+ put_bits(&s->pb, 8, lossless ? 0 : 1); /* select matrix */
#else
put_bits(&s->pb, 8, 0); /* select matrix */
#endif
@@ -461,15 +479,15 @@ void mjpeg_picture_header(MpegEncContext *s)
/* Cb component */
put_bits(&s->pb, 8, 2); /* index */
put_bits(&s->pb, 4, 1); /* DC huffman table index */
- put_bits(&s->pb, 4, 1); /* AC huffman table index */
+ put_bits(&s->pb, 4, lossless ? 0 : 1); /* AC huffman table index */
/* Cr component */
put_bits(&s->pb, 8, 3); /* index */
put_bits(&s->pb, 4, 1); /* DC huffman table index */
- put_bits(&s->pb, 4, 1); /* AC huffman table index */
+ put_bits(&s->pb, 4, lossless ? 0 : 1); /* AC huffman table index */
- put_bits(&s->pb, 8, 0); /* Ss (not used) */
- put_bits(&s->pb, 8, 63); /* Se (not used) */
+ put_bits(&s->pb, 8, lossless ? s->avctx->prediction_method+1 : 0); /* Ss (not used) */
+ put_bits(&s->pb, 8, lossless ? 0 : 63); /* Se (not used) */
put_bits(&s->pb, 8, 0); /* Ah/Al (not used) */
}
@@ -632,6 +650,146 @@ void mjpeg_encode_mb(MpegEncContext *s,
}
}
+static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
+ MpegEncContext * const s = avctx->priv_data;
+ MJpegContext * const m = s->mjpeg_ctx;
+ AVFrame *pict = data;
+ const int width= s->width;
+ const int height= s->height;
+ AVFrame * const p= (AVFrame*)&s->current_picture;
+ const int predictor= avctx->prediction_method+1;
+
+ init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
+
+ *p = *pict;
+ p->pict_type= FF_I_TYPE;
+ p->key_frame= 1;
+
+ mjpeg_picture_header(s);
+
+ s->header_bits= get_bit_count(&s->pb);
+
+ if(avctx->pix_fmt == PIX_FMT_RGBA32){
+ int x, y, i;
+ const int linesize= p->linesize[0];
+ uint16_t buffer[2048][4];
+ int left[3], top[3], topleft[3];
+
+ for(i=0; i<3; i++){
+ buffer[0][i]= 1 << (9 - 1);
+ }
+
+ for(y = 0; y < height; y++) {
+ const int modified_predictor= y ? 1 : predictor;
+ uint8_t *ptr = p->data[0] + (linesize * y);
+
+ for(i=0; i<3; i++){
+ top[i]= left[i]= topleft[i]= buffer[0][i];
+ }
+ for(x = 0; x < width; x++) {
+ buffer[x][1] = ptr[4*x+0] - ptr[4*x+1] + 0x100;
+ buffer[x][2] = ptr[4*x+2] - ptr[4*x+1] + 0x100;
+ buffer[x][0] = (ptr[4*x+0] + 2*ptr[4*x+1] + ptr[4*x+2])>>2;
+
+ for(i=0;i<3;i++) {
+ int pred, diff;
+
+ PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
+
+ topleft[i]= top[i];
+ top[i]= buffer[x+1][i];
+
+ left[i]= buffer[x][i];
+
+ diff= ((left[i] - pred + 0x100)&0x1FF) - 0x100;
+
+ if(i==0)
+ mjpeg_encode_dc(s, diff, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+ else
+ mjpeg_encode_dc(s, diff, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+ }
+ }
+ }
+ }else{
+ int mb_x, mb_y, i;
+ const int mb_width = (width + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
+ const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];
+
+ for(mb_y = 0; mb_y < mb_height; mb_y++) {
+ for(mb_x = 0; mb_x < mb_width; mb_x++) {
+ if(mb_x==0 || mb_y==0){
+ for(i=0;i<3;i++) {
+ uint8_t *ptr;
+ int x, y, h, v, linesize;
+ h = s->mjpeg_hsample[i];
+ v = s->mjpeg_vsample[i];
+ linesize= p->linesize[i];
+
+ for(y=0; y<v; y++){
+ for(x=0; x<h; x++){
+ int pred;
+
+ ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+ if(y==0 && mb_y==0){
+ if(x==0 && mb_x==0){
+ pred= 128;
+ }else{
+ pred= ptr[-1];
+ }
+ }else{
+ if(x==0 && mb_x==0){
+ pred= ptr[-linesize];
+ }else{
+ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
+ }
+ }
+
+ if(i==0)
+ mjpeg_encode_dc(s, (int8_t)(*ptr - pred), m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+ else
+ mjpeg_encode_dc(s, (int8_t)(*ptr - pred), m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+ }
+ }
+ }
+ }else{
+ for(i=0;i<3;i++) {
+ uint8_t *ptr;
+ int x, y, h, v, linesize;
+ h = s->mjpeg_hsample[i];
+ v = s->mjpeg_vsample[i];
+ linesize= p->linesize[i];
+
+ for(y=0; y<v; y++){
+ for(x=0; x<h; x++){
+ int pred;
+
+ ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+//printf("%d %d %d %d %8X\n", mb_x, mb_y, x, y, ptr);
+ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
+
+ if(i==0)
+ mjpeg_encode_dc(s, (int8_t)(*ptr - pred), m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+ else
+ mjpeg_encode_dc(s, (int8_t)(*ptr - pred), m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ emms_c();
+
+ mjpeg_picture_trailer(s);
+ s->picture_number++;
+
+ flush_put_bits(&s->pb);
+ return pbBufPtr(&s->pb) - s->pb.buf;
+// return (get_bit_count(&f->pb)+7)/8;
+}
+
+
/******************************************/
/* decoding */
@@ -653,6 +811,11 @@ typedef struct MJpegDecodeContext {
int first_picture; /* true if decoding first picture */
int interlaced; /* true if interlaced */
int bottom_field; /* true if bottom field */
+ int lossless;
+ int rgb;
+ int rct; /* standard rct */
+ int pegasus_rct; /* pegasus reversible colorspace transform */
+ int bits; /* bits per component */
int width, height;
int nb_components;
@@ -810,15 +973,21 @@ static int mjpeg_decode_dht(MJpegDecodeContext *s)
return 0;
}
-static int mjpeg_decode_sof0(MJpegDecodeContext *s)
+static int mjpeg_decode_sof(MJpegDecodeContext *s)
{
int len, nb_components, i, width, height;
/* XXX: verify len field validity */
len = get_bits(&s->gb, 16);
- /* only 8 bits/component accepted */
- if (get_bits(&s->gb, 8) != 8)
+ s->bits= get_bits(&s->gb, 8);
+
+ if(s->pegasus_rct) s->bits=9;
+ if(s->bits==9 && !s->pegasus_rct) s->rct=1; //FIXME ugly
+
+ if (s->bits != 8 && !s->lossless){
+ printf("only 8 bits/component accepted\n");
return -1;
+ }
height = get_bits(&s->gb, 16);
width = get_bits(&s->gb, 16);
dprintf("sof0: picture: %dx%d\n", width, height);
@@ -846,6 +1015,8 @@ static int mjpeg_decode_sof0(MJpegDecodeContext *s)
dprintf("component %d %d:%d id: %d quant:%d\n", i, s->h_count[i],
s->v_count[i], s->component_id[i], s->quant_index[i]);
}
+
+ if(s->v_max==1 && s->h_max==1 && s->lossless==1) s->rgb=1;
/* if different size, realloc/alloc picture */
/* XXX: also check h_count and v_count */
@@ -863,7 +1034,17 @@ static int mjpeg_decode_sof0(MJpegDecodeContext *s)
s->bottom_field = 0;
}
- for(i=0;i<nb_components;i++) {
+ if(s->rgb){
+ int w, h;
+ w = s->width;
+ h = s->height;
+ if (s->interlaced)
+ w *= 2;
+ s->linesize[0] = 4*w;
+ s->current_picture[0] = av_mallocz(4*w * h);
+ s->current_picture[1] = s->current_picture[2] = NULL;
+ }else{
+ for(i=0;i<nb_components;i++) {
int w, h;
w = (s->width + 8 * s->h_max - 1) / (8 * s->h_max);
h = (s->height + 8 * s->v_max - 1) / (8 * s->v_max);
@@ -878,6 +1059,7 @@ static int mjpeg_decode_sof0(MJpegDecodeContext *s)
dprintf("error: no picture buffers allocated\n");
return -1;
}
+ }
}
s->first_picture = 0;
}
@@ -959,7 +1141,7 @@ static int decode_block(MJpegDecodeContext *s, DCTELEM *block,
static int mjpeg_decode_sos(MJpegDecodeContext *s)
{
- int len, nb_components, i, j, n, h, v, ret;
+ int len, nb_components, i, j, n, h, v, ret, point_transform, predictor;
int mb_width, mb_height, mb_x, mb_y, vmax, hmax, index, id;
int comp_index[4];
int dc_index[4];
@@ -967,7 +1149,8 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
int nb_blocks[4];
int h_count[4];
int v_count[4];
-
+ const int block_size= s->lossless ? 1 : 8;
+
/* XXX: verify len field validity */
len = get_bits(&s->gb, 16);
nb_components = get_bits(&s->gb, 8);
@@ -998,6 +1181,7 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
}
comp_index[i] = index;
+
nb_blocks[i] = s->h_count[index] * s->v_count[index];
h_count[i] = s->h_count[index];
v_count[i] = s->v_count[index];
@@ -1025,29 +1209,178 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
break;
}
}
- skip_bits(&s->gb, 8); /* Ss */
+
+ predictor= get_bits(&s->gb, 8); /* lossless predictor or start of spectral (Ss) */
skip_bits(&s->gb, 8); /* Se */
- skip_bits(&s->gb, 8); /* Ah and Al (each are 4 bits) */
+ skip_bits(&s->gb, 4); /* Ah */
+ point_transform= get_bits(&s->gb, 4); /* Al */
for(i=0;i<nb_components;i++)
s->last_dc[i] = 1024;
if (nb_components > 1) {
/* interleaved stream */
- mb_width = (s->width + s->h_max * 8 - 1) / (s->h_max * 8);
- mb_height = (s->height + s->v_max * 8 - 1) / (s->v_max * 8);
+ mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
+ mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
} else {
h = s->h_max / s->h_count[comp_index[0]];
v = s->v_max / s->v_count[comp_index[0]];
- mb_width = (s->width + h * 8 - 1) / (h * 8);
- mb_height = (s->height + v * 8 - 1) / (v * 8);
+ mb_width = (s->width + h * block_size - 1) / (h * block_size);
+ mb_height = (s->height + v * block_size - 1) / (v * block_size);
nb_blocks[0] = 1;
h_count[0] = 1;
v_count[0] = 1;
}
+
+ if(s->avctx->debug & FF_DEBUG_PICT_INFO)
+ printf("%s %s p:%d >>:%d\n", s->lossless ? "lossless" : "sequencial DCT", s->rgb ? "RGB" : "", predictor, point_transform);
- for(mb_y = 0; mb_y < mb_height; mb_y++) {
+ if(s->lossless){
+ if(s->rgb){
+ uint16_t buffer[2048][4];
+ int left[3], top[3], topleft[3];
+ const int linesize= s->linesize[0];
+ const int mask= (1<<s->bits)-1;
+
+ for(i=0; i<3; i++){
+ buffer[0][i]= 1 << (s->bits + point_transform - 1);
+ }
+ for(mb_y = 0; mb_y < mb_height; mb_y++) {
+ const int modified_predictor= mb_y ? 1 : predictor;
+ uint8_t *ptr = s->current_picture[0] + (linesize * mb_y);
+
+ if (s->interlaced && s->bottom_field)
+ ptr += linesize >> 1;
+
+ for(i=0; i<3; i++){
+ top[i]= left[i]= topleft[i]= buffer[0][i];
+ }
+ for(mb_x = 0; mb_x < mb_width; mb_x++) {
+ if (s->restart_interval && !s->restart_count)
+ s->restart_count = s->restart_interval;
+
+ for(i=0;i<3;i++) {
+ int pred;
+
+ topleft[i]= top[i];
+ top[i]= buffer[mb_x][i];
+
+ PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
+
+ left[i]=
+ buffer[mb_x][i]= mask & (pred + (mjpeg_decode_dc(s, dc_index[i]) << point_transform));
+ }
+
+ if (s->restart_interval && !--s->restart_count) {
+ align_get_bits(&s->gb);
+ skip_bits(&s->gb, 16); /* skip RSTn */
+ }
+ }
+
+ if(s->rct){
+ for(mb_x = 0; mb_x < mb_width; mb_x++) {
+ ptr[4*mb_x+1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200)>>2);
+ ptr[4*mb_x+0] = buffer[mb_x][1] + ptr[4*mb_x+1];
+ ptr[4*mb_x+2] = buffer[mb_x][2] + ptr[4*mb_x+1];
+ }
+ }else if(s->pegasus_rct){
+ for(mb_x = 0; mb_x < mb_width; mb_x++) {
+ ptr[4*mb_x+1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2])>>2);
+ ptr[4*mb_x+0] = buffer[mb_x][1] + ptr[4*mb_x+1];
+ ptr[4*mb_x+2] = buffer[mb_x][2] + ptr[4*mb_x+1];
+ }
+ }else{
+ for(mb_x = 0; mb_x < mb_width; mb_x++) {
+ ptr[4*mb_x+0] = buffer[mb_x][0];
+ ptr[4*mb_x+1] = buffer[mb_x][1];
+ ptr[4*mb_x+2] = buffer[mb_x][2];
+ }
+ }
+ }
+ }else{
+ for(mb_y = 0; mb_y < mb_height; mb_y++) {
+ for(mb_x = 0; mb_x < mb_width; mb_x++) {
+ if (s->restart_interval && !s->restart_count)
+ s->restart_count = s->restart_interval;
+
+ if(mb_x==0 || mb_y==0 || s->interlaced){
+ for(i=0;i<nb_components;i++) {
+ uint8_t *ptr;
+ int x, y, c, linesize;
+ n = nb_blocks[i];
+ c = comp_index[i];
+ h = h_count[i];
+ v = v_count[i];
+ x = 0;
+ y = 0;
+ linesize= s->linesize[c];
+
+ for(j=0; j<n; j++) {
+ int pred;
+
+ ptr = s->current_picture[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+ if(y==0 && mb_y==0){
+ if(x==0 && mb_x==0){
+ pred= 128 << point_transform;
+ }else{
+ pred= ptr[-1];
+ }
+ }else{
+ if(x==0 && mb_x==0){
+ pred= ptr[-linesize];
+ }else{
+ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
+ }
+ }
+
+ if (s->interlaced && s->bottom_field)
+ ptr += linesize >> 1;
+ *ptr= pred + (mjpeg_decode_dc(s, dc_index[i]) << point_transform);
+
+ if (++x == h) {
+ x = 0;
+ y++;
+ }
+ }
+ }
+ }else{
+ for(i=0;i<nb_components;i++) {
+ uint8_t *ptr;
+ int x, y, c, linesize;
+ n = nb_blocks[i];
+ c = comp_index[i];
+ h = h_count[i];
+ v = v_count[i];
+ x = 0;
+ y = 0;
+ linesize= s->linesize[c];
+
+ for(j=0; j<n; j++) {
+ int pred;
+
+ ptr = s->current_picture[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+ PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
+ *ptr= pred + (mjpeg_decode_dc(s, dc_index[i]) << point_transform);
+ if (++x == h) {
+ x = 0;
+ y++;
+ }
+ }
+ }
+ }
+ if (s->restart_interval && !--s->restart_count) {
+ align_get_bits(&s->gb);
+ skip_bits(&s->gb, 16); /* skip RSTn */
+ }
+ }
+ }
+ }
+ }else{
+ for(mb_y = 0; mb_y < mb_height; mb_y++) {
for(mb_x = 0; mb_x < mb_width; mb_x++) {
+ if (s->restart_interval && !s->restart_count)
+ s->restart_count = s->restart_interval;
+
for(i=0;i<nb_components;i++) {
uint8_t *ptr;
int x, y, c;
@@ -1057,8 +1390,6 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
v = v_count[i];
x = 0;
y = 0;
- if (s->restart_interval && !s->restart_count)
- s->restart_count = s->restart_interval;
for(j=0;j<n;j++) {
memset(s->block, 0, sizeof(s->block));
if (decode_block(s, s->block, i,
@@ -1091,6 +1422,7 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
}
}
}
+ }
ret = 0;
the_end:
emms_c();
@@ -1123,6 +1455,10 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
id = be2me_32(id);
len -= 6;
+ if(s->avctx->debug & FF_DEBUG_STARTCODE){
+ printf("APPx %8X\n", id);
+ }
+
/* buggy AVID, it puts EOI only at every 10th frame */
/* also this fourcc is used by non-avid files too, it holds some
information, but it's always present in files created by AVID */
@@ -1135,7 +1471,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
4bytes field_size
4bytes field_size_less_padding
*/
-// s->buggy_avid = 1;
+ s->buggy_avid = 1;
// if (s->first_picture)
// printf("mjpeg: workarounding buggy AVID\n");
s->interlace_polarity = get_bits(&s->gb, 8);
@@ -1204,6 +1540,28 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
len -= 7;
goto out;
}
+
+ if (id == ff_get_fourcc("LJIF")){
+ printf("Pegasus lossless jpeg header found\n");
+ skip_bits(&s->gb, 16); /* version ? */
+ skip_bits(&s->gb, 16); /* unknown always 0? */
+ skip_bits(&s->gb, 16); /* unknown always 0? */
+ skip_bits(&s->gb, 16); /* unknown always 0? */
+ switch( get_bits(&s->gb, 8)){
+ case 1:
+ s->rgb= 1;
+ s->pegasus_rct=0;
+ break;
+ case 2:
+ s->rgb= 1;
+ s->pegasus_rct=1;
+ break;
+ default:
+ printf("unknown colorspace\n");
+ }
+ len -= 9;
+ goto out;
+ }
/* Apple MJPEG-A */
if ((s->start_code == APP1) && (len > (0x28 - 8)))
@@ -1393,6 +1751,9 @@ static int mjpeg_decode_frame(AVCodecContext *avctx,
init_get_bits(&s->gb, buf_ptr, (buf_end - buf_ptr)*8);
s->start_code = start_code;
+ if(s->avctx->debug & FF_DEBUG_STARTCODE){
+ printf("startcode: %X\n", start_code);
+ }
/* process markers */
if (start_code >= 0xd0 && start_code <= 0xd7) {
@@ -1418,7 +1779,13 @@ static int mjpeg_decode_frame(AVCodecContext *avctx,
mjpeg_decode_dht(s);
break;
case SOF0:
- if (mjpeg_decode_sof0(s) < 0)
+ s->lossless=0;
+ if (mjpeg_decode_sof(s) < 0)
+ return -1;
+ break;
+ case SOF3:
+ s->lossless=1;
+ if (mjpeg_decode_sof(s) < 0)
return -1;
break;
case EOI:
@@ -1443,7 +1810,10 @@ eoi_parser:
/* XXX: not complete test ! */
switch((s->h_count[0] << 4) | s->v_count[0]) {
case 0x11:
- avctx->pix_fmt = PIX_FMT_YUV444P;
+ if(s->rgb){
+ avctx->pix_fmt = PIX_FMT_RGBA32;
+ }else
+ avctx->pix_fmt = PIX_FMT_YUV444P;
break;
case 0x21:
avctx->pix_fmt = PIX_FMT_YUV422P;
@@ -1471,7 +1841,6 @@ eoi_parser:
break;
case SOF1:
case SOF2:
- case SOF3:
case SOF5:
case SOF6:
case SOF7:
@@ -1570,7 +1939,7 @@ read_header:
{
init_get_bits(&s->gb, buf+sof_offs, (buf_end - (buf+sof_offs))*8);
s->start_code = SOF0;
- if (mjpeg_decode_sof0(s) < 0)
+ if (mjpeg_decode_sof(s) < 0)
return -1;
}
@@ -1668,3 +2037,13 @@ AVCodec mjpegb_decoder = {
0,
NULL
};
+
+AVCodec ljpeg_encoder = { //FIXME avoid MPV_* lossless jpeg shouldn't need them
+ "ljpeg",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_LJPEG,
+ sizeof(MpegEncContext),
+ MPV_encode_init,
+ encode_picture_lossless,
+ MPV_encode_end,
+};
diff --git a/src/libffmpeg/libavcodec/mpeg12.c b/src/libffmpeg/libavcodec/mpeg12.c
index c31a711c7..c68f8d8a0 100644
--- a/src/libffmpeg/libavcodec/mpeg12.c
+++ b/src/libffmpeg/libavcodec/mpeg12.c
@@ -1990,6 +1990,9 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
+ if (!s1->mpeg_enc_ctx_allocated)
+ return 0;
+
/* end of slice reached */
if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
/* end of image */
diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c
index d263c46d5..9aa258f95 100644
--- a/src/libffmpeg/libavcodec/mpegvideo.c
+++ b/src/libffmpeg/libavcodec/mpegvideo.c
@@ -528,8 +528,7 @@ int MPV_encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
int i;
-
- avctx->pix_fmt = PIX_FMT_YUV420P;
+ int chroma_h_shift, chroma_v_shift;
s->bit_rate = avctx->bit_rate;
s->bit_rate_tolerance = avctx->bit_rate_tolerance;
@@ -620,22 +619,25 @@ int MPV_encode_init(AVCodecContext *avctx)
s->intra_quant_bias= avctx->intra_quant_bias;
if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
s->inter_quant_bias= avctx->inter_quant_bias;
-
+
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
+
switch(avctx->codec->id) {
case CODEC_ID_MPEG1VIDEO:
s->out_format = FMT_MPEG1;
s->low_delay= 0; //s->max_b_frames ? 0 : 1;
avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
break;
+ case CODEC_ID_LJPEG:
case CODEC_ID_MJPEG:
s->out_format = FMT_MJPEG;
s->intra_only = 1; /* force intra only for jpeg */
s->mjpeg_write_tables = 1; /* write all tables */
s->mjpeg_data_only_frames = 0; /* write all the needed headers */
- s->mjpeg_vsample[0] = 2; /* set up default sampling factors */
- s->mjpeg_vsample[1] = 1; /* the only currently supported values */
+ s->mjpeg_vsample[0] = 1<<chroma_v_shift;
+ s->mjpeg_vsample[1] = 1;
s->mjpeg_vsample[2] = 1;
- s->mjpeg_hsample[0] = 2;
+ s->mjpeg_hsample[0] = 1<<chroma_h_shift;
s->mjpeg_hsample[1] = 1;
s->mjpeg_hsample[2] = 1;
if (mjpeg_init(s) < 0)
@@ -761,9 +763,7 @@ int MPV_encode_init(AVCodecContext *avctx)
if (MPV_common_init(s) < 0)
return -1;
-#ifdef CONFIG_ENCODERS_FULL
ff_init_me(s);
-#endif
#ifdef CONFIG_ENCODERS
#ifdef CONFIG_RISKY
@@ -1550,10 +1550,8 @@ int MPV_encode_picture(AVCodecContext *avctx,
if (s->out_format == FMT_MJPEG)
mjpeg_picture_trailer(s);
-#ifdef CONFIG_ENCODERS_FULL
if(s->flags&CODEC_FLAG_PASS1)
ff_write_pass1_stats(s);
-#endif
for(i=0; i<4; i++){
avctx->error[i] += s->current_picture_ptr->error[i];
@@ -1605,7 +1603,7 @@ static inline void gmc1_motion(MpegEncContext *s,
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
|| src_y + 17 >= s->v_edge_pos){
- ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer;
}
}
@@ -1644,7 +1642,7 @@ static inline void gmc1_motion(MpegEncContext *s,
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
|| src_y + 9 >= s->v_edge_pos>>1){
- ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
emu=1;
}
@@ -1653,7 +1651,7 @@ static inline void gmc1_motion(MpegEncContext *s,
ptr = ref_picture[2] + offset;
if(emu){
- ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}
s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
@@ -1724,12 +1722,22 @@ static inline void gmc_motion(MpegEncContext *s,
s->h_edge_pos>>1, s->v_edge_pos>>1);
}
-
-void ff_emulated_edge_mc(MpegEncContext *s, uint8_t *src, int linesize, int block_w, int block_h,
+/**
+ * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
+ * @param buf destination buffer
+ * @param src source buffer
+ * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
+ * @param block_w width of block
+ * @param block_h height of block
+ * @param src_x x coordinate of the top left sample of the block in the source buffer
+ * @param src_y y coordinate of the top left sample of the block in the source buffer
+ * @param w width of the source buffer
+ * @param h height of the source buffer
+ */
+void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
int src_x, int src_y, int w, int h){
int x, y;
int start_y, start_x, end_y, end_x;
- uint8_t *buf= s->edge_emu_buffer;
if(src_y>= h){
src+= (h-1-src_y)*linesize;
@@ -1825,7 +1833,7 @@ if(s->quarter_sample)
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
|| src_y + (motion_y&1) + h > v_edge_pos){
- ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer + src_offset;
emu=1;
@@ -1862,7 +1870,7 @@ if(s->quarter_sample)
offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
ptr = ref_picture[1] + offset;
if(emu){
- ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer + (src_offset >> 1);
}
@@ -1870,7 +1878,7 @@ if(s->quarter_sample)
ptr = ref_picture[2] + offset;
if(emu){
- ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer + (src_offset >> 1);
}
@@ -1910,7 +1918,7 @@ static inline void qpel_motion(MpegEncContext *s,
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
|| src_y + (motion_y&3) + h > v_edge_pos){
- ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based,
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,
src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer + src_offset;
emu=1;
@@ -1960,7 +1968,7 @@ static inline void qpel_motion(MpegEncContext *s,
offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
ptr = ref_picture[1] + offset;
if(emu){
- ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer + (src_offset >> 1);
}
@@ -1968,7 +1976,7 @@ static inline void qpel_motion(MpegEncContext *s,
ptr = ref_picture[2] + offset;
if(emu){
- ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer + (src_offset >> 1);
}
@@ -2062,7 +2070,7 @@ static inline void MPV_motion(MpegEncContext *s,
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
|| src_y + (motion_y&3) + 8 > s->v_edge_pos){
- ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer;
}
}
@@ -2093,7 +2101,7 @@ static inline void MPV_motion(MpegEncContext *s,
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
|| src_y + (motion_y&1) + 8 > s->v_edge_pos){
- ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer;
}
}
@@ -2128,7 +2136,7 @@ static inline void MPV_motion(MpegEncContext *s,
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
|| src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
- ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
emu=1;
}
@@ -2137,7 +2145,7 @@ static inline void MPV_motion(MpegEncContext *s,
ptr = ref_picture[2] + offset;
if(emu){
- ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}
pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
@@ -2676,7 +2684,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
- ff_emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
ptr= s->edge_emu_buffer;
emu=1;
}
@@ -2708,14 +2716,14 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int wrap_c = s->uvlinesize;
ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
if(emu){
- ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
ptr= s->edge_emu_buffer;
}
s->dsp.get_pixels(s->block[4], ptr, wrap_c);
ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
if(emu){
- ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
ptr= s->edge_emu_buffer;
}
s->dsp.get_pixels(s->block[5], ptr, wrap_c);
@@ -2755,7 +2763,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
}
if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
- ff_emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
ptr_y= s->edge_emu_buffer;
emu=1;
}
@@ -2787,12 +2795,12 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
skip_dct[5]= 1;
}else{
if(emu){
- ff_emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
ptr_cb= s->edge_emu_buffer;
}
s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
if(emu){
- ff_emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
ptr_cr= s->edge_emu_buffer;
}
s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
@@ -3129,7 +3137,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
/* Estimate motion for every MB */
s->mb_intra=0; //for the rate distoration & bit compare functions
if(s->pict_type != I_TYPE){
-#ifdef CONFIG_ENCODERS_FULL
if(s->pict_type != B_TYPE){
if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
s->me.pre_pass=1;
@@ -3167,7 +3174,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
ff_estimate_p_frame_motion(s, mb_x, mb_y);
}
}
-#endif
}else /* if(s->pict_type == I_TYPE) */{
/* I-Frame */
//FIXME do we need to zero them?
@@ -3202,7 +3208,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
}
-#ifdef CONFIG_ENCODERS_FULL
if(!s->umvplus){
if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
@@ -3227,7 +3232,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
}
}
-#endif
if (s->fixed_qscale)
s->frame_qscale = s->current_picture.quality;
diff --git a/src/libffmpeg/libavcodec/mpegvideo.h b/src/libffmpeg/libavcodec/mpegvideo.h
index c09b6a6f3..5f3c10d24 100644
--- a/src/libffmpeg/libavcodec/mpegvideo.h
+++ b/src/libffmpeg/libavcodec/mpegvideo.h
@@ -708,7 +708,7 @@ void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length);
void ff_clean_intra_table_entries(MpegEncContext *s);
void ff_init_scantable(uint8_t *, ScanTable *st, const uint8_t *src_scantable);
void ff_draw_horiz_band(MpegEncContext *s, int y, int h);
-void ff_emulated_edge_mc(MpegEncContext *s, uint8_t *src, int linesize, int block_w, int block_h,
+void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
int src_x, int src_y, int w, int h);
#define END_NOT_FOUND -100
int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size);
@@ -877,7 +877,7 @@ void ff_wmv2_encode_mb(MpegEncContext * s,
DCTELEM block[6][64],
int motion_x, int motion_y);
-/* mjpegenc.c */
+/* mjpeg.c */
int mjpeg_init(MpegEncContext *s);
void mjpeg_close(MpegEncContext *s);
void mjpeg_encode_mb(MpegEncContext *s,
diff --git a/src/libffmpeg/libavcodec/ra288.c b/src/libffmpeg/libavcodec/ra288.c
index 9bebfa88a..7b6df3bf5 100644
--- a/src/libffmpeg/libavcodec/ra288.c
+++ b/src/libffmpeg/libavcodec/ra288.c
@@ -24,7 +24,7 @@ typedef struct {
float output[40];
float pr1[36];
float pr2[10];
- int phase, phasep;
+ int phase, phasep;
float st1a[111],st1b[37],st1[37];
float st2a[38],st2b[11],st2[11];
@@ -231,12 +231,12 @@ static void unpack(unsigned short *tgt, unsigned char *src, int len)
{
int x,y,z;
int n,temp;
- int buffer[38];
+ int buffer[len];
for (x=0;x<len;tgt[x++]=0)
buffer[x]=9+(x&1);
- for (x=y=z=0;x<38;x++) {
+ for (x=y=z=0;x<len/*was 38*/;x++) {
n=buffer[y]-z;
temp=src[x];
if (n<8) temp&=255>>(8-n);
@@ -386,56 +386,63 @@ static void prodsum(float *tgt, float *src, int len, int n)
}
}
-#ifndef max
-#define max(a,b) ((a)>(b)?(a):(b))
-#endif
+void * decode_block(AVCodecContext * avctx, unsigned char *in, signed short int *out,unsigned len)
+{
+ int x,y;
+ Real288_internal *glob=avctx->priv_data;
+ unsigned short int buffer[len];
+
+ unpack(buffer,in,len);
+ for (x=0;x<32;x++)
+ {
+ glob->phasep=(glob->phase=x&7)*5;
+ decode(glob,buffer[x]);
+ for (y=0;y<5;*(out++)=8*glob->output[glob->phasep+(y++)]);
+ if (glob->phase==3) update(glob);
+ }
+ return out;
+}
/* Decode a block (celp) */
static int ra288_decode_frame(AVCodecContext * avctx,
void *data, int *data_size,
uint8_t * buf, int buf_size)
{
- int x,y,z,bret;
- unsigned short int buffer[buf_size];
- unsigned char b[buf_size],*bp;
- void *datao;
- Real288_internal *glob=avctx->priv_data;
- if(avctx->extradata_size>=6)
+ if(avctx->extradata_size>=6)
{
- int w=avctx->block_align;
- int h=((short*)(avctx->extradata))[1];
- int cfs=((short*)(avctx->extradata))[3]; /* coded frame size */
+//((short*)(avctx->extradata))[0]; /* subpacket size */
+//((short*)(avctx->extradata))[1]; /* subpacket height */
+//((short*)(avctx->extradata))[2]; /* subpacket flavour */
+//((short*)(avctx->extradata))[3]; /* coded frame size */
+//((short*)(avctx->extradata))[4]; /* codec's data length */
+//((short*)(avctx->extradata))[5...] /* codec's data */
+ int bret;
+ void *datao;
+ int w=avctx->block_align; /* 228 */
+ int h=((short*)(avctx->extradata))[1]; /* 12 */
+ int cfs=((short*)(avctx->extradata))[3]; /* coded frame size 38 */
int i,j;
- if(buf_size<w*h) goto no_interleave;
- bp = buf;
- for (j = 0; j < h; j++)
- for (i = 0; i < h/2; i++)
- {
- memcpy(&b[i*2*w+j*cfs], bp, cfs);
- bp += cfs;
- if(bp-buf>=buf_size) break;
- }
- bret=bp-buf;
- bp = b;
- }
- else { no_interleave: bret=buf_size; bp = buf; }
- datao = data;
- z=0;
- while(z<bret)
- {
- unpack(buffer,bp,32);
- for (x=0;x<32;x++)
+ if(buf_size<w*h)
{
- glob->phasep=(glob->phase=x&7)*5;
- decode(glob,buffer[x]);
- for (y=0;y<5;y++) *(((int16_t *)data)++)=8*glob->output[glob->phasep+y];
- if (glob->phase==3) update(glob);
+ fprintf(stderr,"ffra288: Error! Input buffer is too small [%d<%d]\n",buf_size,w*h);
+ return 0;
}
- z+=32;
- bp+=32;
+ datao = data;
+ bret = 0;
+ for (j = 0; j < h/2; j++)
+ for (i = 0; i < h; i++)
+ {
+ data=decode_block(avctx,&buf[j*cfs+cfs*i*h/2],(signed short *)data,cfs);
+ bret += cfs;
+ }
+ *data_size = data - datao;
+ return bret;
+ }
+ else
+ {
+ fprintf(stderr,"ffra288: Error: need extra data!!!\n");
+ return 0;
}
- *data_size = data - datao;
- return bret;
}
AVCodec ra_288_decoder =
diff --git a/src/libffmpeg/libavcodec/svq1_cb.h b/src/libffmpeg/libavcodec/svq1_cb.h
index 14372a255..77ee56dbb 100644
--- a/src/libffmpeg/libavcodec/svq1_cb.h
+++ b/src/libffmpeg/libavcodec/svq1_cb.h
@@ -764,7 +764,7 @@ static const int8_t svq1_inter_codebook_8x8[6144] = {
};
/* list of codebooks for inter-coded vectors */
-static const uint8_t* const svq1_inter_codebooks[4] = {
+static const int8_t* const svq1_inter_codebooks[4] = {
svq1_inter_codebook_4x2, svq1_inter_codebook_4x4,
svq1_inter_codebook_8x4, svq1_inter_codebook_8x8
};
@@ -1506,7 +1506,7 @@ static const int8_t svq1_intra_codebook_8x8[6144] = {
};
/* list of codebooks for intra-coded vectors */
-static const uint8_t* const svq1_intra_codebooks[4] = {
+static const int8_t* const svq1_intra_codebooks[4] = {
svq1_intra_codebook_4x2, svq1_intra_codebook_4x4,
svq1_intra_codebook_8x4, svq1_intra_codebook_8x8
};
diff --git a/src/libffmpeg/libavcodec/wmv2.c b/src/libffmpeg/libavcodec/wmv2.c
index ccac9fe77..ea4f407eb 100644
--- a/src/libffmpeg/libavcodec/wmv2.c
+++ b/src/libffmpeg/libavcodec/wmv2.c
@@ -650,7 +650,7 @@ void ff_mspel_motion(MpegEncContext *s,
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<1 || src_y<1 || src_x + 17 >= s->h_edge_pos
|| src_y + h+1 >= v_edge_pos){
- ff_emulated_edge_mc(s, ptr - 1 - s->linesize, s->linesize, 19, 19,
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19,
src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer + 1 + s->linesize;
emu=1;
@@ -691,7 +691,7 @@ void ff_mspel_motion(MpegEncContext *s,
offset = (src_y * uvlinesize) + src_x;
ptr = ref_picture[1] + offset;
if(emu){
- ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9,
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}
@@ -699,7 +699,7 @@ void ff_mspel_motion(MpegEncContext *s,
ptr = ref_picture[2] + offset;
if(emu){
- ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9,
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
}