summaryrefslogtreecommitdiff
path: root/src/libffmpeg/libavcodec/mpegvideo.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/libffmpeg/libavcodec/mpegvideo.c')
-rw-r--r--src/libffmpeg/libavcodec/mpegvideo.c827
1 files changed, 585 insertions, 242 deletions
diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c
index 83a481e9f..0e04301a4 100644
--- a/src/libffmpeg/libavcodec/mpegvideo.c
+++ b/src/libffmpeg/libavcodec/mpegvideo.c
@@ -24,7 +24,6 @@
* The simplest mpeg encoder (well, it was the simplest!).
*/
-#include <ctype.h>
#include <limits.h>
#include "avcodec.h"
#include "dsputil.h"
@@ -50,8 +49,15 @@ static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
#ifdef CONFIG_ENCODERS
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
+static int sse_mb(MpegEncContext *s);
#endif //CONFIG_ENCODERS
+#ifdef HAVE_XVMC
+extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
+extern void XVMC_field_end(MpegEncContext *s);
+extern void XVMC_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
+#endif
+
void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
@@ -134,6 +140,13 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16
}
}
}
+
+static inline void update_qscale(MpegEncContext *s){
+ s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
+ s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
+
+ s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
+}
#endif //CONFIG_ENCODERS
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
@@ -160,6 +173,20 @@ void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_s
}
}
+#ifdef CONFIG_ENCODERS
+void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
+ int i;
+
+ if(matrix){
+ put_bits(pb, 1, 1);
+ for(i=0;i<64;i++) {
+ put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
+ }
+ }else
+ put_bits(pb, 1, 0);
+}
+#endif //CONFIG_ENCODERS
+
/* init common dct for both encoder and decoder */
int DCT_common_init(MpegEncContext *s)
{
@@ -212,6 +239,11 @@ int DCT_common_init(MpegEncContext *s)
return 0;
}
+static void copy_picture(Picture *dst, Picture *src){
+ *dst = *src;
+ dst->type= FF_BUFFER_TYPE_COPY;
+}
+
/**
* allocates a Picture
* The pixels are allocated/set by calling get_buffer() if shared=0
@@ -270,6 +302,7 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
}
}
pic->qstride= s->mb_stride;
+ CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
}
//it might be nicer if the application would keep track of these but it would require a API change
@@ -300,6 +333,7 @@ static void free_picture(MpegEncContext *s, Picture *pic){
av_freep(&pic->mbskip_table);
av_freep(&pic->qscale_table);
av_freep(&pic->mb_type_base);
+ av_freep(&pic->pan_scan);
pic->mb_type= NULL;
for(i=0; i<2; i++){
av_freep(&pic->motion_val[i]);
@@ -353,6 +387,11 @@ int MPV_common_init(MpegEncContext *s)
+ (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
+ (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
+ s->avctx->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
+ + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
+ + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
+ + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
+
CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
@@ -401,6 +440,8 @@ int MPV_common_init(MpegEncContext *s)
/* Allocate MB type table */
CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint8_t)) //needed for encoding
+
+ CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))
}
CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
@@ -476,6 +517,9 @@ void MPV_common_end(MpegEncContext *s)
{
int i;
+ av_freep(&s->parse_context.buffer);
+ s->parse_context.buffer_size=0;
+
av_freep(&s->mb_type);
av_freep(&s->p_mv_table_base);
av_freep(&s->b_forw_mv_table_base);
@@ -513,12 +557,16 @@ void MPV_common_end(MpegEncContext *s)
av_freep(&s->ac_stats);
av_freep(&s->error_status_table);
av_freep(&s->mb_index2xy);
+ av_freep(&s->lambda_table);
for(i=0; i<MAX_PICTURE_COUNT; i++){
free_picture(s, &s->picture[i]);
}
avcodec_default_free_buffers(s->avctx);
s->context_initialized = 0;
+ s->last_picture_ptr=
+ s->next_picture_ptr=
+ s->current_picture_ptr= NULL;
}
#ifdef CONFIG_ENCODERS
@@ -527,9 +575,11 @@ void MPV_common_end(MpegEncContext *s)
int MPV_encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
- int i;
+ int i, dummy;
int chroma_h_shift, chroma_v_shift;
+ avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
+
s->bit_rate = avctx->bit_rate;
s->bit_rate_tolerance = avctx->bit_rate_tolerance;
s->width = avctx->width;
@@ -594,7 +644,7 @@ int MPV_encode_init(AVCodecContext *avctx)
return -1;
}
- if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO){
+ if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
fprintf(stderr, "b frames not supporetd by codec\n");
return -1;
}
@@ -604,10 +654,15 @@ int MPV_encode_init(AVCodecContext *avctx)
return -1;
}
+ if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
+ fprintf(stderr, "CBP RD needs trellis quant\n");
+ return -1;
+ }
+
if(s->codec_id==CODEC_ID_MJPEG){
s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
s->inter_quant_bias= 0;
- }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO){
+ }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
s->inter_quant_bias= 0;
}else{
@@ -622,12 +677,22 @@ int MPV_encode_init(AVCodecContext *avctx)
avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
+ av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
+ s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
+
switch(avctx->codec->id) {
case CODEC_ID_MPEG1VIDEO:
s->out_format = FMT_MPEG1;
s->low_delay= 0; //s->max_b_frames ? 0 : 1;
avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
break;
+ case CODEC_ID_MPEG2VIDEO:
+ s->out_format = FMT_MPEG1;
+ s->low_delay= 0; //s->max_b_frames ? 0 : 1;
+ avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
+ s->rtp_mode= 1; // mpeg2 must have slices
+ if(s->rtp_payload_size == 0) s->rtp_payload_size= 256*256*256;
+ break;
case CODEC_ID_LJPEG:
case CODEC_ID_MJPEG:
s->out_format = FMT_MJPEG;
@@ -667,6 +732,14 @@ int MPV_encode_init(AVCodecContext *avctx)
avctx->delay=0;
s->low_delay=1;
break;
+ case CODEC_ID_FLV1:
+ s->out_format = FMT_H263;
+ s->h263_flv = 2; /* format = 1; 11-bit codes */
+ s->unrestricted_mv = 1;
+ s->rtp_mode=0; /* don't allow GOB */
+ avctx->delay=0;
+ s->low_delay=1;
+ break;
case CODEC_ID_RV10:
s->out_format = FMT_H263;
s->h263_rv10 = 1;
@@ -790,10 +863,14 @@ int MPV_encode_init(AVCodecContext *avctx)
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}else
#endif
- { /* mpeg1 */
+ { /* mpeg1/2 */
s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}
+ if(s->avctx->intra_matrix)
+ s->intra_matrix[j] = s->avctx->intra_matrix[i];
+ if(s->avctx->inter_matrix)
+ s->inter_matrix[j] = s->avctx->inter_matrix[i];
}
/* precompute matrix */
@@ -831,6 +908,8 @@ int MPV_encode_end(AVCodecContext *avctx)
MPV_common_end(s);
if (s->out_format == FMT_MJPEG)
mjpeg_close(s);
+
+ av_freep(&avctx->extradata);
return 0;
}
@@ -935,7 +1014,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
/* mark&release old frames */
- if (s->pict_type != B_TYPE && s->last_picture_ptr) {
+ if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
/* release forgotten pictures */
@@ -966,16 +1045,18 @@ alloc:
if(s->current_picture_ptr)
pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
- alloc_picture(s, (Picture*)pic, 0);
+ if( alloc_picture(s, (Picture*)pic, 0) < 0)
+ return -1;
s->current_picture_ptr= &s->picture[i];
}
s->current_picture_ptr->pict_type= s->pict_type;
- s->current_picture_ptr->quality= s->qscale;
+// if(s->flags && CODEC_FLAG_QSCALE)
+ // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
- s->current_picture= *s->current_picture_ptr;
+ copy_picture(&s->current_picture, s->current_picture_ptr);
if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
if (s->pict_type != B_TYPE) {
@@ -983,10 +1064,17 @@ alloc:
s->next_picture_ptr= s->current_picture_ptr;
}
- if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
- if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
- if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;
+ if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
+ if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);
+ if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
+ fprintf(stderr, "warning: first frame is no keyframe\n");
+ assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
+ goto alloc;
+ }
+
+ assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
+
if(s->picture_structure!=PICT_FRAME){
int i;
for(i=0; i<4; i++){
@@ -998,12 +1086,6 @@ alloc:
s->next_picture.linesize[i] *=2;
}
}
-
- if(s->pict_type != I_TYPE && s->last_picture_ptr==NULL){
- fprintf(stderr, "warning: first frame is no keyframe\n");
- assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
- goto alloc;
- }
}
s->hurry_up= s->avctx->hurry_up;
@@ -1011,14 +1093,17 @@ alloc:
/* set dequantizer, we cant do it during init as it might change for mpeg4
and we cant do it in the header decode as init isnt called for mpeg4 there yet */
- if(s->out_format == FMT_H263){
- if(s->mpeg_quant)
- s->dct_unquantize = s->dct_unquantize_mpeg2;
- else
- s->dct_unquantize = s->dct_unquantize_h263;
- }else
+ if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO)
+ s->dct_unquantize = s->dct_unquantize_mpeg2;
+ else if(s->out_format == FMT_H263)
+ s->dct_unquantize = s->dct_unquantize_h263;
+ else
s->dct_unquantize = s->dct_unquantize_mpeg1;
+#ifdef HAVE_XVMC
+ if(s->avctx->xvmc_acceleration)
+ return XVMC_field_start(s, avctx);
+#endif
return 0;
}
@@ -1027,12 +1112,16 @@ void MPV_frame_end(MpegEncContext *s)
{
int i;
/* draw edge for correct motion prediction if outside */
- if(s->codec_id!=CODEC_ID_SVQ1){
- if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
+#ifdef HAVE_XVMC
+//just to make sure that all data is rendered.
+ if(s->avctx->xvmc_acceleration){
+ XVMC_field_end(s);
+ }else
+#endif
+ if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
- }
}
emms_c();
@@ -1291,11 +1380,12 @@ static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int st
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
- AVFrame *pic;
+ AVFrame *pic=NULL;
int i;
const int encoding_delay= s->max_b_frames;
int direct=1;
-
+
+ if(pic_arg){
if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
if(pic_arg->linesize[0] != s->linesize) direct=0;
if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
@@ -1315,26 +1405,20 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
}
alloc_picture(s, (Picture*)pic, 1);
}else{
+ int offset= 16;
i= find_unused_picture(s, 0);
pic= (AVFrame*)&s->picture[i];
pic->reference= 3;
alloc_picture(s, (Picture*)pic, 0);
- for(i=0; i<4; i++){
- /* the input will be 16 pixels to the right relative to the actual buffer start
- * and the current_pic, so the buffer can be reused, yes its not beatifull
- */
- pic->data[i]+= 16;
- }
- if( pic->data[0] == pic_arg->data[0]
- && pic->data[1] == pic_arg->data[1]
- && pic->data[2] == pic_arg->data[2]){
+ if( pic->data[0] + offset == pic_arg->data[0]
+ && pic->data[1] + offset == pic_arg->data[1]
+ && pic->data[2] + offset == pic_arg->data[2]){
// empty
}else{
int h_chroma_shift, v_chroma_shift;
-
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for(i=0; i<3; i++){
@@ -1345,7 +1429,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
int w= s->width >>h_shift;
int h= s->height>>v_shift;
uint8_t *src= pic_arg->data[i];
- uint8_t *dst= pic->data[i];
+ uint8_t *dst= pic->data[i] + offset;
if(src_stride==dst_stride)
memcpy(dst, src, src_stride*h);
@@ -1365,6 +1449,8 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
if(s->input_picture[encoding_delay])
pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
+
+ }
/* shift buffer entries */
for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
@@ -1377,7 +1463,6 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
static void select_input_picture(MpegEncContext *s){
int i;
- const int encoding_delay= s->max_b_frames;
int coded_pic_num=0;
if(s->reordered_input_picture[0])
@@ -1418,16 +1503,17 @@ static void select_input_picture(MpegEncContext *s){
}
}else if(s->b_frame_strategy==0){
b_frames= s->max_b_frames;
+ while(b_frames && !s->input_picture[b_frames]) b_frames--;
}else if(s->b_frame_strategy==1){
for(i=1; i<s->max_b_frames+1; i++){
- if(s->input_picture[i]->b_frame_score==0){
+ if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
s->input_picture[i]->b_frame_score=
get_intra_count(s, s->input_picture[i ]->data[0],
s->input_picture[i-1]->data[0], s->linesize) + 1;
}
}
for(i=0; i<s->max_b_frames; i++){
- if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
+ if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
}
b_frames= FFMAX(0, i-1);
@@ -1465,7 +1551,7 @@ static void select_input_picture(MpegEncContext *s){
if(s->reordered_input_picture[0]){
s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
- s->new_picture= *s->reordered_input_picture[0];
+ copy_picture(&s->new_picture, s->reordered_input_picture[0]);
if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
// input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
@@ -1483,6 +1569,7 @@ static void select_input_picture(MpegEncContext *s){
pic->quality = s->reordered_input_picture[0]->quality;
pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
pic->reference = s->reordered_input_picture[0]->reference;
+ pic->pts = s->reordered_input_picture[0]->pts;
alloc_picture(s, pic, 0);
@@ -1495,11 +1582,10 @@ static void select_input_picture(MpegEncContext *s){
s->current_picture_ptr= s->reordered_input_picture[0];
for(i=0; i<4; i++){
- //reverse the +16 we did before storing the input
- s->current_picture_ptr->data[i]-=16;
+ s->new_picture.data[i]+=16;
}
}
- s->current_picture= *s->current_picture_ptr;
+ copy_picture(&s->current_picture, s->current_picture_ptr);
s->picture_number= s->new_picture.display_picture_number;
//printf("dpn:%d\n", s->picture_number);
@@ -1515,7 +1601,12 @@ int MPV_encode_picture(AVCodecContext *avctx,
AVFrame *pic_arg = data;
int i;
- init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
+ if(avctx->pix_fmt != PIX_FMT_YUV420P){
+ fprintf(stderr, "this codec supports only YUV420P\n");
+ return -1;
+ }
+
+ init_put_bits(&s->pb, buf, buf_size);
s->picture_in_gop_number++;
@@ -1527,10 +1618,6 @@ int MPV_encode_picture(AVCodecContext *avctx,
if(s->new_picture.data[0]){
s->pict_type= s->new_picture.pict_type;
- if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we dont touch it for CBR */
- s->qscale= (int)(s->new_picture.quality+0.5);
- assert(s->qscale);
- }
//emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
MPV_frame_start(s, avctx);
@@ -1605,8 +1692,8 @@ static inline void gmc1_motion(MpegEncContext *s,
dest_y+=dest_offset;
if(s->flags&CODEC_FLAG_EMU_EDGE){
- if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
- || src_y + 17 >= s->v_edge_pos){
+ if( (unsigned)src_x >= s->h_edge_pos - 17
+ || (unsigned)src_y >= s->v_edge_pos - 17){
ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer;
}
@@ -1644,8 +1731,8 @@ static inline void gmc1_motion(MpegEncContext *s,
offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
ptr = ref_picture[1] + offset;
if(s->flags&CODEC_FLAG_EMU_EDGE){
- if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
- || src_y + 9 >= s->v_edge_pos>>1){
+ if( (unsigned)src_x >= (s->h_edge_pos>>1) - 9
+ || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
emu=1;
@@ -1834,9 +1921,9 @@ if(s->quarter_sample)
ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
dest_y += dest_offset;
- if(s->flags&CODEC_FLAG_EMU_EDGE){
- if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
- || src_y + (motion_y&1) + h > v_edge_pos){
+ if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
+ if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
+ || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer + src_offset;
@@ -1920,8 +2007,8 @@ static inline void qpel_motion(MpegEncContext *s,
//printf("%d %d %d\n", src_x, src_y, dxy);
if(s->flags&CODEC_FLAG_EMU_EDGE){
- if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
- || src_y + (motion_y&3) + h > v_edge_pos){
+ if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
+ || (unsigned)src_y > v_edge_pos - (motion_y&3) - h ){
ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,
src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer + src_offset;
@@ -2072,8 +2159,8 @@ static inline void MPV_motion(MpegEncContext *s,
ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
if(s->flags&CODEC_FLAG_EMU_EDGE){
- if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
- || src_y + (motion_y&3) + 8 > s->v_edge_pos){
+ if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
+ || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer;
}
@@ -2103,8 +2190,8 @@ static inline void MPV_motion(MpegEncContext *s,
ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
if(s->flags&CODEC_FLAG_EMU_EDGE){
- if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
- || src_y + (motion_y&1) + 8 > s->v_edge_pos){
+ if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 8
+ || (unsigned)src_y > s->v_edge_pos - (motion_y&1) - 8){
ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer;
}
@@ -2138,8 +2225,8 @@ static inline void MPV_motion(MpegEncContext *s,
offset = (src_y * (s->uvlinesize)) + src_x;
ptr = ref_picture[1] + offset;
if(s->flags&CODEC_FLAG_EMU_EDGE){
- if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
- || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
+ if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
+ || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
ptr= s->edge_emu_buffer;
emu=1;
@@ -2194,6 +2281,105 @@ static inline void MPV_motion(MpegEncContext *s,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
}
break;
+ case MV_TYPE_16X8:{
+ int offset;
+ uint8_t ** ref2picture;
+
+ if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
+ ref2picture= ref_picture;
+ offset= s->field_select[dir][0] ? s->linesize : 0;
+ }else{
+ ref2picture= s->current_picture.data;
+ offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
+ }
+
+ mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
+ ref2picture, offset,
+ 0, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 8);
+
+
+ if(s->picture_structure == s->field_select[dir][1] + 1 || s->pict_type == B_TYPE || s->first_field){
+ ref2picture= ref_picture;
+ offset= s->field_select[dir][1] ? s->linesize : 0;
+ }else{
+ ref2picture= s->current_picture.data;
+ offset= s->field_select[dir][1] ? s->linesize : -s->linesize;
+ }
+ // I know it is ugly but this is the only way to fool emu_edge without rewrite mpeg_motion
+ mpeg_motion(s, dest_y+16*s->linesize, dest_cb+8*s->uvlinesize, dest_cr+8*s->uvlinesize,
+ 0,
+ ref2picture, offset,
+ 0, pix_op,
+ s->mv[dir][1][0], s->mv[dir][1][1]+16, 8);
+ }
+
+ break;
+ case MV_TYPE_DMV:
+ {
+ op_pixels_func (*dmv_pix_op)[4];
+ int offset;
+
+ dmv_pix_op = s->dsp.put_pixels_tab;
+
+ if(s->picture_structure == PICT_FRAME){
+ //put top field from top field
+ mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
+ ref_picture, 0,
+ 1, dmv_pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 8);
+ //put bottom field from bottom field
+ mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
+ ref_picture, s->linesize,
+ 1, dmv_pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 8);
+
+ dmv_pix_op = s->dsp.avg_pixels_tab;
+
+ //avg top field from bottom field
+ mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
+ ref_picture, s->linesize,
+ 1, dmv_pix_op,
+ s->mv[dir][2][0], s->mv[dir][2][1], 8);
+ //avg bottom field from top field
+ mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
+ ref_picture, 0,
+ 1, dmv_pix_op,
+ s->mv[dir][3][0], s->mv[dir][3][1], 8);
+
+ }else{
+ offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
+ s->linesize : 0;
+
+ //put field from the same parity
+ //same parity is never in the same frame
+ mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
+ ref_picture,offset,
+ 0,dmv_pix_op,
+ s->mv[dir][0][0],s->mv[dir][0][1],16);
+
+ // after put we make avg of the same block
+ dmv_pix_op=s->dsp.avg_pixels_tab;
+
+ //opposite parity is always in the same frame if this is second field
+ if(!s->first_field){
+ ref_picture = s->current_picture.data;
+ //top field is one linesize from frame beginig
+ offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
+ -s->linesize : s->linesize;
+ }else
+ offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
+ 0 : s->linesize;
+
+ //avg field from the opposite parity
+ mpeg_motion(s, dest_y, dest_cb, dest_cr,0,
+ ref_picture, offset,
+ 0,dmv_pix_op,
+ s->mv[dir][2][0],s->mv[dir][2][1],16);
+ }
+ }
+ break;
+
}
}
@@ -2272,6 +2458,12 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
int mb_x, mb_y;
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
+#ifdef HAVE_XVMC
+ if(s->avctx->xvmc_acceleration){
+ XVMC_decode_mb(s,block);
+ return;
+ }
+#endif
mb_x = s->mb_x;
mb_y = s->mb_y;
@@ -2292,50 +2484,6 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
else if (s->h263_pred || s->h263_aic)
s->mbintra_table[mb_xy]=1;
- /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
- if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here
- //FIXME a lot of thet is only needed for !low_delay
- const int wrap = s->block_wrap[0];
- const int xy = s->block_index[0];
- if(s->mv_type != MV_TYPE_8X8){
- int motion_x, motion_y;
- if (s->mb_intra) {
- motion_x = 0;
- motion_y = 0;
- } else if (s->mv_type == MV_TYPE_16X16) {
- motion_x = s->mv[0][0][0];
- motion_y = s->mv[0][0][1];
- } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
- int i;
- motion_x = s->mv[0][0][0] + s->mv[0][1][0];
- motion_y = s->mv[0][0][1] + s->mv[0][1][1];
- motion_x = (motion_x>>1) | (motion_x&1);
- for(i=0; i<2; i++){
- s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0];
- s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1];
- s->field_select_table[mb_xy][i]= s->field_select[0][i];
- }
- }
-
- /* no update if 8X8 because it has been done during parsing */
- s->motion_val[xy][0] = motion_x;
- s->motion_val[xy][1] = motion_y;
- s->motion_val[xy + 1][0] = motion_x;
- s->motion_val[xy + 1][1] = motion_y;
- s->motion_val[xy + wrap][0] = motion_x;
- s->motion_val[xy + wrap][1] = motion_y;
- s->motion_val[xy + 1 + wrap][0] = motion_x;
- s->motion_val[xy + 1 + wrap][1] = motion_y;
- }
-
- if(s->encoding){ //FIXME encoding MUST be cleaned up
- if (s->mv_type == MV_TYPE_8X8)
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
- else
- s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
- }
- }
-
if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
uint8_t *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset;
@@ -2345,10 +2493,6 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
const int uvlinesize= s->current_picture.linesize[1];
/* avoid copy if macroblock skipped in last frame too */
- if (s->pict_type != B_TYPE) {
- s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
- }
-
/* skip only during decoding as we might trash the buffers during encoding a bit */
if(!s->encoding){
uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
@@ -2373,17 +2517,6 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
} else{
*mbskip_ptr = 0; /* not skipped */
}
- }else
- s->mb_skiped= 0;
-
- if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){ //FIXME precalc
- dest_y = s->current_picture.data[0] + mb_x * 16;
- dest_cb = s->current_picture.data[1] + mb_x * 8;
- dest_cr = s->current_picture.data[2] + mb_x * 8;
- }else{
- dest_y = s->current_picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
- dest_cb = s->current_picture.data[1] + (mb_y * 8 * uvlinesize) + mb_x * 8;
- dest_cr = s->current_picture.data[2] + (mb_y * 8 * uvlinesize) + mb_x * 8;
}
if (s->interlaced_dct) {
@@ -2393,11 +2526,15 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
dct_linesize = linesize;
dct_offset = linesize * 8;
}
+
+ dest_y= s->dest[0];
+ dest_cb= s->dest[1];
+ dest_cr= s->dest[2];
if (!s->mb_intra) {
/* motion handling */
/* decoding or more than one mb_type (MC was allready done otherwise) */
- if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
+ if(!s->encoding){
if ((!s->no_rounding) || s->pict_type==B_TYPE){
op_pix = s->dsp.put_pixels_tab;
op_qpix= s->dsp.put_qpel_pixels_tab;
@@ -2420,7 +2557,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
if(s->hurry_up>1) return;
/* add dct residue */
- if(s->encoding || !( s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO
+ if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
|| (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
@@ -2449,7 +2586,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
#endif
} else {
/* dct only in intra block */
- if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){
+ if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
put_dct(s, block[0], 0, dest_y, dct_linesize);
put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
@@ -2619,33 +2756,66 @@ static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move
* @param h is the normal height, this will be reduced automatically if needed for the last row
*/
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
- if ( s->avctx->draw_horiz_band
- && (s->last_picture_ptr || s->low_delay) ) {
- uint8_t *src_ptr[3];
- int offset;
+ if (s->avctx->draw_horiz_band) {
+ AVFrame *src;
+ int offset[4];
+
+ if(s->picture_structure != PICT_FRAME){
+ h <<= 1;
+ y <<= 1;
+ if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
+ }
+
h= FFMIN(h, s->height - y);
- if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME)
- offset = 0;
+ if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
+ src= (AVFrame*)s->current_picture_ptr;
+ else if(s->last_picture_ptr)
+ src= (AVFrame*)s->last_picture_ptr;
else
- offset = y * s->linesize;
-
- if(s->pict_type==B_TYPE || s->low_delay){
- src_ptr[0] = s->current_picture.data[0] + offset;
- src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
- src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
- } else {
- src_ptr[0] = s->last_picture.data[0] + offset;
- src_ptr[1] = s->last_picture.data[1] + (offset >> 2);
- src_ptr[2] = s->last_picture.data[2] + (offset >> 2);
+ return;
+
+ if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
+ offset[0]=
+ offset[1]=
+ offset[2]=
+ offset[3]= 0;
+ }else{
+ offset[0]= y * s->linesize;;
+ offset[1]=
+ offset[2]= (y>>1) * s->uvlinesize;;
+ offset[3]= 0;
}
+
emms_c();
- s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize,
- y, s->width, h);
+ s->avctx->draw_horiz_band(s->avctx, src, offset,
+ y, s->picture_structure, h);
}
}
+void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
+ const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
+ const int uvlinesize= s->current_picture.linesize[1];
+
+ s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
+ s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1) + s->mb_x*2;
+ s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
+ s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2) + s->mb_x*2;
+ s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
+ s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
+
+ if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
+ s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
+ s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8;
+ s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8;
+ }else{
+ s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16 - 16;
+ s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
+ s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
+ }
+}
+
#ifdef CONFIG_ENCODERS
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
@@ -2659,22 +2829,23 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
for(i=0; i<6; i++) skip_dct[i]=0;
if(s->adaptive_quant){
- s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_stride] - s->qscale;
+ const int last_qp= s->qscale;
+ const int mb_xy= mb_x + mb_y*s->mb_stride;
- if(s->out_format==FMT_H263){
- if (s->dquant> 2) s->dquant= 2;
- else if(s->dquant<-2) s->dquant=-2;
- }
+ s->lambda= s->lambda_table[mb_xy];
+ update_qscale(s);
+ s->dquant= s->qscale - last_qp;
+
+ if(s->out_format==FMT_H263)
+ s->dquant= clip(s->dquant, -2, 2); //FIXME RD
if(s->codec_id==CODEC_ID_MPEG4){
if(!s->mb_intra){
- if(s->mv_dir&MV_DIRECT)
+ if((s->mv_dir&MV_DIRECT) || s->mv_type==MV_TYPE_8X8)
s->dquant=0;
-
- assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
}
}
- s->qscale+= s->dquant;
+ s->qscale= last_qp + s->dquant;
s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
}
@@ -2740,9 +2911,9 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int wrap_y, wrap_c;
int emu=0;
- dest_y = s->current_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
- dest_cb = s->current_picture.data[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
- dest_cr = s->current_picture.data[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
+ dest_y = s->dest[0];
+ dest_cb = s->dest[1];
+ dest_cr = s->dest[2];
wrap_y = s->linesize;
wrap_c = s->uvlinesize;
ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
@@ -2837,19 +3008,6 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
}
-#if 0
- {
- float adap_parm;
-
- adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_stride*mb_y+mb_x] + 1.0) /
- ((s->mb_var[s->mb_stride*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
-
- printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
- (s->mb_type[s->mb_stride*mb_y+mb_x] > 0) ? 'I' : 'P',
- s->qscale, adap_parm, s->qscale*adap_parm,
- s->mb_var[s->mb_stride*mb_y+mb_x], s->avg_mb_var);
- }
-#endif
/* DCT & quantize */
if(s->out_format==FMT_MJPEG){
for(i=0;i<6;i++) {
@@ -2869,12 +3027,20 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
}else
s->block_last_index[i]= -1;
}
+
if(s->luma_elim_threshold && !s->mb_intra)
for(i=0; i<4; i++)
dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
if(s->chroma_elim_threshold && !s->mb_intra)
for(i=4; i<6; i++)
dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
+
+ if(s->flags & CODEC_FLAG_CBP_RD){
+ for(i=0;i<6;i++) {
+ if(s->block_last_index[i] == -1)
+ s->coded_score[i]= INT_MAX/256;
+ }
+ }
}
if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
@@ -2887,6 +3053,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
/* huffman encode */
switch(s->codec_id){ //FIXME funct ptr could be slightly faster
case CODEC_ID_MPEG1VIDEO:
+ case CODEC_ID_MPEG2VIDEO:
mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
#ifdef CONFIG_RISKY
case CODEC_ID_MPEG4:
@@ -2899,6 +3066,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
case CODEC_ID_H263:
case CODEC_ID_H263P:
+ case CODEC_ID_FLV1:
case CODEC_ID_RV10:
h263_encode_mb(s, s->block, motion_x, motion_y); break;
#endif
@@ -2969,6 +3137,25 @@ int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size)
return 0;
}
+void ff_mpeg_flush(AVCodecContext *avctx){ //releases all internal/user picture buffers and resets the bitstream parser state (e.g. for a seek/flush)
+ int i;
+ MpegEncContext *s = avctx->priv_data;
+
+ for(i=0; i<MAX_PICTURE_COUNT; i++){ //release every allocated picture we own (internal or user supplied)
+ if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
+ || s->picture[i].type == FF_BUFFER_TYPE_USER))
+ avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
+ }
+ s->last_picture_ptr = s->next_picture_ptr = NULL; //no reference frames remain after the flush
+
+ s->parse_context.state= -1; //reset frame-boundary search state
+ s->parse_context.frame_start_found= 0;
+ s->parse_context.overread= 0;
+ s->parse_context.overread_index= 0;
+ s->parse_context.index= 0;
+ s->parse_context.last_index= 0;
+}
+
#ifdef CONFIG_ENCODERS
void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
{
@@ -3003,7 +3190,7 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext
d->misc_bits= s->misc_bits;
d->last_bits= 0;
- d->mb_skiped= s->mb_skiped;
+ d->mb_skiped= 0;
d->qscale= s->qscale;
}
@@ -3048,7 +3235,8 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
int *dmin, int *next_block, int motion_x, int motion_y)
{
- int bits_count;
+ int score;
+ uint8_t *dest_backup[3];
copy_context_before_encode(s, backup, type);
@@ -3058,24 +3246,44 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
s->pb2 = pb2 [*next_block];
s->tex_pb= tex_pb[*next_block];
}
+
+ if(*next_block){
+ memcpy(dest_backup, s->dest, sizeof(s->dest));
+ s->dest[0] = s->me.scratchpad;
+ s->dest[1] = s->me.scratchpad + 16;
+ s->dest[2] = s->me.scratchpad + 16 + 8;
+ assert(2*s->uvlinesize == s->linesize); //should be no prob for encoding
+ assert(s->linesize >= 64); //FIXME
+ }
encode_mb(s, motion_x, motion_y);
-
- bits_count= get_bit_count(&s->pb);
+
+ score= get_bit_count(&s->pb);
if(s->data_partitioning){
- bits_count+= get_bit_count(&s->pb2);
- bits_count+= get_bit_count(&s->tex_pb);
+ score+= get_bit_count(&s->pb2);
+ score+= get_bit_count(&s->tex_pb);
+ }
+
+ if(s->avctx->mb_decision == FF_MB_DECISION_RD){
+ MPV_decode_mb(s, s->block);
+
+ score *= s->lambda2;
+ score += sse_mb(s) << FF_LAMBDA_SHIFT;
+ }
+
+ if(*next_block){
+ memcpy(s->dest, dest_backup, sizeof(s->dest));
}
- if(bits_count<*dmin){
- *dmin= bits_count;
+ if(score<*dmin){
+ *dmin= score;
*next_block^=1;
copy_context_after_encode(best, s, type);
}
}
-static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
+static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
uint32_t *sq = squareTbl + 256;
int acc=0;
int x,y;
@@ -3096,6 +3304,23 @@ static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, in
return acc;
}
+static int sse_mb(MpegEncContext *s){ //sum of squared errors between the source MB (new_picture) and the reconstruction (dest), all 3 planes
+ int w= 16;
+ int h= 16;
+
+ if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; //clip MB size at the right/bottom picture border
+ if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
+
+ if(w==16 && h==16) //full-size MB: use the (possibly optimized) dsp sse functions
+ return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize)
+ +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize)
+ +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize);
+ else //partial edge MB: fall back to the generic C sse() which takes explicit w/h
+ return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
+ +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
+ +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
+}
+
static void encode_picture(MpegEncContext *s, int picture_number)
{
int mb_x, mb_y, pdif = 0;
@@ -3108,9 +3333,9 @@ static void encode_picture(MpegEncContext *s, int picture_number)
PutBitContext pb[2], pb2[2], tex_pb[2];
for(i=0; i<2; i++){
- init_put_bits(&pb [i], bit_buf [i], 3000, NULL, NULL);
- init_put_bits(&pb2 [i], bit_buf2 [i], 3000, NULL, NULL);
- init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
+ init_put_bits(&pb [i], bit_buf [i], 3000);
+ init_put_bits(&pb2 [i], bit_buf2 [i], 3000);
+ init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000);
}
s->picture_number = picture_number;
@@ -3122,13 +3347,13 @@ static void encode_picture(MpegEncContext *s, int picture_number)
#ifdef CONFIG_RISKY
/* we need to initialize some time vars before we can encode b-frames */
// RAL: Condition added for MPEG1VIDEO
- if (s->codec_id == CODEC_ID_MPEG1VIDEO || (s->h263_pred && !s->h263_msmpeg4))
+ if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
ff_set_mpeg4_time(s, s->picture_number);
#endif
s->scene_change_score=0;
- s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration
+ s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
if(s->pict_type==I_TYPE){
if(s->msmpeg4_version >= 3) s->no_rounding=1;
@@ -3208,7 +3433,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
}
emms_c();
- if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
+ if(s->scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){
s->pict_type= I_TYPE;
memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
@@ -3241,10 +3466,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
}
#endif
- if (s->fixed_qscale)
- s->frame_qscale = s->current_picture.quality;
- else
- s->frame_qscale = ff_rate_estimate_qscale(s);
+ if (!s->fixed_qscale)
+ s->current_picture.quality = ff_rate_estimate_qscale(s);
if(s->adaptive_quant){
#ifdef CONFIG_RISKY
@@ -3254,14 +3477,21 @@ static void encode_picture(MpegEncContext *s, int picture_number)
break;
case CODEC_ID_H263:
case CODEC_ID_H263P:
+ case CODEC_ID_FLV1:
ff_clean_h263_qscales(s);
break;
}
#endif
- s->qscale= s->current_picture.qscale_table[0];
+ s->lambda= s->lambda_table[0];
+ //FIXME broken
}else
- s->qscale= (int)(s->frame_qscale + 0.5);
+ s->lambda= s->current_picture.quality;
+//printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
+ update_qscale(s);
+
+ if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
+ s->qscale= 3; //reduce clipping problems
if (s->out_format == FMT_MJPEG) {
/* for mjpeg, we do include qscale in the matrix */
@@ -3297,6 +3527,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
mpeg4_encode_picture_header(s, picture_number);
else if (s->h263_rv10)
rv10_encode_picture_header(s, picture_number);
+ else if (s->codec_id == CODEC_ID_FLV1)
+ ff_flv_encode_picture_header(s, picture_number);
else
h263_encode_picture_header(s, picture_number);
break;
@@ -3304,6 +3536,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
case FMT_MPEG1:
mpeg1_encode_picture_header(s, picture_number);
break;
+ case FMT_H264:
+ break;
}
bits= get_bit_count(&s->pb);
s->header_bits= bits - s->last_bits;
@@ -3333,11 +3567,17 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->last_mv_dir = 0;
#ifdef CONFIG_RISKY
- if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)
+ switch(s->codec_id){
+ case CODEC_ID_H263:
+ case CODEC_ID_H263P:
+ case CODEC_ID_FLV1:
s->gob_index = ff_h263_get_gob_height(s);
-
- if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
- ff_mpeg4_init_partitions(s);
+ break;
+ case CODEC_ID_MPEG4:
+ if(s->partitioned_frame)
+ ff_mpeg4_init_partitions(s);
+ break;
+ }
#endif
s->resync_mb_x=0;
@@ -3345,41 +3585,32 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->first_slice_line = 1;
s->ptr_lastgob = s->pb.buf;
for(mb_y=0; mb_y < s->mb_height; mb_y++) {
+ s->mb_x=0;
+ s->mb_y= mb_y;
+
s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
+ ff_init_block_index(s);
- s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
- s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
- s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
- s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
- s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
- s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
for(mb_x=0; mb_x < s->mb_width; mb_x++) {
const int xy= mb_y*s->mb_stride + mb_x;
int mb_type= s->mb_type[xy];
// int d;
- int dmin=10000000;
+ int dmin= INT_MAX;
s->mb_x = mb_x;
- s->mb_y = mb_y;
- s->block_index[0]+=2;
- s->block_index[1]+=2;
- s->block_index[2]+=2;
- s->block_index[3]+=2;
- s->block_index[4]++;
- s->block_index[5]++;
+ ff_update_block_index(s);
/* write gob / video packet header */
#ifdef CONFIG_RISKY
- if(s->rtp_mode){
+ if(s->rtp_mode && mb_y + mb_x>0){
int current_packet_size, is_gob_start;
current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
is_gob_start=0;
if(s->codec_id==CODEC_ID_MPEG4){
- if(current_packet_size >= s->rtp_payload_size
- && s->mb_y + s->mb_x>0){
+ if(current_packet_size >= s->rtp_payload_size){
if(s->partitioned_frame){
ff_mpeg4_merge_partitions(s);
@@ -3397,14 +3628,21 @@ static void encode_picture(MpegEncContext *s, int picture_number)
}
}else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
if( current_packet_size >= s->rtp_payload_size
- && s->mb_y + s->mb_x>0 && s->mb_skip_run==0){
+ && s->mb_skip_run==0){
+ ff_mpeg1_encode_slice_header(s);
+ ff_mpeg1_clean_buffers(s);
+ is_gob_start=1;
+ }
+ }else if(s->codec_id==CODEC_ID_MPEG2VIDEO){
+ if( ( current_packet_size >= s->rtp_payload_size || mb_x==0)
+ && s->mb_skip_run==0){
ff_mpeg1_encode_slice_header(s);
ff_mpeg1_clean_buffers(s);
is_gob_start=1;
}
}else{
if(current_packet_size >= s->rtp_payload_size
- && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){
+ && s->mb_x==0 && s->mb_y%s->gob_index==0){
h263_encode_gob_header(s, mb_y);
is_gob_start=1;
@@ -3425,6 +3663,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->first_slice_line=0;
}
+ s->mb_skiped=0;
+
if(mb_type & (mb_type-1)){ // more than 1 MB type possible
int next_block=0;
int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
@@ -3447,6 +3687,15 @@ static void encode_picture(MpegEncContext *s, int picture_number)
encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb,
&dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
}
+ if(mb_type&MB_TYPE_SKIPED){
+ s->mv_dir = MV_DIR_FORWARD;
+ s->mv_type = MV_TYPE_16X16;
+ s->mb_intra= 0;
+ s->mv[0][0][0] = 0;
+ s->mv[0][0][1] = 0;
+ encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_SKIPED, pb, pb2, tex_pb,
+ &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
+ }
if(mb_type&MB_TYPE_INTER4V){
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_8X8;
@@ -3507,9 +3756,12 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->mv[0][0][1] = 0;
encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb,
&dmin, &next_block, 0, 0);
- /* force cleaning of ac/dc pred stuff if needed ... */
- if(s->h263_pred || s->h263_aic)
- s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
+ if(s->h263_pred || s->h263_aic){
+ if(best_s.mb_intra)
+ s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
+ else
+ ff_clean_intra_table_entries(s); //old mode?
+ }
}
copy_context_after_encode(s, &best_s, -1);
@@ -3530,12 +3782,26 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->tex_pb= backup_s.tex_pb;
}
s->last_bits= get_bit_count(&s->pb);
+
+#ifdef CONFIG_RISKY
+ if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
+ ff_h263_update_motion_val(s);
+#endif
+
+ if(next_block==0){
+ s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad , s->linesize ,16);
+ s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8);
+ s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8);
+ }
+
+ if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
+ MPV_decode_mb(s, s->block);
} else {
int motion_x, motion_y;
int intra_score;
int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
- if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
+ if(s->avctx->mb_decision==FF_MB_DECISION_SIMPLE && s->pict_type==P_TYPE){ //FIXME check if the mess is useful at all
/* get luma score */
if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
@@ -3683,6 +3949,13 @@ static void encode_picture(MpegEncContext *s, int picture_number)
// RAL: Update last macrobloc type
s->last_mv_dir = s->mv_dir;
+
+#ifdef CONFIG_RISKY
+ if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
+ ff_h263_update_motion_val(s);
+#endif
+
+ MPV_decode_mb(s, s->block);
}
/* clean the MV table in IPS frames for direct mode in B frames */
@@ -3690,8 +3963,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->p_mv_table[xy][0]=0;
s->p_mv_table[xy][1]=0;
}
-
- MPV_decode_mb(s, s->block);
if(s->flags&CODEC_FLAG_PSNR){
int w= 16;
@@ -3701,20 +3972,14 @@ static void encode_picture(MpegEncContext *s, int picture_number)
if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
s->current_picture_ptr->error[0] += sse(
- s,
- s->new_picture .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
- s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
- w, h, s->linesize);
+ s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
+ s->dest[0], w, h, s->linesize);
s->current_picture_ptr->error[1] += sse(
- s,
- s->new_picture .data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- s->current_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- w>>1, h>>1, s->uvlinesize);
+ s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
+ s->dest[1], w>>1, h>>1, s->uvlinesize);
s->current_picture_ptr->error[2] += sse(
- s,
- s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
- w>>1, h>>1, s->uvlinesize);
+ s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
+ s->dest[2], w>>1, h>>1, s->uvlinesize);
}
//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
}
@@ -3762,14 +4027,17 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
int last_level=0;
int last_score= 0;
int last_i= 0;
+ int not_coded_score= 0;
int coeff[3][64];
int coeff_count[64];
- int lambda, qmul, qadd, start_i, last_non_zero, i;
+ int qmul, qadd, start_i, last_non_zero, i, dc;
const int esc_length= s->ac_esc_length;
uint8_t * length;
uint8_t * last_length;
int score_limit=0;
int left_limit= 0;
+ const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
+ const int patch_table= s->out_format == FMT_MPEG1 && !s->mb_intra;
s->dsp.fdct (block);
@@ -3795,7 +4063,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
start_i = 1;
last_non_zero = 0;
qmat = s->q_intra_matrix[qscale];
- if(s->mpeg_quant || s->codec_id== CODEC_ID_MPEG1VIDEO)
+ if(s->mpeg_quant || s->out_format == FMT_MPEG1)
bias= 1<<(QMAT_SHIFT-1);
length = s->intra_ac_vlc_length;
last_length= s->intra_ac_vlc_last_length;
@@ -3831,6 +4099,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
// coeff[2][k]= -level+2;
}
coeff_count[k]= FFMIN(level, 2);
+ assert(coeff_count[k]);
max |=level;
last_non_zero = i;
}else{
@@ -3846,9 +4115,13 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
return last_non_zero;
}
- lambda= (qscale*qscale*64*105 + 64)>>7; //FIXME finetune
-
score_tab[0]= 0;
+
+ if(patch_table){
+// length[UNI_AC_ENC_INDEX(0, 63)]=
+// length[UNI_AC_ENC_INDEX(0, 65)]= 2;
+ }
+
for(i=0; i<=last_non_zero - start_i; i++){
int level_index, run, j;
const int dct_coeff= block[ scantable[i + start_i] ];
@@ -3856,6 +4129,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
int best_score=256*256*256*120;
last_score += zero_distoration;
+ not_coded_score += zero_distoration;
for(level_index=0; level_index < coeff_count[i]; level_index++){
int distoration;
int level= coeff[level_index][i];
@@ -3954,10 +4228,13 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
//Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
while(score_tab[ left_limit ] > score_limit + lambda) left_limit++;
+
+ if(patch_table){
+// length[UNI_AC_ENC_INDEX(0, 63)]=
+// length[UNI_AC_ENC_INDEX(0, 65)]= 3;
+ }
}
- //FIXME add some cbp penalty
-
if(s->out_format != FMT_H263){
last_score= 256*256*256*120;
for(i= left_limit; i<=last_non_zero - start_i + 1; i++){
@@ -3972,13 +4249,60 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
}
}
}
+
+ s->coded_score[n] = last_score - not_coded_score;
+ dc= block[0];
last_non_zero= last_i - 1 + start_i;
memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
if(last_non_zero < start_i)
return last_non_zero;
-
+
+ if(last_non_zero == 0 && start_i == 0){
+ int best_level= 0;
+ int best_score= dc * dc;
+
+ for(i=0; i<coeff_count[0]; i++){
+ int level= coeff[i][0];
+ int unquant_coeff, score, distoration;
+
+ if(s->out_format == FMT_H263){
+ if(level>0){
+ unquant_coeff= (level*qmul + qadd)>>3;
+ }else{
+ unquant_coeff= (level*qmul - qadd)>>3;
+ }
+ }else{ //MPEG1
+ if (level < 0) {
+ unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
+ unquant_coeff = -((unquant_coeff - 1) | 1);
+ } else {
+ unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
+ unquant_coeff = (unquant_coeff - 1) | 1;
+ }
+ }
+ unquant_coeff = (unquant_coeff + 4) >> 3;
+ unquant_coeff<<= 3 + 3;
+
+ distoration= (unquant_coeff - dc) * (unquant_coeff - dc);
+ level+=64;
+ if((level&(~127)) == 0)
+ score= distoration + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
+ else
+ score= distoration + esc_length*lambda;
+
+ if(score < best_score){
+ best_score= score;
+ best_level= level - 64;
+ }
+ }
+ block[0]= best_level;
+ s->coded_score[n] = best_score - dc*dc;
+ if(best_level == 0) return -1;
+ else return last_non_zero;
+ }
+
i= last_i;
assert(last_level);
//FIXME use permutated scantable
@@ -4239,7 +4563,6 @@ static void dct_unquantize_h263_c(MpegEncContext *s,
static const AVOption mpeg4_options[] =
{
AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
- AVOPTION_CODEC_FLAG("vhq", "very high quality", flags, CODEC_FLAG_HQ, 0),
AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
"the reference can be CBR (for CBR pass1) or VBR (for pass2)",
bit_rate_tolerance, 4, 240000000, 8000),
@@ -4325,6 +4648,16 @@ AVCodec mpeg1video_encoder = {
#ifdef CONFIG_RISKY
+AVCodec mpeg2video_encoder = { //MPEG-2 video encoder; reuses the generic MPV_encode_* entry points
+ "mpeg2video",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_MPEG2VIDEO,
+ sizeof(MpegEncContext),
+ MPV_encode_init,
+ MPV_encode_picture,
+ MPV_encode_end,
+};
+
AVCodec h263_encoder = {
"h263",
CODEC_TYPE_VIDEO,
@@ -4345,6 +4678,16 @@ AVCodec h263p_encoder = {
MPV_encode_end,
};
+AVCodec flv_encoder = { //Flash Video (FLV1, Sorenson-style H.263 variant) encoder; same generic MPV_encode_* entry points
+ "flv",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_FLV1,
+ sizeof(MpegEncContext),
+ MPV_encode_init,
+ MPV_encode_picture,
+ MPV_encode_end,
+};
+
AVCodec rv10_encoder = {
"rv10",
CODEC_TYPE_VIDEO,