Diffstat (limited to 'src/libffmpeg/libavcodec/mpegvideo.c')
-rw-r--r-- | src/libffmpeg/libavcodec/mpegvideo.c | 393
1 files changed, 258 insertions, 135 deletions
diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c
index 393e3828b..c576695f3 100644
--- a/src/libffmpeg/libavcodec/mpegvideo.c
+++ b/src/libffmpeg/libavcodec/mpegvideo.c
@@ -81,7 +81,7 @@ static const uint8_t h263_chroma_roundtab[16] = {
 };

 #ifdef CONFIG_ENCODERS
-static uint16_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
+static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
 static uint8_t default_fcode_tab[MAX_MV*2+1];

 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
@@ -135,16 +135,6 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16
 }
 #endif //CONFIG_ENCODERS

-// move into common.c perhaps
-#define CHECKED_ALLOCZ(p, size)\
-{\
-    p= av_mallocz(size);\
-    if(p==NULL){\
-        perror("malloc");\
-        goto fail;\
-    }\
-}
-
 void ff_init_scantable(MpegEncContext *s, ScanTable *st, const uint8_t *src_scantable){
     int i;
     int end;
@@ -226,6 +216,9 @@ int DCT_common_init(MpegEncContext *s)
  * The pixels are allocated/set by calling get_buffer() if shared=0
  */
 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
+    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
+    const int mb_array_size= s->mb_stride*s->mb_height;
+    int i;

     if(shared){
         assert(pic->data[0]);
@@ -259,17 +252,25 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){

     if(pic->qscale_table==NULL){
         if (s->encoding) {
-            CHECKED_ALLOCZ(pic->mb_var , s->mb_num * sizeof(int16_t))
-            CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(int16_t))
-            CHECKED_ALLOCZ(pic->mb_mean , s->mb_num * sizeof(int8_t))
-            CHECKED_ALLOCZ(pic->mb_cmp_score, s->mb_num * sizeof(int32_t))
+            CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t))
+            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
+            CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t))
+            CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
         }
-        CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(uint8_t)+1) //the +1 is for the slice end check
-        CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(uint8_t))
-        pic->qstride= s->mb_width;
+        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
+        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
+        CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(int))
+        pic->mb_type= pic->mb_type_base + s->mb_stride+1;
+        if(s->out_format == FMT_H264){
+            for(i=0; i<2; i++){
+                CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
+                CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
+            }
+        }
+        pic->qstride= s->mb_stride;
     }
-
+
     //it might be nicer if the application would keep track of these but it would require a API change
     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
     s->prev_pict_types[0]= s->pict_type;
@@ -297,7 +298,13 @@ static void free_picture(MpegEncContext *s, Picture *pic){
     av_freep(&pic->mb_cmp_score);
     av_freep(&pic->mbskip_table);
     av_freep(&pic->qscale_table);
-
+    av_freep(&pic->mb_type_base);
+    pic->mb_type= NULL;
+    for(i=0; i<2; i++){
+        av_freep(&pic->motion_val[i]);
+        av_freep(&pic->ref_index[i]);
+    }
+
     if(pic->type == FF_BUFFER_TYPE_INTERNAL){
         for(i=0; i<4; i++){
             av_freep(&pic->base[i]);
@@ -317,7 +324,7 @@ static void free_picture(MpegEncContext *s, Picture *pic){
 /* init common structure for both encoder and decoder */
 int MPV_common_init(MpegEncContext *s)
 {
-    int y_size, c_size, yc_size, i;
+    int y_size, c_size, yc_size, i, mb_array_size, x, y;

     dsputil_init(&s->dsp, s->avctx);
     DCT_common_init(s);
@@ -326,12 +333,21 @@ int MPV_common_init(MpegEncContext *s)
     s->mb_width = (s->width + 15) / 16;
     s->mb_height = (s->height + 15) / 16;
+    s->mb_stride = s->mb_width + 1;
+    mb_array_size= s->mb_height * s->mb_stride;

     /* set default edge pos, will be overriden in decode_header if needed */
     s->h_edge_pos= s->mb_width*16;
     s->v_edge_pos= s->mb_height*16;

     s->mb_num = s->mb_width * s->mb_height;
+
+    s->block_wrap[0]=
+    s->block_wrap[1]=
+    s->block_wrap[2]=
+    s->block_wrap[3]= s->mb_width*2 + 2;
+    s->block_wrap[4]=
+    s->block_wrap[5]= s->mb_width + 2;

     y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
     c_size = (s->mb_width + 2) * (s->mb_height + 2);
@@ -348,16 +364,30 @@ int MPV_common_init(MpegEncContext *s)

     s->avctx->coded_frame= (AVFrame*)&s->current_picture;

+    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
+    for(y=0; y<s->mb_height; y++){
+        for(x=0; x<s->mb_width; x++){
+            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
+        }
+    }
+    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
+
     if (s->encoding) {
-        int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
+        int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;

         /* Allocate MV tables */
-        CHECKED_ALLOCZ(s->p_mv_table , mv_table_size * 2 * sizeof(int16_t))
-        CHECKED_ALLOCZ(s->b_forw_mv_table , mv_table_size * 2 * sizeof(int16_t))
-        CHECKED_ALLOCZ(s->b_back_mv_table , mv_table_size * 2 * sizeof(int16_t))
-        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(int16_t))
-        CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(int16_t))
-        CHECKED_ALLOCZ(s->b_direct_mv_table , mv_table_size * 2 * sizeof(int16_t))
+        CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+        CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+        CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+        CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
+        s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
+        s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
+        s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
+        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
+        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
+        s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;

         //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
         CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))
@@ -374,14 +404,15 @@ int MPV_common_init(MpegEncContext *s)
             CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
         }
         CHECKED_ALLOCZ(s->avctx->stats_out, 256);
+
+        /* Allocate MB type table */
+        CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint8_t)) //needed for encoding
     }
-    CHECKED_ALLOCZ(s->error_status_table, s->mb_num*sizeof(uint8_t))
+    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

     if (s->out_format == FMT_H263 || s->encoding) {
         int size;
-        /* Allocate MB type table */
-        CHECKED_ALLOCZ(s->mb_type , s->mb_num * sizeof(uint8_t))

         /* MV prediction */
         size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
@@ -390,12 +421,9 @@ int MPV_common_init(MpegEncContext *s)

     if(s->codec_id==CODEC_ID_MPEG4){
         /* interlaced direct mode decoding tables */
-        CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(int16_t))
-        CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(int8_t))
+        CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
+        CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
     }
-    /* 4mv b frame decoding table */
-    //note this is needed for h263 without b frames too (segfault on damaged streams otherwise)
-    CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(uint8_t))

     if (s->out_format == FMT_H263) {
         /* ac values */
         CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
@@ -409,8 +437,8 @@ int MPV_common_init(MpegEncContext *s)
         CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);

         /* cbp, ac_pred, pred_dir */
-        CHECKED_ALLOCZ(s->cbp_table , s->mb_num * sizeof(uint8_t))
-        CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(uint8_t))
+        CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
+        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
     }

     if (s->h263_pred || s->h263_plus || !s->encoding) {
@@ -424,14 +452,14 @@ int MPV_common_init(MpegEncContext *s)
     }

     /* which mb is a intra block */
-    CHECKED_ALLOCZ(s->mbintra_table, s->mb_num);
-    memset(s->mbintra_table, 1, s->mb_num);
+    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
+    memset(s->mbintra_table, 1, mb_array_size);

     /* default structure is frame */
     s->picture_structure = PICT_FRAME;

     /* init macroblock skip table */
-    CHECKED_ALLOCZ(s->mbskip_table, s->mb_num+1);
+    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
     //Note the +1 is for a quicker mpeg4 slice_end detection
     CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
@@ -455,12 +483,19 @@ void MPV_common_end(MpegEncContext *s)
     int i;

     av_freep(&s->mb_type);
-    av_freep(&s->p_mv_table);
-    av_freep(&s->b_forw_mv_table);
-    av_freep(&s->b_back_mv_table);
-    av_freep(&s->b_bidir_forw_mv_table);
-    av_freep(&s->b_bidir_back_mv_table);
-    av_freep(&s->b_direct_mv_table);
+    av_freep(&s->p_mv_table_base);
+    av_freep(&s->b_forw_mv_table_base);
+    av_freep(&s->b_back_mv_table_base);
+    av_freep(&s->b_bidir_forw_mv_table_base);
+    av_freep(&s->b_bidir_back_mv_table_base);
+    av_freep(&s->b_direct_mv_table_base);
+    s->p_mv_table= NULL;
+    s->b_forw_mv_table= NULL;
+    s->b_back_mv_table= NULL;
+    s->b_bidir_forw_mv_table= NULL;
+    s->b_bidir_back_mv_table= NULL;
+    s->b_direct_mv_table= NULL;
+
     av_freep(&s->motion_val);
     av_freep(&s->dc_val[0]);
     av_freep(&s->ac_val[0]);
@@ -478,12 +513,12 @@ void MPV_common_end(MpegEncContext *s)
     av_freep(&s->tex_pb_buffer);
     av_freep(&s->pb2_buffer);
     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
-    av_freep(&s->co_located_type_table);
     av_freep(&s->field_mv_table);
     av_freep(&s->field_select_table);
     av_freep(&s->avctx->stats_out);
     av_freep(&s->ac_stats);
     av_freep(&s->error_status_table);
+    av_freep(&s->mb_index2xy);

     for(i=0; i<MAX_PICTURE_COUNT; i++){
         free_picture(s, &s->picture[i]);
@@ -550,6 +585,31 @@ int MPV_encode_init(AVCodecContext *avctx)

     s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);

+    if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){
+        fprintf(stderr, "4MV not supporetd by codec\n");
+        return -1;
+    }
+
+    if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
+        fprintf(stderr, "qpel not supporetd by codec\n");
+        return -1;
+    }
+
+    if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
+        fprintf(stderr, "data partitioning not supporetd by codec\n");
+        return -1;
+    }
+
+    if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO){
+        fprintf(stderr, "b frames not supporetd by codec\n");
+        return -1;
+    }
+
+    if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
+        fprintf(stderr, "mpeg2 style quantization not supporetd by codec\n");
+        return -1;
+    }
+
     if(s->codec_id==CODEC_ID_MJPEG){
         s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
         s->inter_quant_bias= 0;
@@ -647,6 +707,7 @@ int MPV_encode_init(AVCodecContext *avctx)
         s->h263_pred = 1;
         s->unrestricted_mv = 1;
         s->msmpeg4_version= 3;
+        s->flipflop_rounding=1;
         avctx->delay=0;
         s->low_delay=1;
         break;
@@ -656,6 +717,7 @@ int MPV_encode_init(AVCodecContext *avctx)
         s->h263_pred = 1;
         s->unrestricted_mv = 1;
         s->msmpeg4_version= 4;
+        s->flipflop_rounding=1;
         avctx->delay=0;
         s->low_delay=1;
         break;
@@ -665,6 +727,7 @@ int MPV_encode_init(AVCodecContext *avctx)
         s->h263_pred = 1;
         s->unrestricted_mv = 1;
         s->msmpeg4_version= 5;
+        s->flipflop_rounding=1;
         avctx->delay=0;
         s->low_delay=1;
         break;
@@ -679,8 +742,8 @@ int MPV_encode_init(AVCodecContext *avctx)
         int i;
         done=1;

-        default_mv_penalty= av_mallocz( sizeof(uint16_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
-        memset(default_mv_penalty, 0, sizeof(uint16_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
+        default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
+        memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
         memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));

         for(i=-16; i<16; i++){
@@ -851,7 +914,7 @@ static int find_unused_picture(MpegEncContext *s, int shared){
         }
     }else{
         for(i=0; i<MAX_PICTURE_COUNT; i++){
-            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break;
+            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME
         }
         for(i=0; i<MAX_PICTURE_COUNT; i++){
             if(s->picture[i].data[0]==NULL) break;
@@ -869,7 +932,9 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     AVFrame *pic;

     s->mb_skiped = 0;
-
+
+    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264);
+
     /* mark&release old frames */
     if (s->pict_type != B_TYPE && s->last_picture_ptr) {
         avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
@@ -891,7 +956,7 @@ alloc:
         i= find_unused_picture(s, 0);

         pic= (AVFrame*)&s->picture[i];
-        pic->reference= s->pict_type != B_TYPE;
+        pic->reference= s->pict_type != B_TYPE ? 3 : 0;

         if(s->current_picture_ptr)
             pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
@@ -901,11 +966,17 @@ alloc:
         s->current_picture_ptr= &s->picture[i];
     }

+    s->current_picture_ptr->pict_type= s->pict_type;
+    s->current_picture_ptr->quality= s->qscale;
+    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
+
+    s->current_picture= *s->current_picture_ptr;
+
+    if(s->out_format != FMT_H264){
     if (s->pict_type != B_TYPE) {
         s->last_picture_ptr= s->next_picture_ptr;
         s->next_picture_ptr= s->current_picture_ptr;
     }
-    s->current_picture= *s->current_picture_ptr;
     if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
     if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
     if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;
@@ -927,6 +998,7 @@ alloc:
             assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
             goto alloc;
         }
+    }

     s->hurry_up= s->avctx->hurry_up;
     s->error_resilience= avctx->error_resilience;
@@ -972,28 +1044,13 @@ void MPV_frame_end(MpegEncContext *s)
         }
         assert(i<MAX_PICTURE_COUNT);
 #endif
-    s->current_picture_ptr->quality= s->qscale; //FIXME get average of qscale_table
-    s->current_picture_ptr->pict_type= s->pict_type;
-    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

     /* release non refernce frames */
     for(i=0; i<MAX_PICTURE_COUNT; i++){
         if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
             s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
     }
-    if(s->avctx->debug&FF_DEBUG_SKIP){
-        int x,y;
-        for(y=0; y<s->mb_height; y++){
-            for(x=0; x<s->mb_width; x++){
-                int count= s->mbskip_table[x + y*s->mb_width];
-                if(count>9) count=9;
-                printf(" %1d", count);
-            }
-            printf("\n");
-        }
-        printf("pict type: %d\n", s->pict_type);
-    }
-
+
     // clear copies, to avoid confusion
 #if 0
     memset(&s->last_picture, 0, sizeof(Picture));
@@ -1002,6 +1059,82 @@ void MPV_frame_end(MpegEncContext *s)
 #endif
 }

+/**
+ * prints debuging info for the given picture.
+ */
+void ff_print_debug_info(MpegEncContext *s, Picture *pict){
+
+    if(!pict || !pict->mb_type) return;
+
+    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
+        int x,y;
+
+        for(y=0; y<s->mb_height; y++){
+            for(x=0; x<s->mb_width; x++){
+                if(s->avctx->debug&FF_DEBUG_SKIP){
+                    int count= s->mbskip_table[x + y*s->mb_stride];
+                    if(count>9) count=9;
+                    printf("%1d", count);
+                }
+                if(s->avctx->debug&FF_DEBUG_QP){
+                    printf("%2d", pict->qscale_table[x + y*s->mb_stride]);
+                }
+                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
+                    int mb_type= pict->mb_type[x + y*s->mb_stride];
+
+                    //Type & MV direction
+                    if(IS_PCM(mb_type))
+                        printf("P");
+                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
+                        printf("A");
+                    else if(IS_INTRA4x4(mb_type))
+                        printf("i");
+                    else if(IS_INTRA16x16(mb_type))
+                        printf("I");
+                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
+                        printf("d");
+                    else if(IS_DIRECT(mb_type))
+                        printf("D");
+                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
+                        printf("g");
+                    else if(IS_GMC(mb_type))
+                        printf("G");
+                    else if(IS_SKIP(mb_type))
+                        printf("S");
+                    else if(!USES_LIST(mb_type, 1))
+                        printf(">");
+                    else if(!USES_LIST(mb_type, 0))
+                        printf("<");
+                    else{
+                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
+                        printf("X");
+                    }
+
+                    //segmentation
+                    if(IS_8X8(mb_type))
+                        printf("+");
+                    else if(IS_16X8(mb_type))
+                        printf("-");
+                    else if(IS_8X16(mb_type))
+                        printf("¦");
+                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
+                        printf(" ");
+                    else
+                        printf("?");
+
+
+                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
+                        printf("=");
+                    else
+                        printf(" ");
+                }
+//                printf(" ");
+            }
+            printf("\n");
+        }
+    }
+}
+
 #ifdef CONFIG_ENCODERS

 static int get_sae(uint8_t *src, int ref, int stride){
@@ -1055,7 +1188,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
         i= find_unused_picture(s, 1);

         pic= (AVFrame*)&s->picture[i];
-        pic->reference= 1;
+        pic->reference= 3;

         for(i=0; i<4; i++){
             pic->data[i]= pic_arg->data[i];
@@ -1066,7 +1199,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
         i= find_unused_picture(s, 0);

         pic= (AVFrame*)&s->picture[i];
-        pic->reference= 1;
+        pic->reference= 3;

         alloc_picture(s, (Picture*)pic, 0);
         for(i=0; i<4; i++){
@@ -1211,7 +1344,7 @@ static void select_input_picture(MpegEncContext *s){
     }

     if(s->reordered_input_picture[0]){
-        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
+        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;

         s->new_picture= *s->reordered_input_picture[0];
@@ -1979,7 +2112,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

-    s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0;
+    s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
 }

 /* generic function called after a macroblock has been parsed by the
@@ -1995,7 +2128,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
 {
     int mb_x, mb_y;
-    const int mb_xy = s->mb_y * s->mb_width + s->mb_x;
+    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

     mb_x = s->mb_x;
     mb_y = s->mb_y;
@@ -2021,33 +2154,26 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
         //FIXME a lot of thet is only needed for !low_delay
         const int wrap = s->block_wrap[0];
         const int xy = s->block_index[0];
-        const int mb_index= s->mb_x + s->mb_y*s->mb_width;
-        if(s->mv_type == MV_TYPE_8X8){
-            s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_4MV;
-        } else {
+        if(s->mv_type != MV_TYPE_8X8){
             int motion_x, motion_y;
             if (s->mb_intra) {
                 motion_x = 0;
                 motion_y = 0;
-                if(s->co_located_type_table)
-                    s->co_located_type_table[mb_index]= 0;
             } else if (s->mv_type == MV_TYPE_16X16) {
                 motion_x = s->mv[0][0][0];
                 motion_y = s->mv[0][0][1];
-                if(s->co_located_type_table)
-                    s->co_located_type_table[mb_index]= 0;
             } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
                 int i;
                 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
                 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
                 motion_x = (motion_x>>1) | (motion_x&1);
                 for(i=0; i<2; i++){
-                    s->field_mv_table[mb_index][i][0]= s->mv[0][i][0];
-                    s->field_mv_table[mb_index][i][1]= s->mv[0][i][1];
-                    s->field_select_table[mb_index][i]= s->field_select[0][i];
+                    s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0];
+                    s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1];
+                    s->field_select_table[mb_xy][i]= s->field_select[0][i];
                 }
-                s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_FIELDMV;
             }
+
             /* no update if 8X8 because it has been done during parsing */
             s->motion_val[xy][0] = motion_x;
             s->motion_val[xy][1] = motion_y;
@@ -2058,6 +2184,13 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
             s->motion_val[xy + 1 + wrap][0] = motion_x;
             s->motion_val[xy + 1 + wrap][1] = motion_y;
         }
+
+        if(s->encoding){ //FIXME encoding MUST be cleaned up
+            if (s->mv_type == MV_TYPE_8X8)
+                s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
+            else
+                s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
+        }
     }

     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
@@ -2383,7 +2516,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
     for(i=0; i<6; i++) skip_dct[i]=0;

     if(s->adaptive_quant){
-        s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_width] - s->qscale;
+        s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_stride] - s->qscale;

         if(s->out_format==FMT_H263){
             if (s->dquant> 2) s->dquant= 2;
@@ -2392,10 +2525,10 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)

         if(s->codec_id==CODEC_ID_MPEG4){
             if(!s->mb_intra){
-                assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
-
                 if(s->mv_dir&MV_DIRECT)
                     s->dquant=0;
+
+                assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
             }
         }
         s->qscale+= s->dquant;
@@ -2534,7 +2667,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
         }
         /* pre quantization */
-        if(s->current_picture.mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){
+        if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
            //FIXME optimize
             if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
             if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
@@ -2565,13 +2698,13 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
     {
         float adap_parm;

-        adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) /
-                    ((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
+        adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_stride*mb_y+mb_x] + 1.0) /
+                    ((s->mb_var[s->mb_stride*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);

         printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
-                (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P',
+                (s->mb_type[s->mb_stride*mb_y+mb_x] > 0) ? 'I' : 'P',
                 s->qscale, adap_parm, s->qscale*adap_parm,
-                s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var);
+                s->mb_var[s->mb_stride*mb_y+mb_x], s->avg_mb_var);
     }
 #endif

     /* DCT & quantize */
@@ -2683,7 +2816,7 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext
     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?

     /* mpeg1 */
-    d->mb_incr= s->mb_incr;
+    d->mb_skip_run= s->mb_skip_run;
     for(i=0; i<3; i++)
         d->last_dc[i]= s->last_dc[i];
@@ -2709,7 +2842,7 @@ static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *
     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?

     /* mpeg1 */
-    d->mb_incr= s->mb_incr;
+    d->mb_skip_run= s->mb_skip_run;
     for(i=0; i<3; i++)
         d->last_dc[i]= s->last_dc[i];
@@ -2809,13 +2942,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
     }

     s->picture_number = picture_number;
-
-    s->block_wrap[0]=
-    s->block_wrap[1]=
-    s->block_wrap[2]=
-    s->block_wrap[3]= s->mb_width*2 + 2;
-    s->block_wrap[4]=
-    s->block_wrap[5]= s->mb_width + 2;

     /* Reset the average MB variance */
     s->current_picture.mb_var_sum = 0;
@@ -2833,8 +2959,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
     s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration

     if(s->pict_type==I_TYPE){
-        if(s->msmpeg4_version) s->no_rounding=1;
-        else s->no_rounding=0;
+        if(s->msmpeg4_version >= 3) s->no_rounding=1;
+        else s->no_rounding=0;
     }else if(s->pict_type!=B_TYPE){
         if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
             s->no_rounding ^= 1;
@@ -2884,8 +3010,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
         /* I-Frame */
         //FIXME do we need to zero them?
         memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
-        memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_width+2)*(s->mb_height+2)*2);
-        memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_width*s->mb_height);
+        memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
+        memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);

         if(!s->fixed_qscale){
             /* finding spatial complexity for I-frame rate control */
@@ -2899,8 +3025,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
                     varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;

-                    s->current_picture.mb_var [s->mb_width * mb_y + mb_x] = varc;
-                    s->current_picture.mb_mean[s->mb_width * mb_y + mb_x] = (sum+128)>>8;
+                    s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
+                    s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
                     s->current_picture.mb_var_sum += varc;
                 }
             }
@@ -2910,7 +3036,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)

     if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
         s->pict_type= I_TYPE;
-        memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_width*s->mb_height);
+        memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
     }
@@ -3022,7 +3148,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
         s->current_picture_ptr->error[i] = 0;
     }

-    s->mb_incr = 1;
+    s->mb_skip_run = 0;
     s->last_mv[0][0][0] = 0;
     s->last_mv[0][0][1] = 0;
     s->last_mv[1][0][0] = 0;
@@ -3042,7 +3168,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
     s->resync_mb_y=0;
     s->first_slice_line = 1;
     s->ptr_lastgob = s->pb.buf;
-    s->ptr_last_mb_line = s->pb.buf;
     for(mb_y=0; mb_y < s->mb_height; mb_y++) {
         s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
         s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
@@ -3054,8 +3179,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
         s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
         s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
-            int mb_type= s->mb_type[mb_y * s->mb_width + mb_x];
-            const int xy= (mb_y+1) * (s->mb_width+2) + mb_x + 1;
+            const int xy= mb_y*s->mb_stride + mb_x;
+            int mb_type= s->mb_type[xy];
//            int d;
            int dmin=10000000;
@@ -3077,7 +3202,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
                is_gob_start=0;

                if(s->codec_id==CODEC_ID_MPEG4){
-                    if(current_packet_size + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size
+                    if(current_packet_size >= s->rtp_payload_size
                       && s->mb_y + s->mb_x>0){

                        if(s->partitioned_frame){
@@ -3094,8 +3219,15 @@ static void encode_picture(MpegEncContext *s, int picture_number)
                        ff_mpeg4_clean_buffers(s);
                        is_gob_start=1;
                    }
+                }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
+                    if( current_packet_size >= s->rtp_payload_size
+                       && s->mb_y + s->mb_x>0 && s->mb_skip_run==0){
+                        ff_mpeg1_encode_slice_header(s);
+                        ff_mpeg1_clean_buffers(s);
+                        is_gob_start=1;
+                    }
                }else{
-                    if(current_packet_size + s->mb_line_avgsize*s->gob_index >= s->rtp_payload_size
+                    if(current_packet_size >= s->rtp_payload_size
                       && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){

                        h263_encode_gob_header(s, mb_y);
@@ -3201,7 +3333,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
                                 &dmin, &next_block, 0, 0);
                    /* force cleaning of ac/dc pred stuff if needed ... */
                    if(s->h263_pred || s->h263_aic)
-                        s->mbintra_table[mb_x + mb_y*s->mb_width]=1;
+                        s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
                }
                copy_context_after_encode(s, &best_s, -1);
@@ -3225,16 +3357,16 @@ static void encode_picture(MpegEncContext *s, int picture_number)
            } else {
                int motion_x, motion_y;
                int intra_score;
-                int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_width];
+                int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];

                if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
                    /* get luma score */
                    if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
-                        intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_width]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
+                        intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
                    }else{
                        uint8_t *dest_y;
-                        int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_width]; //FIXME
+                        int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
                        mean*= 0x01010101;

                        dest_y = s->new_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
@@ -3250,8 +3382,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
                        intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
/*                        printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8,
-                            s->current_picture.mb_var[mb_x + mb_y*s->mb_width],
-                            s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_width]);*/
+                            s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
+                            s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
                    }

                    /* get chroma score */
@@ -3408,18 +3540,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
                                   s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
                                   w>>1, h>>1, s->uvlinesize);
                }
-//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_width, get_bit_count(&s->pb));
-        }
-
-
-        /* Obtain average mb_row size for RTP */
-        if (s->rtp_mode) {
-            if (mb_y==0)
-                s->mb_line_avgsize = pbBufPtr(&s->pb) - s->ptr_last_mb_line;
-            else {
-                s->mb_line_avgsize = (s->mb_line_avgsize + pbBufPtr(&s->pb) - s->ptr_last_mb_line) >> 1;
-            }
-            s->ptr_last_mb_line = pbBufPtr(&s->pb);
+//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
        }
    }
    emms_c();
@@ -3945,6 +4066,8 @@ char ff_get_pict_type_char(int pict_type){
    case P_TYPE: return 'P';
    case B_TYPE: return 'B';
    case S_TYPE: return 'S';
+    case SI_TYPE:return 'i';
+    case SP_TYPE:return 'p';
    default: return '?';
    }
}