Diffstat (limited to 'src/libffmpeg/libavcodec/mpegvideo.c')
-rw-r--r--   src/libffmpeg/libavcodec/mpegvideo.c   323
1 files changed, 202 insertions, 121 deletions
diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c
index 32a92917c..bef088a41 100644
--- a/src/libffmpeg/libavcodec/mpegvideo.c
+++ b/src/libffmpeg/libavcodec/mpegvideo.c
@@ -283,7 +283,9 @@ static void copy_picture(Picture *dst, Picture *src){
     dst->type= FF_BUFFER_TYPE_COPY;
 }
 
-static void copy_picture_attributes(AVFrame *dst, AVFrame *src){
+static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
+    int i;
+
     dst->pict_type = src->pict_type;
     dst->quality = src->quality;
     dst->coded_picture_number = src->coded_picture_number;
@@ -292,6 +294,32 @@ static void copy_picture_attributes(AVFrame *dst, AVFrame *src){
     dst->pts = src->pts;
     dst->interlaced_frame = src->interlaced_frame;
     dst->top_field_first = src->top_field_first;
+
+    if(s->avctx->me_threshold){
+        if(!src->motion_val[0])
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
+        if(!src->mb_type)
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
+        if(!src->ref_index[0])
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
+        if(src->motion_subsample_log2 != dst->motion_subsample_log2)
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesnt match! (%d!=%d)\n",
+                   src->motion_subsample_log2, dst->motion_subsample_log2);
+
+        memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
+
+        for(i=0; i<2; i++){
+            int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
+            int height= ((16*s->mb_height)>>src->motion_subsample_log2);
+
+            if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
+                memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
+            }
+            if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
+                memcpy(dst->ref_index[i], src->ref_index[i], s->b8_stride*2*s->mb_height*sizeof(int8_t));
+            }
+        }
+    }
 }
 
 /**
@@ -350,13 +378,14 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
         for(i=0; i<2; i++){
             CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+2) * sizeof(int16_t))
             pic->motion_val[i]= pic->motion_val_base[i]+2;
-            CHECKED_ALLOCZ(pic->ref_index[i] , b8_array_size * sizeof(uint8_t))
+            CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
         }
         pic->motion_subsample_log2= 2;
     }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
         for(i=0; i<2; i++){
-            CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t)*2) //FIXME
+            CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t))
             pic->motion_val[i]= pic->motion_val_base[i]+2;
+            CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
         }
         pic->motion_subsample_log2= 3;
     }
@@ -510,7 +539,68 @@ static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContex
 #undef COPY
 }
 
-/* init common structure for both encoder and decoder */
+/**
+ * sets the given MpegEncContext to common defaults (same for encoding and decoding).
+ * the changed fields will not depend upon the prior state of the MpegEncContext.
+ */
+static void MPV_common_defaults(MpegEncContext *s){
+    s->y_dc_scale_table=
+    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
+    s->chroma_qscale_table= ff_default_chroma_qscale_table;
+    s->progressive_frame= 1;
+    s->progressive_sequence= 1;
+    s->picture_structure= PICT_FRAME;
+
+    s->coded_picture_number = 0;
+    s->picture_number = 0;
+    s->input_picture_number = 0;
+
+    s->picture_in_gop_number = 0;
+
+    s->f_code = 1;
+    s->b_code = 1;
+}
+
+/**
+ * sets the given MpegEncContext to defaults for decoding.
+ * the changed fields will not depend upon the prior state of the MpegEncContext.
+ */
+void MPV_decode_defaults(MpegEncContext *s){
+    MPV_common_defaults(s);
+}
+
+/**
+ * sets the given MpegEncContext to defaults for encoding.
+ * the changed fields will not depend upon the prior state of the MpegEncContext.
+ */
+
+#ifdef CONFIG_ENCODERS
+void MPV_encode_defaults(MpegEncContext *s){
+    static int done=0;
+
+    MPV_common_defaults(s);
+
+    if(!done){
+        int i;
+        done=1;
+
+        default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
+        memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
+        memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
+
+        for(i=-16; i<16; i++){
+            default_fcode_tab[i + MAX_MV]= 1;
+        }
+    }
+    s->me.mv_penalty= default_mv_penalty;
+    s->fcode_tab= default_fcode_tab;
+}
+#endif //CONFIG_ENCODERS
+
+/**
+ * init common structure for both encoder and decoder.
+ * this assumes that some variables like width/height are already set
+ */
 int MPV_common_init(MpegEncContext *s)
 {
     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
@@ -538,31 +628,14 @@ int MPV_common_init(MpegEncContext *s)
     s->block_wrap[0]=
     s->block_wrap[1]=
     s->block_wrap[2]=
-    s->block_wrap[3]= s->mb_width*2 + 2;
+    s->block_wrap[3]= s->b8_stride;
     s->block_wrap[4]=
-    s->block_wrap[5]= s->mb_width + 2;
-
-    s->y_dc_scale_table=
-    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
-    s->chroma_qscale_table= ff_default_chroma_qscale_table;
-    if( s->codec_id != CODEC_ID_MPEG1VIDEO &&
-        s->codec_id != CODEC_ID_MPEG2VIDEO)
-    {
-        /* default structure is frame */
-        s->progressive_frame= 1;
-        s->picture_structure= PICT_FRAME;
-
-        s->y_dc_scale_table=
-        s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
-        if (!s->encoding)
-            s->progressive_sequence= 1;
-    }
-    s->coded_picture_number = 0;
-
-    y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
-    c_size = (s->mb_width + 2) * (s->mb_height + 2);
+    s->block_wrap[5]= s->mb_stride;
+
+    y_size = s->b8_stride * (2 * s->mb_height + 1);
+    c_size = s->mb_stride * (s->mb_height + 1);
     yc_size = y_size + 2 * c_size;
-
+
     /* convert fourcc to upper case */
     s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF)
                        + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
@@ -642,12 +715,14 @@
     }
     if (s->out_format == FMT_H263) {
         /* ac values */
-        CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
-        s->ac_val[1] = s->ac_val[0] + y_size;
+        CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
+        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
+        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
         s->ac_val[2] = s->ac_val[1] + c_size;
 
         /* cbp values */
-        CHECKED_ALLOCZ(s->coded_block, y_size);
+        CHECKED_ALLOCZ(s->coded_block_base, y_size);
+        s->coded_block= s->coded_block_base + s->b8_stride + 1;
 
         /* divx501 bitstream reorder buffer */
         CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
@@ -660,20 +735,18 @@ int MPV_common_init(MpegEncContext *s)
     if (s->h263_pred || s->h263_plus || !s->encoding) {
         /* dc values */
         //MN: we need these for error resilience of intra-frames
-        CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
-        s->dc_val[1] = s->dc_val[0] + y_size;
+        CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
+        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
+        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
         s->dc_val[2] = s->dc_val[1] + c_size;
         for(i=0;i<yc_size;i++)
-            s->dc_val[0][i] = 1024;
+            s->dc_val_base[i] = 1024;
     }
 
     /* which mb is a intra block */
     CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
     memset(s->mbintra_table, 1, mb_array_size);
 
-    /* default structure is frame */
-    s->picture_structure = PICT_FRAME;
-
     /* init macroblock skip table */
     CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
     //Note the +1 is for a quicker mpeg4 slice_end detection
@@ -748,9 +821,9 @@ void MPV_common_end(MpegEncContext *s)
         av_freep(&s->p_field_select_table[i]);
     }
 
-    av_freep(&s->dc_val[0]);
-    av_freep(&s->ac_val[0]);
-    av_freep(&s->coded_block);
+    av_freep(&s->dc_val_base);
+    av_freep(&s->ac_val_base);
+    av_freep(&s->coded_block_base);
     av_freep(&s->mbintra_table);
     av_freep(&s->cbp_table);
     av_freep(&s->pred_dir_table);
@@ -782,9 +855,9 @@ void MPV_common_end(MpegEncContext *s)
     s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
+
    for(i=0; i<3; i++)
-        if (s->visualization_buffer[i])
-            av_free(s->visualization_buffer[i]);
+        av_freep(&s->visualization_buffer[i]);
 }
 
 #ifdef CONFIG_ENCODERS
@@ -795,6 +868,8 @@ int MPV_encode_init(AVCodecContext *avctx)
     MpegEncContext *s = avctx->priv_data;
     int i, dummy;
     int chroma_h_shift, chroma_v_shift;
+
+    MPV_encode_defaults(s);
 
     avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
 
@@ -850,8 +925,15 @@ int MPV_encode_init(AVCodecContext *avctx)
 
     if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
         av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
-    }
+    }
+
+    if(   s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate
+       && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
+       && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){
+        av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
+    }
+
     if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
        && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){
         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
         return -1;
@@ -882,7 +964,13 @@ int MPV_encode_init(AVCodecContext *avctx)
         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
         return -1;
     }
-
+
+    if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
+       && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
+        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
+        return -1;
+    }
+
     if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
         av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n");
         return -1;
@@ -1081,28 +1169,6 @@ int MPV_encode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    { /* set up some save defaults, some codecs might override them later */
-        static int done=0;
-        if(!done){
-            int i;
-            done=1;
-
-            default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
-            memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
-            memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
-
-            for(i=-16; i<16; i++){
-                default_fcode_tab[i + MAX_MV]= 1;
-            }
-        }
-    }
-    s->me.mv_penalty= default_mv_penalty;
-    s->fcode_tab= default_fcode_tab;
-
-    /* dont use mv_penalty table for crap MV as it would be confused */
-    //FIXME remove after fixing / removing old ME
-    if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
-
     s->encoding = 1;
 
     /* init */
@@ -1122,22 +1188,22 @@ int MPV_encode_init(AVCodecContext *avctx)
     ff_init_me(s);
 #endif /* #if 0 */
 
+#ifdef CONFIG_ENCODERS
 /* xine: do not need this for decode or MPEG-1 encoding modes */
 #if 0
-#ifdef CONFIG_ENCODERS
 #ifdef CONFIG_RISKY
     if (s->out_format == FMT_H263)
         h263_encode_init(s);
     if(s->msmpeg4_version)
         ff_msmpeg4_encode_init(s);
 #endif
-#endif
 #endif /* #if 0 */
 /* xine: we do want this for MPEG-1 encoding */
     if (s->out_format == FMT_MPEG1)
         ff_mpeg1_encode_init(s);
+#endif
 
-    /* init default q matrix */
+    /* init q matrix */
     for(i=0;i<64;i++) {
         int j= s->dsp.idct_permutation[i];
 #ifdef CONFIG_RISKY
@@ -1170,14 +1236,7 @@ int MPV_encode_init(AVCodecContext *avctx)
 
     if(ff_rate_control_init(s) < 0)
         return -1;
-
-    s->picture_number = 0;
-    s->input_picture_number = 0;
-    s->picture_in_gop_number = 0;
-    /* motion detector init */
-    s->f_code = 1;
-    s->b_code = 1;
-
+
     return 0;
 }
 
@@ -1321,7 +1380,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
 
     /* mark&release old frames */
-    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
+    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
 
        /* release forgotten pictures */
@@ -1351,7 +1410,7 @@ alloc:
             pic= (AVFrame*)&s->picture[i];
         }
 
-        pic->reference= s->pict_type != B_TYPE ? 3 : 0;
+        pic->reference= s->pict_type != B_TYPE && !s->dropable ? 3 : 0;
 
         pic->coded_picture_number= s->coded_picture_number++;
 
@@ -1373,8 +1432,14 @@ alloc:
     if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
         if (s->pict_type != B_TYPE) {
             s->last_picture_ptr= s->next_picture_ptr;
-            s->next_picture_ptr= s->current_picture_ptr;
+            if(!s->dropable)
+                s->next_picture_ptr= s->current_picture_ptr;
         }
+/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p  L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
+        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
+        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
+        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
+        s->pict_type, s->dropable);*/
 
     if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
     if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);
@@ -1486,7 +1551,7 @@ void MPV_frame_end(MpegEncContext *s)
  * @param color color of the arrow
 */
 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
-    int t, x, y, f;
+    int t, x, y, fr, f;
 
     sx= clip(sx, 0, w-1);
     sy= clip(sy, 0, h-1);
@@ -1504,8 +1569,10 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
         ex-= sx;
         f= ((ey-sy)<<16)/ex;
         for(x= 0; x <= ex; x++){
-            y= ((x*f) + (1<<15))>>16;
-            buf[y*stride + x]+= color;
+            y = (x*f)>>16;
+            fr= (x*f)&0xFFFF;
+            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
+            buf[(y+1)*stride + x]+= (color*         fr )>>16;
         }
     }else{
         if(sy > ey){
@@ -1517,8 +1584,10 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
         if(ey) f= ((ex-sx)<<16)/ey;
         else   f= 0;
         for(y= 0; y <= ey; y++){
-            x= ((y*f) + (1<<15))>>16;
-            buf[y*stride + x]+= color;
+            x = (y*f)>>16;
+            fr= (y*f)&0xFFFF;
+            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;;
+            buf[y*stride + x+1]+= (color*         fr )>>16;;
         }
     }
 }
@@ -1680,12 +1749,13 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
                 if(!USES_LIST(pict->mb_type[mb_index], direction))
                     continue;
 
+                //FIXME for h264
                 if(IS_8X8(pict->mb_type[mb_index])){
                   int i;
                   for(i=0; i<4; i++){
                     int sx= mb_x*16 + 4 + 8*(i&1);
                     int sy= mb_y*16 + 4 + 8*(i>>1);
-                    int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
+                    int xy= mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride;
                     int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                     int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                     draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
@@ -1695,15 +1765,19 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
                   for(i=0; i<2; i++){
                     int sx=mb_x*16 + 8;
                     int sy=mb_y*16 + 4 + 8*i;
-                    int xy=1 + mb_x*2 + (mb_y*2 + 1 + i)*(s->mb_width*2 + 2);
-                    int mx=(pict->motion_val[direction][xy][0]>>shift) + sx;
-                    int my=(pict->motion_val[direction][xy][1]>>shift) + sy;
-                    draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
+                    int xy= mb_x*2 + (mb_y*2 + i)*s->b8_stride;
+                    int mx=(pict->motion_val[direction][xy][0]>>shift);
+                    int my=(pict->motion_val[direction][xy][1]>>shift);
+
+                    if(IS_INTERLACED(pict->mb_type[mb_index]))
+                        my*=2;
+
+                    draw_arrow(ptr, sx, sy, mx+sx, my+sy, s->width, s->height, s->linesize, 100);
                   }
                 }else{
                   int sx= mb_x*16 + 8;
                   int sy= mb_y*16 + 8;
-                  int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
+                  int xy= mb_x*2 + mb_y*2*s->b8_stride;
                   int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                   int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                   draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
@@ -1880,7 +1954,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
                 }
             }
         }
-        copy_picture_attributes(pic, pic_arg);
+        copy_picture_attributes(s, pic, pic_arg);
         pic->display_picture_number= s->input_picture_number++;
 
         if(pic->pts != AV_NOPTS_VALUE){
@@ -2009,11 +2083,12 @@ static void select_input_picture(MpegEncContext *s){
                 s->reordered_input_picture[0]->data[i]= NULL;
             s->reordered_input_picture[0]->type= 0;
 
-            copy_picture_attributes((AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
             pic->reference = s->reordered_input_picture[0]->reference;
 
             alloc_picture(s, pic, 0);
+            copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
+
             s->current_picture_ptr= pic;
         }else{
             // input is not a shared pix -> reuse buffer for current_pix
@@ -2125,7 +2200,8 @@ int MPV_encode_picture(AVCodecContext *avctx,
         }
 
         /* update mpeg1/2 vbv_delay for CBR */
-        if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1){
+        if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1
+           && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){
             int vbv_delay;
 
             assert(s->repeat_first_field==0);
@@ -2432,9 +2508,17 @@ if(s->quarter_sample)
 
     src_y = s->mb_y*(16>>field_based) + (motion_y >> 1);
     if (s->out_format == FMT_H263) {
-        uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
-        uvsrc_x = src_x>>1;
-        uvsrc_y = src_y>>1;
+        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
+            mx = (motion_x>>1)|(motion_x&1);
+            my = motion_y >>1;
+            uvdxy = ((my & 1) << 1) | (mx & 1);
+            uvsrc_x = s->mb_x* 8               + (mx >> 1);
+            uvsrc_y = s->mb_y*(8>>field_based) + (my >> 1);
+        }else{
+            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
+            uvsrc_x = src_x>>1;
+            uvsrc_y = src_y>>1;
+        }
     } else {
         mx = motion_x / 2;
         my = motion_y / 2;
@@ -2736,8 +2820,8 @@ static inline void MPV_motion(MpegEncContext *s,
     if(s->obmc && s->pict_type != B_TYPE){
         int16_t mv_cache[4][4][2];
         const int xy= s->mb_x + s->mb_y*s->mb_stride;
-        const int mot_stride= s->mb_width*2 + 2;
-        const int mot_xy= 1 + mb_x*2 + (mb_y*2 + 1)*mot_stride;
+        const int mot_stride= s->b8_stride;
+        const int mot_xy= mb_x*2 + mb_y*2*mot_stride;
 
         assert(!s->mb_skiped);
 
@@ -2993,7 +3077,7 @@ static inline void add_dequant_dct(MpegEncContext *s,
  */
 void ff_clean_intra_table_entries(MpegEncContext *s)
 {
-    int wrap = s->block_wrap[0];
+    int wrap = s->b8_stride;
     int xy = s->block_index[0];
 
     s->dc_val[0][xy           ] =
@@ -3010,15 +3094,15 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
         s->coded_block[xy + 1 + wrap] = 0;
     }
     /* chroma */
-    wrap = s->block_wrap[4];
-    xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
+    wrap = s->mb_stride;
+    xy = s->mb_x + s->mb_y * wrap;
     s->dc_val[1][xy] =
     s->dc_val[2][xy] = 1024;
     /* ac pred */
     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
 
-    s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
+    s->mbintra_table[xy]= 0;
 }
 
 /* generic function called after a macroblock has been parsed by the
@@ -3338,12 +3422,12 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
     const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
     const int uvlinesize= s->current_picture.linesize[1];
 
-    s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
-    s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1) + s->mb_x*2;
-    s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
-    s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2) + s->mb_x*2;
-    s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
-    s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
+    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
+    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
+    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
+    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
+    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
+    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
 
     if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
         s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
@@ -3392,7 +3476,6 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
     int dct_offset = s->linesize*8; //default for progressive frames
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
     int wrap_y, wrap_c;
-    int emu=0;
 
     for(i=0; i<6; i++) skip_dct[i]=0;
 
@@ -4065,8 +4148,6 @@ static int encode_thread(AVCodecContext *c, void *arg){
             ff_update_block_index(s);
 
             /* write gob / video packet header */
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
 #ifdef CONFIG_RISKY
             if(s->rtp_mode){
                 int current_packet_size, is_gob_start;
@@ -4121,19 +4202,25 @@ static int encode_thread(AVCodecContext *c, void *arg){
                         s->avctx->rtp_callback(s->ptr_lastgob, current_packet_size, 0);
 
                     switch(s->codec_id){
+/* xine: do not need this for decode or MPEG-1 encoding modes */
+#if 0
                     case CODEC_ID_MPEG4:
                         ff_mpeg4_encode_video_packet_header(s);
                         ff_mpeg4_clean_buffers(s);
                     break;
+#endif /* #if 0 */
                     case CODEC_ID_MPEG1VIDEO:
                     case CODEC_ID_MPEG2VIDEO:
                         ff_mpeg1_encode_slice_header(s);
                         ff_mpeg1_clean_buffers(s);
                     break;
+/* xine: do not need this for decode or MPEG-1 encoding modes */
+#if 0
                     case CODEC_ID_H263:
                     case CODEC_ID_H263P:
                         h263_encode_gob_header(s, mb_y);
                     break;
+#endif /* #if 0 */
                     }

                    if(s->flags&CODEC_FLAG_PASS1){
@@ -4149,8 +4236,6 @@ static int encode_thread(AVCodecContext *c, void *arg){
                 }
             }
 #endif
-#endif /* #if 0 */
-
 
             if(  (s->resync_mb_x   == s->mb_x)
                && s->resync_mb_y+1 == s->mb_y){
@@ -4615,7 +4700,6 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
 
 static void encode_picture(MpegEncContext *s, int picture_number)
 {
-    int mb_x, mb_y;
     int i, j;
     int bits;
 
@@ -4651,19 +4735,18 @@ static void encode_picture(MpegEncContext *s, int picture_number)
     for(i=1; i<s->avctx->thread_count; i++){
         ff_update_duplicate_context(s->thread_context[i], s);
     }
-
+
+    ff_init_me(s);
+
     /* Estimate motion for every MB */
     if(s->pict_type != I_TYPE){
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
-        if(s->pict_type != B_TYPE){
+        if(s->pict_type != B_TYPE && s->avctx->me_threshold==0){
            if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
                s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
            }
        }

        s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
-#endif /* #if 0 */
    }else /* if(s->pict_type == I_TYPE) */{
        /* I-Frame */
        for(i=0; i<s->mb_stride*s->mb_height; i++)
@@ -5194,7 +5277,6 @@ static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
-    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
@@ -5302,7 +5384,6 @@ STOP_TIMER("init rem[]")
 #endif
    for(;;){
        int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
-        int nochange_score= best_score;
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change, analyze_gradient;