Diffstat (limited to 'src/libffmpeg/libavcodec/mpegvideo.c')
-rw-r--r--  src/libffmpeg/libavcodec/mpegvideo.c | 1318
1 file changed, 837 insertions, 481 deletions
diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c index 53534736a..4761b6137 100644 --- a/src/libffmpeg/libavcodec/mpegvideo.c +++ b/src/libffmpeg/libavcodec/mpegvideo.c @@ -18,26 +18,32 @@ * * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at> */ + +#include <ctype.h> #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" +#include "simple_idct.h" #ifdef USE_FASTMEMCPY #include "fastmemcpy.h" #endif +//#undef NDEBUG +//#include <assert.h> + static void encode_picture(MpegEncContext *s, int picture_number); -static void dct_unquantize_mpeg1_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); static void dct_unquantize_mpeg2_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); -static void dct_unquantize_h263_c(MpegEncContext *s, +static void dct_unquantize_h263_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w); static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c; -static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, +static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h); #define EDGE_WIDTH 16 @@ -63,6 +69,18 @@ static const unsigned short aanscales[64] = { 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247 }; +/* Input permutation for the simple_idct_mmx */ +static const UINT8 simple_mmx_permutation[64]={ + 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D, + 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D, + 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D, + 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F, + 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F, + 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D, + 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F, + 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F, +}; + static UINT8 h263_chroma_roundtab[16] = { 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, }; @@ -70,49 +88,48 @@ static UINT8 h263_chroma_roundtab[16] = { static UINT16 default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1]; static UINT8 default_fcode_tab[MAX_MV*2+1]; -extern UINT8 zigzag_end[64]; - /* default motion estimation */ int motion_estimation_method = ME_EPZS; static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64], - const UINT16 *quant_matrix, int bias) + const UINT16 *quant_matrix, int bias, int qmin, int qmax) { int qscale; - for(qscale=1; qscale<32; qscale++){ + for(qscale=qmin; qscale<=qmax; qscale++){ int i; if (s->fdct == ff_jpeg_fdct_islow) { for(i=0;i<64;i++) { - const int j= block_permute_op(i); + const int j= s->idct_permutation[i]; /* 16 <= qscale * quant_matrix[i] <= 7905 */ /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */ - - qmat[qscale][j] = (int)((UINT64_C(1) << QMAT_SHIFT) / + + qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j])); } } else if (s->fdct == fdct_ifast) { for(i=0;i<64;i++) { - const int j= block_permute_op(i); + const int j= s->idct_permutation[i]; /* 16 <= qscale * quant_matrix[i] <= 7905 */ /* 19952 <= aanscales[i] * 
qscale * quant_matrix[i] <= 249205026 */ /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */ - - qmat[qscale][j] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / + + qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / (aanscales[i] * qscale * quant_matrix[j])); } } else { for(i=0;i<64;i++) { + const int j= s->idct_permutation[i]; /* We can safely suppose that 16 <= quant_matrix[i] <= 255 So 16 <= qscale * quant_matrix[i] <= 7905 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67 */ qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]); - qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[block_permute_op(i)]); + qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]); if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1; qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]); @@ -120,7 +137,7 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16 } } } -// move into common.c perhaps +// move into common.c perhaps #define CHECKED_ALLOCZ(p, size)\ {\ p= av_mallocz(size);\ @@ -130,11 +147,44 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16 }\ } -/* init common structure for both encoder and decoder */ -int MPV_common_init(MpegEncContext *s) +void ff_init_scantable(MpegEncContext *s, ScanTable *st, const UINT8 *src_scantable){ + int i; + int end; + + st->scantable= src_scantable; + + for(i=0; i<64; i++){ + int j; + j = src_scantable[i]; + st->permutated[i] = s->idct_permutation[j]; + } + + end=-1; + for(i=0; i<64; i++){ + int j; + j = st->permutated[i]; + if(j>end) end=j; + st->raster_end[i]= end; + } +} + +/* XXX: those functions should be suppressed ASAP when all IDCTs are + converted */ +static void ff_jref_idct_put(UINT8 *dest, int line_size, DCTELEM *block) { - int c_size, i; - UINT8 *pict; + j_rev_dct (block); + put_pixels_clamped(block, dest, line_size); +} +static void ff_jref_idct_add(UINT8 *dest, int line_size, DCTELEM *block) +{ + j_rev_dct (block); + add_pixels_clamped(block, dest, line_size); +} + +/* init common dct for both encoder and decoder */ +int DCT_common_init(MpegEncContext *s) +{ + int i; s->dct_unquantize_h263 = dct_unquantize_h263_c; s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c; @@ -144,69 +194,141 @@ int MPV_common_init(MpegEncContext *s) if(s->avctx->dct_algo==FF_DCT_FASTINT) s->fdct = fdct_ifast; else - s->fdct = ff_jpeg_fdct_islow; - + s->fdct = ff_jpeg_fdct_islow; //slow/accurate/default + + if(s->avctx->idct_algo==FF_IDCT_INT){ + s->idct_put= ff_jref_idct_put; + s->idct_add= ff_jref_idct_add; + s->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; + }else{ //accurate/default + s->idct_put= simple_idct_put; + s->idct_add= simple_idct_add; + s->idct_permutation_type= FF_NO_IDCT_PERM; + } + #ifdef HAVE_MMX MPV_common_init_mmx(s); #endif #ifdef ARCH_ALPHA MPV_common_init_axp(s); #endif -#if defined(HAVE_MLIB) && !defined(HAVE_MMX) +#ifdef HAVE_MLIB MPV_common_init_mlib(s); #endif +#ifdef HAVE_MMI + MPV_common_init_mmi(s); +#endif +#ifdef ARCH_ARMV4L + MPV_common_init_armv4l(); +#endif + + switch(s->idct_permutation_type){ + case FF_NO_IDCT_PERM: + for(i=0; i<64; i++) + s->idct_permutation[i]= i; + break; + case FF_LIBMPEG2_IDCT_PERM: + for(i=0; i<64; i++) + s->idct_permutation[i]= (i & 0x38) | ((i & 6) >> 
1) | ((i & 1) << 2); + break; + case FF_SIMPLE_IDCT_PERM: + for(i=0; i<64; i++) + s->idct_permutation[i]= simple_mmx_permutation[i]; + break; + case FF_TRANSPOSE_IDCT_PERM: + for(i=0; i<64; i++) + s->idct_permutation[i]= ((i&7)<<3) | (i>>3); + break; + default: + fprintf(stderr, "Internal error, IDCT permutation not set\n"); + return -1; + } + + + /* load & permutate scantables + note: only wmv uses differnt ones + */ + ff_init_scantable(s, &s->inter_scantable , ff_zigzag_direct); + ff_init_scantable(s, &s->intra_scantable , ff_zigzag_direct); + ff_init_scantable(s, &s->intra_h_scantable, ff_alternate_horizontal_scan); + ff_init_scantable(s, &s->intra_v_scantable, ff_alternate_vertical_scan); + + return 0; +} + +/* init common structure for both encoder and decoder */ +int MPV_common_init(MpegEncContext *s) +{ + UINT8 *pict; + int y_size, c_size, yc_size, i; + + DCT_common_init(s); + + s->flags= s->avctx->flags; s->mb_width = (s->width + 15) / 16; s->mb_height = (s->height + 15) / 16; - + + y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2); + c_size = (s->mb_width + 2) * (s->mb_height + 2); + yc_size = y_size + 2 * c_size; + /* set default edge pos, will be overriden in decode_header if needed */ s->h_edge_pos= s->mb_width*16; s->v_edge_pos= s->mb_height*16; + + /* convert fourcc to upper case */ + s->avctx->fourcc= toupper( s->avctx->fourcc &0xFF) + + (toupper((s->avctx->fourcc>>8 )&0xFF)<<8 ) + + (toupper((s->avctx->fourcc>>16)&0xFF)<<16) + + (toupper((s->avctx->fourcc>>24)&0xFF)<<24); s->mb_num = s->mb_width * s->mb_height; + if(!(s->flags&CODEC_FLAG_DR1)){ s->linesize = s->mb_width * 16 + 2 * EDGE_WIDTH; s->uvlinesize = s->mb_width * 8 + EDGE_WIDTH; for(i=0;i<3;i++) { - int w, h, shift, pict_start; + int w, h, shift, pict_start, size; w = s->linesize; h = s->mb_height * 16 + 2 * EDGE_WIDTH; shift = (i == 0) ? 
0 : 1; - c_size = (s->linesize>>shift) * (h >> shift); + size = (s->linesize>>shift) * (h >> shift); pict_start = (s->linesize>>shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift); - CHECKED_ALLOCZ(pict, c_size) + CHECKED_ALLOCZ(pict, size) s->last_picture_base[i] = pict; s->last_picture[i] = pict + pict_start; - if(i>0) memset(s->last_picture_base[i], 128, c_size); - - CHECKED_ALLOCZ(pict, c_size) + if(i>0) memset(s->last_picture_base[i], 128, size); + + CHECKED_ALLOCZ(pict, size) s->next_picture_base[i] = pict; s->next_picture[i] = pict + pict_start; - if(i>0) memset(s->next_picture_base[i], 128, c_size); - + if(i>0) memset(s->next_picture_base[i], 128, size); + if (s->has_b_frames || s->codec_id==CODEC_ID_MPEG4) { - /* Note the MPEG4 stuff is here cuz of buggy encoders which dont set the low_delay flag but + /* Note the MPEG4 stuff is here cuz of buggy encoders which dont set the low_delay flag but do low-delay encoding, so we cant allways distinguish b-frame containing streams from low_delay streams */ - CHECKED_ALLOCZ(pict, c_size) + CHECKED_ALLOCZ(pict, size) s->aux_picture_base[i] = pict; s->aux_picture[i] = pict + pict_start; - if(i>0) memset(s->aux_picture_base[i], 128, c_size); + if(i>0) memset(s->aux_picture_base[i], 128, size); } } s->ip_buffer_count= 2; } - + CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance - + if (s->encoding) { int j; int mv_table_size= (s->mb_width+2)*(s->mb_height+2); - + CHECKED_ALLOCZ(s->mb_var , s->mb_num * sizeof(INT16)) CHECKED_ALLOCZ(s->mc_mb_var, s->mb_num * sizeof(INT16)) + CHECKED_ALLOCZ(s->mb_mean , s->mb_num * sizeof(INT8)) /* Allocate MV tables */ CHECKED_ALLOCZ(s->p_mv_table , mv_table_size * 2 * sizeof(INT16)) @@ -219,7 +341,7 @@ int MPV_common_init(MpegEncContext *s) CHECKED_ALLOCZ(s->b_direct_mv_table , mv_table_size * 2 * sizeof(INT16)) CHECKED_ALLOCZ(s->me_scratchpad, s->linesize*16*3*sizeof(uint8_t)) - + CHECKED_ALLOCZ(s->me_map , ME_MAP_SIZE*sizeof(uint32_t)) CHECKED_ALLOCZ(s->me_score_map, ME_MAP_SIZE*sizeof(uint16_t)) @@ -227,14 +349,14 @@ int MPV_common_init(MpegEncContext *s) for(j=0; j<REORDER_BUFFER_SIZE; j++){ int i; for(i=0;i<3;i++) { - int w, h, shift; + int w, h, shift, size; w = s->linesize; h = s->mb_height * 16; shift = (i == 0) ? 
0 : 1; - c_size = (w >> shift) * (h >> shift); + size = (w >> shift) * (h >> shift); - CHECKED_ALLOCZ(pict, c_size); + CHECKED_ALLOCZ(pict, size); s->picture_buffer[j][i] = pict; } } @@ -244,13 +366,15 @@ int MPV_common_init(MpegEncContext *s) CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE); CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE); } - + if(s->msmpeg4_version){ CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int)); } CHECKED_ALLOCZ(s->avctx->stats_out, 256); } - + + CHECKED_ALLOCZ(s->error_status_table, s->mb_num*sizeof(UINT8)) + if (s->out_format == FMT_H263 || s->encoding) { int size; /* Allocate MB type table */ @@ -262,53 +386,53 @@ int MPV_common_init(MpegEncContext *s) } if(s->codec_id==CODEC_ID_MPEG4){ - /* 4mv and interlaced direct mode decoding tables */ - CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(UINT8)) + /* interlaced direct mode decoding tables */ CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(INT16)) CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(INT8)) } - - if (s->h263_pred || s->h263_plus) { - int y_size, c_size, i, size; - - /* dc values */ - - y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2); - c_size = (s->mb_width + 2) * (s->mb_height + 2); - size = y_size + 2 * c_size; - CHECKED_ALLOCZ(s->dc_val[0], size * sizeof(INT16)); - s->dc_val[1] = s->dc_val[0] + y_size; - s->dc_val[2] = s->dc_val[1] + c_size; - for(i=0;i<size;i++) - s->dc_val[0][i] = 1024; - + /* 4mv b frame decoding table */ + //note this is needed for h263 without b frames too (segfault on damaged streams otherwise) + CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(UINT8)) + if (s->out_format == FMT_H263) { /* ac values */ - CHECKED_ALLOCZ(s->ac_val[0], size * sizeof(INT16) * 16); + CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(INT16) * 16); s->ac_val[1] = s->ac_val[0] + y_size; s->ac_val[2] = s->ac_val[1] + c_size; - + /* cbp values */ CHECKED_ALLOCZ(s->coded_block, y_size); - + /* divx501 bitstream reorder buffer */ CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE); - + /* cbp, ac_pred, pred_dir */ CHECKED_ALLOCZ(s->cbp_table , s->mb_num * sizeof(UINT8)) CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(UINT8)) } - CHECKED_ALLOCZ(s->qscale_table , s->mb_num * sizeof(UINT8)) + + if (s->h263_pred || s->h263_plus || !s->encoding) { + /* dc values */ + //MN: we need these for error resilience of intra-frames + CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(INT16)); + s->dc_val[1] = s->dc_val[0] + y_size; + s->dc_val[2] = s->dc_val[1] + c_size; + for(i=0;i<yc_size;i++) + s->dc_val[0][i] = 1024; + } + CHECKED_ALLOCZ(s->qscale_table , s->mb_num * sizeof(UINT8)) + /* which mb is a intra block */ CHECKED_ALLOCZ(s->mbintra_table, s->mb_num); memset(s->mbintra_table, 1, s->mb_num); - + /* default structure is frame */ s->picture_structure = PICT_FRAME; - + /* init macroblock skip table */ - CHECKED_ALLOCZ(s->mbskip_table, s->mb_num); - + CHECKED_ALLOCZ(s->mbskip_table, s->mb_num+1); + //Note the +1 is for a quicker mpeg4 slice_end detection + s->block= s->blocks[0]; s->context_initialized = 1; @@ -329,6 +453,7 @@ void MPV_common_end(MpegEncContext *s) av_freep(&s->mb_type); av_freep(&s->mb_var); av_freep(&s->mc_mb_var); + av_freep(&s->mb_mean); av_freep(&s->p_mv_table); av_freep(&s->b_forw_mv_table); av_freep(&s->b_back_mv_table); @@ -348,7 +473,7 @@ void MPV_common_end(MpegEncContext *s) av_freep(&s->me_scratchpad); av_freep(&s->me_map); av_freep(&s->me_score_map); - + av_freep(&s->mbskip_table); 
av_freep(&s->bitstream_buffer); av_freep(&s->tex_pb_buffer); @@ -359,7 +484,8 @@ void MPV_common_end(MpegEncContext *s) av_freep(&s->field_select_table); av_freep(&s->avctx->stats_out); av_freep(&s->ac_stats); - + av_freep(&s->error_status_table); + for(i=0;i<3;i++) { int j; if(!(s->flags&CODEC_FLAG_DR1)){ @@ -441,6 +567,15 @@ int MPV_encode_init(AVCodecContext *avctx) /* Fixed QSCALE */ s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE); + + s->adaptive_quant= ( s->avctx->lumi_masking + || s->avctx->dark_masking + || s->avctx->temporal_cplx_masking + || s->avctx->spatial_cplx_masking + || s->avctx->p_masking) + && !s->fixed_qscale; + + s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT); switch(avctx->codec->id) { case CODEC_ID_MPEG1VIDEO: @@ -454,10 +589,10 @@ int MPV_encode_init(AVCodecContext *avctx) s->mjpeg_data_only_frames = 0; /* write all the needed headers */ s->mjpeg_vsample[0] = 2; /* set up default sampling factors */ s->mjpeg_vsample[1] = 1; /* the only currently supported values */ - s->mjpeg_vsample[2] = 1; + s->mjpeg_vsample[2] = 1; s->mjpeg_hsample[0] = 2; - s->mjpeg_hsample[1] = 1; - s->mjpeg_hsample[2] = 1; + s->mjpeg_hsample[1] = 1; + s->mjpeg_hsample[2] = 1; if (mjpeg_init(s) < 0) return -1; avctx->delay=0; @@ -472,12 +607,10 @@ int MPV_encode_init(AVCodecContext *avctx) break; case CODEC_ID_H263P: s->out_format = FMT_H263; - s->rtp_mode = 1; - s->rtp_payload_size = 1200; s->h263_plus = 1; s->unrestricted_mv = 1; s->h263_aic = 1; - + /* These are just to be sure */ s->umvplus = 0; s->umvplus_dec = 0; @@ -493,7 +626,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->h263_pred = 1; s->unrestricted_mv = 1; s->has_b_frames= s->max_b_frames ? 1 : 0; - s->low_delay=0; + s->low_delay= !s->has_b_frames; avctx->delay= s->low_delay ? 
0 : (s->max_b_frames + 1); break; case CODEC_ID_MSMPEG4V1: @@ -539,7 +672,7 @@ int MPV_encode_init(AVCodecContext *avctx) default: return -1; } - + { /* set up some save defaults, some codecs might override them later */ static int done=0; if(!done){ @@ -557,14 +690,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->fcode_tab= default_fcode_tab; s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; - - if (s->out_format == FMT_H263) - h263_encode_init(s); - else if (s->out_format == FMT_MPEG1) - ff_mpeg1_encode_init(s); - if(s->msmpeg4_version) - ff_msmpeg4_encode_init(s); - + /* dont use mv_penalty table for crap MV as it would be confused */ if (s->me_method < ME_EPZS) s->mv_penalty = default_mv_penalty; @@ -573,28 +699,36 @@ int MPV_encode_init(AVCodecContext *avctx) /* init */ if (MPV_common_init(s) < 0) return -1; + + if (s->out_format == FMT_H263) + h263_encode_init(s); + else if (s->out_format == FMT_MPEG1) + ff_mpeg1_encode_init(s); + if(s->msmpeg4_version) + ff_msmpeg4_encode_init(s); /* init default q matrix */ for(i=0;i<64;i++) { + int j= s->idct_permutation[i]; if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){ - s->intra_matrix[i] = ff_mpeg4_default_intra_matrix[i]; - s->inter_matrix[i] = ff_mpeg4_default_non_intra_matrix[i]; + s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i]; + s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i]; }else if(s->out_format == FMT_H263){ - s->intra_matrix[i] = - s->inter_matrix[i] = ff_mpeg1_default_non_intra_matrix[i]; + s->intra_matrix[j] = + s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; }else{ /* mpeg1 */ - s->intra_matrix[i] = ff_mpeg1_default_intra_matrix[i]; - s->inter_matrix[i] = ff_mpeg1_default_non_intra_matrix[i]; + s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i]; + s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; } } /* precompute matrix */ /* for mjpeg, we do include qscale in the matrix */ if (s->out_format != FMT_MJPEG) { - convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, - s->intra_matrix, s->intra_quant_bias); - convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, - s->inter_matrix, s->inter_quant_bias); + convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, + s->intra_matrix, s->intra_quant_bias, 1, 31); + convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, + s->inter_matrix, s->inter_quant_bias, 1, 31); } if(ff_rate_control_init(s) < 0) @@ -623,7 +757,7 @@ int MPV_encode_end(AVCodecContext *avctx) MPV_common_end(s); if (s->out_format == FMT_MJPEG) mjpeg_close(s); - + return 0; } @@ -657,29 +791,31 @@ static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w) } /* generic function for encode/decode called before a frame is coded/decoded */ -void MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) +int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { int i; UINT8 *tmp; s->mb_skiped = 0; - s->decoding_error=0; avctx->mbskip_table= s->mbskip_table; if(avctx->flags&CODEC_FLAG_DR1){ - avctx->get_buffer_callback(avctx, s->width, s->height, s->pict_type); + if(avctx->get_buffer_callback(avctx, s->width, s->height, s->pict_type) < 0){ + fprintf(stderr, "get_buffer() failed\n"); + return -1; + } s->linesize = avctx->dr_stride; s->uvlinesize= avctx->dr_uvstride; s->ip_buffer_count= avctx->dr_ip_buffer_count; } avctx->dr_ip_buffer_count= s->ip_buffer_count; - + if (s->pict_type == B_TYPE) { for(i=0;i<3;i++) { 
if(avctx->flags&CODEC_FLAG_DR1) s->aux_picture[i]= avctx->dr_buffer[i]; - + //FIXME the following should never be needed, the decoder should drop b frames if no reference is available if(s->next_picture[i]==NULL) s->next_picture[i]= s->aux_picture[i]; @@ -719,8 +855,10 @@ void MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) s->dct_unquantize = s->dct_unquantize_mpeg2; else s->dct_unquantize = s->dct_unquantize_h263; - }else + }else s->dct_unquantize = s->dct_unquantize_mpeg1; + + return 0; } /* generic function for encode/decode called after a frame has been coded/decoded */ @@ -736,7 +874,7 @@ void MPV_frame_end(MpegEncContext *s) draw_edges(s->current_picture[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2); } emms_c(); - + s->last_pict_type = s->pict_type; if(s->pict_type!=B_TYPE){ s->last_non_b_pict_type= s->pict_type; @@ -749,7 +887,7 @@ void MPV_frame_end(MpegEncContext *s) void reorder_input(MpegEncContext *s, AVPicture *pict) { int i, j, index; - + if(s->max_b_frames > FF_MAX_B_FRAMES) s->max_b_frames= FF_MAX_B_FRAMES; // delay= s->max_b_frames+1; (or 0 if no b frames cuz decoder diff) @@ -761,17 +899,17 @@ void reorder_input(MpegEncContext *s, AVPicture *pict) s->coded_order[j].pict_type=0; switch(s->input_pict_type){ - default: + default: case I_TYPE: case S_TYPE: case P_TYPE: index= s->max_b_frames - s->b_frames_since_non_b; s->b_frames_since_non_b=0; - break; + break; case B_TYPE: index= s->max_b_frames + 1; s->b_frames_since_non_b++; - break; + break; } //printf("index:%d type:%d strides: %d %d\n", index, s->input_pict_type, pict->linesize[0], s->linesize); if( (index==0 || (s->flags&CODEC_FLAG_INPUT_PRESERVED)) @@ -861,9 +999,9 @@ int MPV_encode_picture(AVCodecContext *avctx, if(s->input_pict_type==I_TYPE) s->input_picture_in_gop_number=0; - + reorder_input(s, pict); - + /* output? 
*/ if(s->coded_order[0].picture[0]){ @@ -877,7 +1015,7 @@ int MPV_encode_picture(AVCodecContext *avctx, MPV_frame_start(s, avctx); encode_picture(s, s->picture_number); - + avctx->real_pict_num = s->picture_number; avctx->header_bits = s->header_bits; avctx->mv_bits = s->mv_bits; @@ -893,11 +1031,12 @@ int MPV_encode_picture(AVCodecContext *avctx, if (s->out_format == FMT_MJPEG) mjpeg_picture_trailer(s); - avctx->quality = s->qscale; - + if(!s->fixed_qscale) + avctx->quality = s->qscale; + if(s->flags&CODEC_FLAG_PASS1) ff_write_pass1_stats(s); - + } s->input_picture_number++; @@ -905,10 +1044,10 @@ int MPV_encode_picture(AVCodecContext *avctx, flush_put_bits(&s->pb); s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8; - + s->total_bits += s->frame_bits; avctx->frame_bits = s->frame_bits; -//printf("fcode: %d, type: %d, head: %d, mv: %d, misc: %d, frame: %d, itex: %d, ptex: %d\n", +//printf("fcode: %d, type: %d, head: %d, mv: %d, misc: %d, frame: %d, itex: %d, ptex: %d\n", //s->f_code, avctx->key_frame, s->header_bits, s->mv_bits, s->misc_bits, s->frame_bits, s->i_tex_bits, s->p_tex_bits); #if 0 //dump some stats to stats.txt for testing/debuging if(s->max_b_frames==0) @@ -934,15 +1073,13 @@ if(s->max_b_frames==0) static inline void gmc1_motion(MpegEncContext *s, UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr, int dest_offset, - UINT8 **ref_picture, int src_offset, - int h) + UINT8 **ref_picture, int src_offset) { UINT8 *ptr; int offset, src_x, src_y, linesize, uvlinesize; int motion_x, motion_y; int emu=0; - if(s->real_sprite_warping_points>1) printf("more than 1 warp point isnt supported\n"); motion_x= s->sprite_offset[0][0]; motion_y= s->sprite_offset[0][1]; src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1)); @@ -958,19 +1095,34 @@ static inline void gmc1_motion(MpegEncContext *s, linesize = s->linesize; uvlinesize = s->uvlinesize; + ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset; dest_y+=dest_offset; if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&15) + 16 > s->h_edge_pos - || src_y + (motion_y&15) + h > s->v_edge_pos){ - emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, s->v_edge_pos); + || src_y + (motion_y&15) + 16 > s->v_edge_pos){ + emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer; emu=1; } } - gmc1(dest_y , ptr , linesize, h, motion_x&15, motion_y&15, s->no_rounding); - gmc1(dest_y+8, ptr+8, linesize, h, motion_x&15, motion_y&15, s->no_rounding); + + if((motion_x|motion_y)&7){ + ff_gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding); + ff_gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding); + }else{ + int dxy; + + dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2); + if (s->no_rounding){ + put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16); + }else{ + put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16); + } + } + + if(s->flags&CODEC_FLAG_GRAY) return; motion_x= s->sprite_offset[1][0]; motion_y= s->sprite_offset[1][1]; @@ -988,27 +1140,91 @@ static inline void gmc1_motion(MpegEncContext *s, offset = (src_y * uvlinesize) + src_x + (src_offset>>1); ptr = ref_picture[1] + offset; if(emu){ - emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } - gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, h>>1, motion_x&15, 
motion_y&15, s->no_rounding); - + ff_gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding); + ptr = ref_picture[2] + offset; if(emu){ - emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } - gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, h>>1, motion_x&15, motion_y&15, s->no_rounding); - + ff_gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding); + return; } -static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, +static inline void gmc_motion(MpegEncContext *s, + UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr, + int dest_offset, + UINT8 **ref_picture, int src_offset) +{ + UINT8 *ptr; + int linesize, uvlinesize; + const int a= s->sprite_warping_accuracy; + int ox, oy; + + linesize = s->linesize; + uvlinesize = s->uvlinesize; + + ptr = ref_picture[0] + src_offset; + + dest_y+=dest_offset; + + ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16; + oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16; + + ff_gmc(dest_y, ptr, linesize, 16, + ox, + oy, + s->sprite_delta[0][0], s->sprite_delta[0][1], + s->sprite_delta[1][0], s->sprite_delta[1][1], + a+1, (1<<(2*a+1)) - s->no_rounding, + s->h_edge_pos, s->v_edge_pos); + ff_gmc(dest_y+8, ptr, linesize, 16, + ox + s->sprite_delta[0][0]*8, + oy + s->sprite_delta[1][0]*8, + s->sprite_delta[0][0], s->sprite_delta[0][1], + s->sprite_delta[1][0], s->sprite_delta[1][1], + a+1, (1<<(2*a+1)) - s->no_rounding, + s->h_edge_pos, s->v_edge_pos); + + if(s->flags&CODEC_FLAG_GRAY) return; + + + dest_cb+=dest_offset>>1; + dest_cr+=dest_offset>>1; + + ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8; + oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8; + + ptr = ref_picture[1] + (src_offset>>1); + ff_gmc(dest_cb, ptr, uvlinesize, 8, + ox, + oy, + s->sprite_delta[0][0], s->sprite_delta[0][1], + s->sprite_delta[1][0], s->sprite_delta[1][1], + a+1, (1<<(2*a+1)) - s->no_rounding, + s->h_edge_pos>>1, s->v_edge_pos>>1); + + ptr = ref_picture[2] + (src_offset>>1); + ff_gmc(dest_cr, ptr, uvlinesize, 8, + ox, + oy, + s->sprite_delta[0][0], s->sprite_delta[0][1], + s->sprite_delta[1][0], s->sprite_delta[1][1], + a+1, (1<<(2*a+1)) - s->no_rounding, + s->h_edge_pos>>1, s->v_edge_pos>>1); +} + + +static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h){ int x, y; int start_y, start_x, end_y, end_x; UINT8 *buf= s->edge_emu_buffer; - + if(src_y>= h){ src+= (h-1-src_y)*linesize; src_y=h-1; @@ -1049,13 +1265,13 @@ static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int bl buf[x + y*linesize]= buf[x + (end_y-1)*linesize]; } } - + for(y=0; y<block_h; y++){ //left for(x=0; x<start_x; x++){ buf[x + y*linesize]= buf[start_x + y*linesize]; } - + //right for(x=end_x; x<block_w; x++){ buf[x + y*linesize]= buf[end_x - 1 + y*linesize]; @@ -1075,7 +1291,7 @@ static inline void mpeg_motion(MpegEncContext *s, UINT8 *ptr; int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize; int emu=0; -#if 0 +#if 0 if(s->quarter_sample) { motion_x>>=1; @@ -1085,7 +1301,7 @@ if(s->quarter_sample) dxy = 
((motion_y & 1) << 1) | (motion_x & 1); src_x = s->mb_x * 16 + (motion_x >> 1); src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1); - + /* WARNING: do no forget half pels */ height = s->height >> field_based; v_edge_pos = s->v_edge_pos >> field_based; @@ -1103,8 +1319,9 @@ if(s->quarter_sample) if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos || src_y + (motion_y&1) + h > v_edge_pos){ - emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, v_edge_pos); - ptr= s->edge_emu_buffer; + emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, + src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos); + ptr= s->edge_emu_buffer + src_offset; emu=1; } } @@ -1127,7 +1344,7 @@ if(s->quarter_sample) mx >>= 1; my >>= 1; } - + src_x = s->mb_x * 8 + mx; src_y = s->mb_y * (8 >> field_based) + my; src_x = clip(src_x, -8, s->width >> 1); @@ -1139,15 +1356,17 @@ if(s->quarter_sample) offset = (src_y * uvlinesize) + src_x + (src_offset >> 1); ptr = ref_picture[1] + offset; if(emu){ - emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1); - ptr= s->edge_emu_buffer; + emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, + src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer + (src_offset >> 1); } pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1); ptr = ref_picture[2] + offset; if(emu){ - emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1); - ptr= s->edge_emu_buffer; + emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, + src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer + (src_offset >> 1); } pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1); } @@ -1181,12 +1400,13 @@ static inline void qpel_motion(MpegEncContext *s, ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset; dest_y += dest_offset; //printf("%d %d %d\n", src_x, src_y, dxy); - + if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos || src_y + (motion_y&3) + h > v_edge_pos){ - emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, v_edge_pos); - ptr= s->edge_emu_buffer; + emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, + src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos); + ptr= s->edge_emu_buffer + src_offset; emu=1; } } @@ -1204,7 +1424,7 @@ static inline void qpel_motion(MpegEncContext *s, if(field_based){ mx= motion_x/2; my= motion_y>>1; - }else if(s->divx_version){ + }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){ mx= (motion_x>>1)|(motion_x&1); my= (motion_y>>1)|(motion_y&1); }else{ @@ -1229,23 +1449,25 @@ static inline void qpel_motion(MpegEncContext *s, offset = (src_y * uvlinesize) + src_x + (src_offset >> 1); ptr = ref_picture[1] + offset; if(emu){ - emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1); - ptr= s->edge_emu_buffer; + emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, + src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer + (src_offset >> 1); } pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1); - + ptr = ref_picture[2] + offset; if(emu){ - emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1); - ptr= 
s->edge_emu_buffer; + emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, + src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer + (src_offset >> 1); } pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1); } -static inline void MPV_motion(MpegEncContext *s, +static inline void MPV_motion(MpegEncContext *s, UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr, - int dir, UINT8 **ref_picture, + int dir, UINT8 **ref_picture, op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16]) { int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y; @@ -1259,9 +1481,13 @@ static inline void MPV_motion(MpegEncContext *s, switch(s->mv_type) { case MV_TYPE_16X16: if(s->mcsel){ - gmc1_motion(s, dest_y, dest_cb, dest_cr, 0, - ref_picture, 0, - 16); + if(s->real_sprite_warping_points==1){ + gmc1_motion(s, dest_y, dest_cb, dest_cr, 0, + ref_picture, 0); + }else{ + gmc_motion(s, dest_y, dest_cb, dest_cr, 0, + ref_picture, 0); + } }else if(s->quarter_sample){ qpel_motion(s, dest_y, dest_cb, dest_cr, 0, ref_picture, 0, @@ -1272,7 +1498,7 @@ static inline void MPV_motion(MpegEncContext *s, ref_picture, 0, 0, pix_op, s->mv[dir][0][0], s->mv[dir][0][1], 16); - } + } break; case MV_TYPE_8X8: mx = 0; @@ -1285,7 +1511,7 @@ static inline void MPV_motion(MpegEncContext *s, dxy = ((motion_y & 3) << 2) | (motion_x & 3); src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8; src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8; - + /* WARNING: do no forget half pels */ src_x = clip(src_x, -16, s->width); if (src_x == s->width) @@ -1293,7 +1519,7 @@ static inline void MPV_motion(MpegEncContext *s, src_y = clip(src_y, -16, s->height); if (src_y == s->height) dxy &= ~12; - + ptr = ref_picture[0] + (src_y * s->linesize) + (src_x); if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos @@ -1316,7 +1542,7 @@ static inline void MPV_motion(MpegEncContext *s, dxy = ((motion_y & 1) << 1) | (motion_x & 1); src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8; src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8; - + /* WARNING: do no forget half pels */ src_x = clip(src_x, -16, s->width); if (src_x == s->width) @@ -1324,7 +1550,7 @@ static inline void MPV_motion(MpegEncContext *s, src_y = clip(src_y, -16, s->height); if (src_y == s->height) dxy &= ~2; - + ptr = ref_picture[0] + (src_y * s->linesize) + (src_x); if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos @@ -1370,7 +1596,7 @@ static inline void MPV_motion(MpegEncContext *s, src_y = clip(src_y, -8, s->height/2); if (src_y == s->height/2) dxy &= ~2; - + offset = (src_y * (s->uvlinesize)) + src_x; ptr = ref_picture[1] + offset; if(s->flags&CODEC_FLAG_EMU_EDGE){ @@ -1404,7 +1630,7 @@ static inline void MPV_motion(MpegEncContext *s, 1, pix_op, qpix_op, s->mv[dir][1][0], s->mv[dir][1][1], 8); }else{ - /* top field */ + /* top field */ mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, ref_picture, s->field_select[dir][0] ? 
s->linesize : 0, 1, pix_op, @@ -1416,7 +1642,7 @@ static inline void MPV_motion(MpegEncContext *s, s->mv[dir][1][0], s->mv[dir][1][1], 8); } } else { - + } break; @@ -1425,30 +1651,29 @@ static inline void MPV_motion(MpegEncContext *s, /* put block[] to dest[] */ -static inline void put_dct(MpegEncContext *s, +static inline void put_dct(MpegEncContext *s, DCTELEM *block, int i, UINT8 *dest, int line_size) { - if (!s->mpeg2) - s->dct_unquantize(s, block, i, s->qscale); - ff_idct_put (dest, line_size, block); + s->dct_unquantize(s, block, i, s->qscale); + s->idct_put (dest, line_size, block); } /* add block[] to dest[] */ -static inline void add_dct(MpegEncContext *s, +static inline void add_dct(MpegEncContext *s, DCTELEM *block, int i, UINT8 *dest, int line_size) { if (s->block_last_index[i] >= 0) { - ff_idct_add (dest, line_size, block); + s->idct_add (dest, line_size, block); } } -static inline void add_dequant_dct(MpegEncContext *s, +static inline void add_dequant_dct(MpegEncContext *s, DCTELEM *block, int i, UINT8 *dest, int line_size) { if (s->block_last_index[i] >= 0) { s->dct_unquantize(s, block, i, s->qscale); - ff_idct_add (dest, line_size, block); + s->idct_add (dest, line_size, block); } } @@ -1459,9 +1684,9 @@ void ff_clean_intra_table_entries(MpegEncContext *s) { int wrap = s->block_wrap[0]; int xy = s->block_index[0]; - - s->dc_val[0][xy ] = - s->dc_val[0][xy + 1 ] = + + s->dc_val[0][xy ] = + s->dc_val[0][xy + 1 ] = s->dc_val[0][xy + wrap] = s->dc_val[0][xy + 1 + wrap] = 1024; /* ac pred */ @@ -1481,7 +1706,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s) /* ac pred */ memset(s->ac_val[1][xy], 0, 16 * sizeof(INT16)); memset(s->ac_val[2][xy], 0, 16 * sizeof(INT16)); - + s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0; } @@ -1529,7 +1754,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */ if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here - + const int wrap = s->block_wrap[0]; const int xy = s->block_index[0]; const int mb_index= s->mb_x + s->mb_y*s->mb_width; @@ -1570,14 +1795,14 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) s->motion_val[xy + 1 + wrap][1] = motion_y; } } - + if (!(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { UINT8 *dest_y, *dest_cb, *dest_cr; int dct_linesize, dct_offset; op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; - /* avoid copy if macroblock skipped in last frame too + /* avoid copy if macroblock skipped in last frame too dont touch it for B-frames as they need the skip info from the next p-frame */ if (s->pict_type != B_TYPE) { UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy]; @@ -1587,18 +1812,24 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) (*mbskip_ptr) ++; /* indicate that this time we skiped it */ if(*mbskip_ptr >99) *mbskip_ptr= 99; - /* if previous was skipped too, then nothing to do ! + /* if previous was skipped too, then nothing to do ! 
skip only during decoding as we might trash the buffers during encoding a bit */ - if (*mbskip_ptr >= s->ip_buffer_count && !s->encoding) + if (*mbskip_ptr >= s->ip_buffer_count && !s->encoding) goto the_end; } else { *mbskip_ptr = 0; /* not skipped */ } } - dest_y = s->current_picture [0] + (mb_y * 16* s->linesize ) + mb_x * 16; - dest_cb = s->current_picture[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; - dest_cr = s->current_picture[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; + if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band){ + dest_y = s->current_picture [0] + mb_x * 16; + dest_cb = s->current_picture[1] + mb_x * 8; + dest_cr = s->current_picture[2] + mb_x * 8; + }else{ + dest_y = s->current_picture [0] + (mb_y * 16* s->linesize ) + mb_x * 16; + dest_cb = s->current_picture[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; + dest_cr = s->current_picture[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; + } if (s->interlaced_dct) { dct_linesize = s->linesize * 2; @@ -1612,7 +1843,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) /* motion handling */ /* decoding or more than one mb_type (MC was allready done otherwise) */ if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){ - if ((!s->no_rounding) || s->pict_type==B_TYPE){ + if ((!s->no_rounding) || s->pict_type==B_TYPE){ op_pix = put_pixels_tab; op_qpix= put_qpel_pixels_tab; }else{ @@ -1634,7 +1865,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) if(s->hurry_up>1) goto the_end; /* add dct residue */ - if(s->encoding || !(s->mpeg2 || s->h263_msmpeg4 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){ + if(s->encoding || !( s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO + || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){ add_dequant_dct(s, block[0], 0, dest_y, dct_linesize); add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize); add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize); @@ -1657,14 +1889,26 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) } } else { /* dct only in intra block */ - put_dct(s, block[0], 0, dest_y, dct_linesize); - put_dct(s, block[1], 1, dest_y + 8, dct_linesize); - put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize); - put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize); - - if(!(s->flags&CODEC_FLAG_GRAY)){ - put_dct(s, block[4], 4, dest_cb, s->uvlinesize); - put_dct(s, block[5], 5, dest_cr, s->uvlinesize); + if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){ + put_dct(s, block[0], 0, dest_y, dct_linesize); + put_dct(s, block[1], 1, dest_y + 8, dct_linesize); + put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize); + put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize); + + if(!(s->flags&CODEC_FLAG_GRAY)){ + put_dct(s, block[4], 4, dest_cb, s->uvlinesize); + put_dct(s, block[5], 5, dest_cr, s->uvlinesize); + } + }else{ + s->idct_put(dest_y , dct_linesize, block[0]); + s->idct_put(dest_y + 8, dct_linesize, block[1]); + s->idct_put(dest_y + dct_offset , dct_linesize, block[2]); + s->idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]); + + if(!(s->flags&CODEC_FLAG_GRAY)){ + s->idct_put(dest_cb, s->uvlinesize, block[4]); + s->idct_put(dest_cr, s->uvlinesize, block[5]); + } } } } @@ -1700,7 +1944,7 @@ static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int th if(last_index<=skip_dc - 1) return; for(i=0; i<=last_index; i++){ - const int j = zigzag_direct[i]; + const int j = s->intra_scantable.permutated[i]; const int level = ABS(block[j]); 
if(level==1){ if(skip_dc && i==0) continue; @@ -1714,7 +1958,7 @@ static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int th } if(score >= threshold) return; for(i=skip_dc; i<=last_index; i++){ - const int j = zigzag_direct[i]; + const int j = s->intra_scantable.permutated[i]; block[j]=0; } if(block[0]) s->block_last_index[n]= 0; @@ -1726,69 +1970,267 @@ static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index int i; const int maxlevel= s->max_qcoeff; const int minlevel= s->min_qcoeff; - - for(i=0;i<=last_index; i++){ - const int j = zigzag_direct[i]; + + if(s->mb_intra){ + i=1; //skip clipping of intra dc + }else + i=0; + + for(;i<=last_index; i++){ + const int j= s->intra_scantable.permutated[i]; int level = block[j]; - + if (level>maxlevel) level=maxlevel; else if(level<minlevel) level=minlevel; block[j]= level; } } +static inline void requantize_coeffs(MpegEncContext *s, DCTELEM block[64], int oldq, int newq, int n) +{ + int i; + + if(s->mb_intra){ + i=1; //skip clipping of intra dc + //FIXME requantize, note (mpeg1/h263/h263p-aic dont need it,...) + }else + i=0; + + for(;i<=s->block_last_index[n]; i++){ + const int j = s->intra_scantable.permutated[i]; + int level = block[j]; + + block[j]= ROUNDED_DIV(level*oldq, newq); + } + + for(i=s->block_last_index[n]; i>=0; i--){ + const int j = s->intra_scantable.permutated[i]; + if(block[j]) break; + } + s->block_last_index[n]= i; +} + +static inline void auto_requantize_coeffs(MpegEncContext *s, DCTELEM block[6][64]) +{ + int i,n, newq; + const int maxlevel= s->max_qcoeff; + const int minlevel= s->min_qcoeff; + int largest=0, smallest=0; + + assert(s->adaptive_quant); + + for(n=0; n<6; n++){ + if(s->mb_intra){ + i=1; //skip clipping of intra dc + //FIXME requantize, note (mpeg1/h263/h263p-aic dont need it,...) 
+ }else + i=0; + + for(;i<=s->block_last_index[n]; i++){ + const int j = s->intra_scantable.permutated[i]; + int level = block[n][j]; + if(largest < level) largest = level; + if(smallest > level) smallest= level; + } + } + + for(newq=s->qscale+1; newq<32; newq++){ + if( ROUNDED_DIV(smallest*s->qscale, newq) >= minlevel + && ROUNDED_DIV(largest *s->qscale, newq) <= maxlevel) + break; + } + + if(s->out_format==FMT_H263){ + /* h263 like formats cannot change qscale by more than 2 easiely */ + if(s->avctx->qmin + 2 < newq) + newq= s->avctx->qmin + 2; + } + + for(n=0; n<6; n++){ + requantize_coeffs(s, block[n], s->qscale, newq, n); + clip_coeffs(s, block[n], s->block_last_index[n]); + } + + s->dquant+= newq - s->qscale; + s->qscale= newq; +} +#if 0 +static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize + int score=0; + int x,y; + + for(y=0; y<7; y++){ + for(x=0; x<16; x+=4){ + score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride]) + +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]); + } + s+= stride; + } + + return score; +} + +static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize + int score=0; + int x,y; + + for(y=0; y<7; y++){ + for(x=0; x<16; x++){ + score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]); + } + s1+= stride; + s2+= stride; + } + + return score; +} +#else +#define SQ(a) ((a)*(a)) + +static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize + int score=0; + int x,y; + + for(y=0; y<7; y++){ + for(x=0; x<16; x+=4){ + score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride]) + +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]); + } + s+= stride; + } + + return score; +} + +static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize + int score=0; + int x,y; + + for(y=0; y<7; y++){ + for(x=0; x<16; x++){ + score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]); + } + s1+= stride; + s2+= stride; + } + + return score; +} + +#endif + +void ff_draw_horiz_band(MpegEncContext *s){ + if ( s->avctx->draw_horiz_band + && (s->num_available_buffers>=1 || (!s->has_b_frames)) ) { + UINT8 *src_ptr[3]; + int y, h, offset; + y = s->mb_y * 16; + h = s->height - y; + if (h > 16) + h = 16; + + if(s->pict_type==B_TYPE) + offset = 0; + else + offset = y * s->linesize; + + if(s->pict_type==B_TYPE || (!s->has_b_frames)){ + src_ptr[0] = s->current_picture[0] + offset; + src_ptr[1] = s->current_picture[1] + (offset >> 2); + src_ptr[2] = s->current_picture[2] + (offset >> 2); + } else { + src_ptr[0] = s->last_picture[0] + offset; + src_ptr[1] = s->last_picture[1] + (offset >> 2); + src_ptr[2] = s->last_picture[2] + (offset >> 2); + } + s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize, + y, s->width, h); + } +} + static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) { const int mb_x= s->mb_x; const int mb_y= s->mb_y; int i; int skip_dct[6]; -#if 0 - if (s->interlaced_dct) { - dct_linesize = s->linesize * 2; - dct_offset = s->linesize; - } else { - dct_linesize = s->linesize; - dct_offset = s->linesize * 8; - } -#endif + int dct_offset = s->linesize*8; //default for progressive frames + for(i=0; i<6; i++) skip_dct[i]=0; + + if(s->adaptive_quant){ + s->dquant= s->qscale_table[mb_x + mb_y*s->mb_width] - s->qscale; + + if(s->out_format==FMT_H263){ + if (s->dquant> 2) s->dquant= 2; + else if(s->dquant<-2) s->dquant=-2; + } + + if(s->codec_id==CODEC_ID_MPEG4){ + if(!s->mb_intra){ + assert(s->dquant==0 || 
s->mv_type!=MV_TYPE_8X8); + + if(s->mv_dir&MV_DIRECT) + s->dquant=0; + } + } + s->qscale+= s->dquant; + s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; + s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; + } if (s->mb_intra) { UINT8 *ptr; - int wrap; + int wrap_y; int emu=0; - wrap = s->linesize; - ptr = s->new_picture[0] + (mb_y * 16 * wrap) + mb_x * 16; + wrap_y = s->linesize; + ptr = s->new_picture[0] + (mb_y * 16 * wrap_y) + mb_x * 16; + if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ - emulated_edge_mc(s, ptr, wrap, 16, 16, mb_x*16, mb_y*16, s->width, s->height); + emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); ptr= s->edge_emu_buffer; emu=1; } - get_pixels(s->block[0], ptr , wrap); - get_pixels(s->block[1], ptr + 8, wrap); - get_pixels(s->block[2], ptr + 8 * wrap , wrap); - get_pixels(s->block[3], ptr + 8 * wrap + 8, wrap); + + if(s->flags&CODEC_FLAG_INTERLACED_DCT){ + int progressive_score, interlaced_score; + + progressive_score= pix_vcmp16x8(ptr, wrap_y ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y ); + interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y , wrap_y*2); + + if(progressive_score > interlaced_score + 100){ + s->interlaced_dct=1; + + dct_offset= wrap_y; + wrap_y<<=1; + }else + s->interlaced_dct=0; + } + + get_pixels(s->block[0], ptr , wrap_y); + get_pixels(s->block[1], ptr + 8, wrap_y); + get_pixels(s->block[2], ptr + dct_offset , wrap_y); + get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y); if(s->flags&CODEC_FLAG_GRAY){ skip_dct[4]= 1; skip_dct[5]= 1; }else{ - wrap >>=1; - ptr = s->new_picture[1] + (mb_y * 8 * wrap) + mb_x * 8; + int wrap_c = s->uvlinesize; + ptr = s->new_picture[1] + (mb_y * 8 * wrap_c) + mb_x * 8; if(emu){ - emulated_edge_mc(s, ptr, wrap, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); + emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr= s->edge_emu_buffer; } - get_pixels(s->block[4], ptr, wrap); + get_pixels(s->block[4], ptr, wrap_c); - ptr = s->new_picture[2] + (mb_y * 8 * wrap) + mb_x * 8; + ptr = s->new_picture[2] + (mb_y * 8 * wrap_c) + mb_x * 8; if(emu){ - emulated_edge_mc(s, ptr, wrap, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); + emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr= s->edge_emu_buffer; } - get_pixels(s->block[5], ptr, wrap); + get_pixels(s->block[5], ptr, wrap_c); } }else{ op_pixels_func (*op_pix)[4]; @@ -1802,7 +2244,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; wrap_y = s->linesize; - wrap_c = wrap_y>>1; + wrap_c = s->uvlinesize; ptr_y = s->new_picture[0] + (mb_y * 16 * wrap_y) + mb_x * 16; ptr_cb = s->new_picture[1] + (mb_y * 8 * wrap_c) + mb_x * 8; ptr_cr = s->new_picture[2] + (mb_y * 8 * wrap_c) + mb_x * 8; @@ -1829,11 +2271,29 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) ptr_y= s->edge_emu_buffer; emu=1; } + + if(s->flags&CODEC_FLAG_INTERLACED_DCT){ + int progressive_score, interlaced_score; + + progressive_score= pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y ) + + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y ); + interlaced_score = pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y*2) + + pix_diff_vcmp16x8(ptr_y + wrap_y , dest_y + wrap_y , wrap_y*2); + + if(progressive_score > interlaced_score + 600){ + s->interlaced_dct=1; + + dct_offset= wrap_y; + wrap_y<<=1; + }else + 
s->interlaced_dct=0; + } + diff_pixels(s->block[0], ptr_y , dest_y , wrap_y); diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y); - diff_pixels(s->block[2], ptr_y + 8 * wrap_y , dest_y + 8 * wrap_y , wrap_y); - diff_pixels(s->block[3], ptr_y + 8 * wrap_y + 8, dest_y + 8 * wrap_y + 8, wrap_y); - + diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y); + diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y); + if(s->flags&CODEC_FLAG_GRAY){ skip_dct[4]= 1; skip_dct[5]= 1; @@ -1850,12 +2310,13 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); } - /* pre quantization */ + /* pre quantization */ if(s->mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){ + //FIXME optimize if(pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1; if(pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1; - if(pix_abs8x8(ptr_y + 8*wrap_y , dest_y + 8*wrap_y , wrap_y) < 20*s->qscale) skip_dct[2]= 1; - if(pix_abs8x8(ptr_y + 8*wrap_y + 8, dest_y + 8*wrap_y + 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1; + if(pix_abs8x8(ptr_y +dct_offset , dest_y +dct_offset , wrap_y) < 20*s->qscale) skip_dct[2]= 1; + if(pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1; if(pix_abs8x8(ptr_cb , dest_cb , wrap_y) < 20*s->qscale) skip_dct[4]= 1; if(pix_abs8x8(ptr_cr , dest_cr , wrap_y) < 20*s->qscale) skip_dct[5]= 1; #if 0 @@ -1865,7 +2326,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) for(i=0; i<6; i++) if(skip_dct[i]) num++; stat[num]++; - + if(s->mb_x==0 && s->mb_y==0){ for(i=0; i<7; i++){ printf("%6d %1d\n", stat[i], i); @@ -1876,16 +2337,16 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) } } - + #if 0 { float adap_parm; - + adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) / ((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0); - - printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d", - (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P', + + printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d", + (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P', s->qscale, adap_parm, s->qscale*adap_parm, s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var); } @@ -1964,7 +2425,7 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext d->mb_incr= s->mb_incr; for(i=0; i<3; i++) d->last_dc[i]= s->last_dc[i]; - + /* statistics */ d->mv_bits= s->mv_bits; d->i_tex_bits= s->i_tex_bits; @@ -1982,14 +2443,14 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){ int i; - memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); + memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop? 
- + /* mpeg1 */ d->mb_incr= s->mb_incr; for(i=0; i<3; i++) d->last_dc[i]= s->last_dc[i]; - + /* statistics */ d->mv_bits= s->mv_bits; d->i_tex_bits= s->i_tex_bits; @@ -2012,14 +2473,15 @@ static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext * d->block= s->block; for(i=0; i<6; i++) d->block_last_index[i]= s->block_last_index[i]; + d->interlaced_dct= s->interlaced_dct; } -static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, +static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y) { int bits_count; - + copy_context_before_encode(s, backup, type); s->block= s->blocks[*next_block]; @@ -2047,7 +2509,7 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE static void encode_picture(MpegEncContext *s, int picture_number) { - int mb_x, mb_y, last_gob, pdif = 0; + int mb_x, mb_y, pdif = 0; int i; int bits; MpegEncContext best_s, backup_s; @@ -2070,16 +2532,18 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->block_wrap[3]= s->mb_width*2 + 2; s->block_wrap[4]= s->block_wrap[5]= s->mb_width + 2; - + /* Reset the average MB variance */ s->mb_var_sum = 0; s->mc_mb_var_sum = 0; /* we need to initialize some time vars before we can encode b-frames */ if (s->h263_pred && !s->h263_msmpeg4) - ff_set_mpeg4_time(s, s->picture_number); + ff_set_mpeg4_time(s, s->picture_number); s->scene_change_score=0; + + s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration /* Estimate motion for every MB */ if(s->pict_type != I_TYPE){ @@ -2111,7 +2575,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) memset(s->motion_val[0], 0, sizeof(INT16)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2); memset(s->p_mv_table , 0, sizeof(INT16)*(s->mb_width+2)*(s->mb_height+2)*2); memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height); - + if(!s->fixed_qscale){ /* finding spatial complexity for I-frame rate control */ for(mb_y=0; mb_y < s->mb_height; mb_y++) { @@ -2121,11 +2585,12 @@ static void encode_picture(MpegEncContext *s, int picture_number) uint8_t *pix = s->new_picture[0] + (yy * s->linesize) + xx; int varc; int sum = pix_sum(pix, s->linesize); - + sum= (sum+8)>>4; varc = (pix_norm1(pix, s->linesize) - sum*sum + 500 + 128)>>8; - s->mb_var[s->mb_width * mb_y + mb_x] = varc; + s->mb_var [s->mb_width * mb_y + mb_x] = varc; + s->mb_mean[s->mb_width * mb_y + mb_x] = (sum+7)>>4; s->mb_var_sum += varc; } } @@ -2140,8 +2605,8 @@ static void encode_picture(MpegEncContext *s, int picture_number) } //printf("Scene change detected, encoding as I Frame %d %d\n", s->mb_var_sum, s->mc_mb_var_sum); } - - if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) + + if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER); ff_fix_long_p_mvs(s); if(s->pict_type==B_TYPE){ @@ -2153,20 +2618,37 @@ static void encode_picture(MpegEncContext *s, int picture_number) ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR); ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR); } + + if (s->fixed_qscale) + s->frame_qscale = s->avctx->quality; + else + s->frame_qscale = ff_rate_estimate_qscale(s); -//printf("f_code %d ///\n", s->f_code); - -// printf("%d %d\n", s->avg_mb_var, s->mc_mb_var); - if 
(!s->fixed_qscale) - s->qscale = ff_rate_estimate_qscale(s); + if(s->adaptive_quant){ + switch(s->codec_id){ + case CODEC_ID_MPEG4: + ff_clean_mpeg4_qscales(s); + break; + case CODEC_ID_H263: + case CODEC_ID_H263P: + ff_clean_h263_qscales(s); + break; + } + s->qscale= s->qscale_table[0]; + }else + s->qscale= (int)(s->frame_qscale + 0.5); + if (s->out_format == FMT_MJPEG) { /* for mjpeg, we do include qscale in the matrix */ s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0]; - for(i=1;i<64;i++) - s->intra_matrix[i] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3); - convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, - s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias); + for(i=1;i<64;i++){ + int j= s->idct_permutation[i]; + + s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3); + } + convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, + s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8); } s->last_bits= get_bit_count(&s->pb); @@ -2175,11 +2657,11 @@ static void encode_picture(MpegEncContext *s, int picture_number) mjpeg_picture_header(s); break; case FMT_H263: - if (s->h263_msmpeg4) + if (s->h263_msmpeg4) msmpeg4_encode_picture_header(s, picture_number); else if (s->h263_pred) mpeg4_encode_picture_header(s, picture_number); - else if (s->h263_rv10) + else if (s->h263_rv10) rv10_encode_picture_header(s, picture_number); else h263_encode_picture_header(s, picture_number); @@ -2209,46 +2691,21 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->last_mv[0][0][0] = 0; s->last_mv[0][0][1] = 0; - /* Get the GOB height based on picture height */ - if (s->out_format == FMT_H263 && !s->h263_pred && !s->h263_msmpeg4) { - if (s->height <= 400) - s->gob_index = 1; - else if (s->height <= 800) - s->gob_index = 2; - else - s->gob_index = 4; - }else if(s->codec_id==CODEC_ID_MPEG4){ - s->gob_index = 1; - } + if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P) + s->gob_index = ff_h263_get_gob_height(s); - if(s->codec_id==CODEC_ID_MPEG4 && s->data_partitioning && s->pict_type!=B_TYPE) + if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame) ff_mpeg4_init_partitions(s); s->resync_mb_x=0; s->resync_mb_y=0; + s->first_slice_line = 1; + s->ptr_lastgob = s->pb.buf; + s->ptr_last_mb_line = s->pb.buf; for(mb_y=0; mb_y < s->mb_height; mb_y++) { - /* Put GOB header based on RTP MTU for formats which support it per line (H263*)*/ - /* TODO: Put all this stuff in a separate generic function */ - if (s->rtp_mode) { - if (!mb_y) { - s->ptr_lastgob = s->pb.buf; - s->ptr_last_mb_line = s->pb.buf; - } else if (s->out_format == FMT_H263 && !s->h263_pred && !s->h263_msmpeg4 && !(mb_y % s->gob_index)) { - // MN: we could move the space check from h263 -> here, as its not h263 specific - last_gob = h263_encode_gob_header(s, mb_y); - if (last_gob) { - s->first_slice_line = 1; - }else{ - /*MN: we reset it here instead at the end of each line cuz mpeg4 can have - slice lines starting & ending in the middle*/ - s->first_slice_line = 0; - } - } - } - s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; - + s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1; s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1); s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1; @@ -2270,14 +2727,18 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->block_index[4]++; s->block_index[5]++; - /* write gob / video packet header for formats which support it 
at any MB (MPEG4) */ - if(s->rtp_mode && s->mb_y>0 && s->codec_id==CODEC_ID_MPEG4){ - int pdif= pbBufPtr(&s->pb) - s->ptr_lastgob; - - //the *2 is there so we stay below the requested size - if(pdif + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size){ - if(s->codec_id==CODEC_ID_MPEG4){ - if(s->data_partitioning && s->pict_type!=B_TYPE){ + /* write gob / video packet header */ + if(s->rtp_mode){ + int current_packet_size, is_gob_start; + + current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob; + is_gob_start=0; + + if(s->codec_id==CODEC_ID_MPEG4){ + if(current_packet_size + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size + && s->mb_y + s->mb_x>0){ + + if(s->partitioned_frame){ ff_mpeg4_merge_partitions(s); ff_mpeg4_init_partitions(s); } @@ -2289,17 +2750,28 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->last_bits= bits; } ff_mpeg4_clean_buffers(s); + is_gob_start=1; } + }else{ + if(current_packet_size + s->mb_line_avgsize*s->gob_index >= s->rtp_payload_size + && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){ + + h263_encode_gob_header(s, mb_y); + is_gob_start=1; + } + } + + if(is_gob_start){ s->ptr_lastgob = pbBufPtr(&s->pb); s->first_slice_line=1; s->resync_mb_x=mb_x; s->resync_mb_y=mb_y; } + } - if( (s->resync_mb_x == s->mb_x) - && s->resync_mb_y+1 == s->mb_y){ - s->first_slice_line=0; - } + if( (s->resync_mb_x == s->mb_x) + && s->resync_mb_y+1 == s->mb_y){ + s->first_slice_line=0; } if(mb_type & (mb_type-1)){ // more than 1 MB type possible @@ -2309,6 +2781,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) copy_context_before_encode(&backup_s, s, -1); backup_s.pb= s->pb; best_s.data_partitioning= s->data_partitioning; + best_s.partitioned_frame= s->partitioned_frame; if(s->data_partitioning){ backup_s.pb2= s->pb2; backup_s.tex_pb= s->tex_pb; @@ -2320,10 +2793,10 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mb_intra= 0; s->mv[0][0][0] = s->p_mv_table[xy][0]; s->mv[0][0][1] = s->p_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } - if(mb_type&MB_TYPE_INTER4V){ + if(mb_type&MB_TYPE_INTER4V){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; @@ -2331,7 +2804,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mv[0][i][0] = s->motion_val[s->block_index[i]][0]; s->mv[0][i][1] = s->motion_val[s->block_index[i]][1]; } - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&MB_TYPE_FORWARD){ @@ -2340,7 +2813,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mb_intra= 0; s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } if(mb_type&MB_TYPE_BACKWARD){ @@ -2349,7 +2822,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mb_intra= 0; s->mv[1][0][0] = s->b_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_back_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, &dmin, &next_block, s->mv[1][0][0], 
s->mv[1][0][1]); } if(mb_type&MB_TYPE_BIDIR){ @@ -2360,7 +2833,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&MB_TYPE_DIRECT){ @@ -2371,7 +2844,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mv[0][0][1] = s->b_direct_forw_mv_table[xy][1]; s->mv[1][0][0] = s->b_direct_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_direct_back_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, &dmin, &next_block, s->b_direct_mv_table[xy][0], s->b_direct_mv_table[xy][1]); } if(mb_type&MB_TYPE_INTRA){ @@ -2380,25 +2853,25 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mb_intra= 1; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); /* force cleaning of ac/dc pred stuff if needed ... */ if(s->h263_pred || s->h263_aic) s->mbintra_table[mb_x + mb_y*s->mb_width]=1; } copy_context_after_encode(s, &best_s, -1); - + pb_bits_count= get_bit_count(&s->pb); flush_put_bits(&s->pb); ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count); s->pb= backup_s.pb; - + if(s->data_partitioning){ pb2_bits_count= get_bit_count(&s->pb2); flush_put_bits(&s->pb2); ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count); s->pb2= backup_s.pb2; - + tex_pb_bits_count= get_bit_count(&s->tex_pb); flush_put_bits(&s->tex_pb); ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count); @@ -2482,34 +2955,31 @@ static void encode_picture(MpegEncContext *s, int picture_number) } - /* Obtain average GOB size for RTP */ + /* Obtain average mb_row size for RTP */ if (s->rtp_mode) { - if (!mb_y) + if (mb_y==0) s->mb_line_avgsize = pbBufPtr(&s->pb) - s->ptr_last_mb_line; - else if (!(mb_y % s->gob_index)) { + else { s->mb_line_avgsize = (s->mb_line_avgsize + pbBufPtr(&s->pb) - s->ptr_last_mb_line) >> 1; - s->ptr_last_mb_line = pbBufPtr(&s->pb); } - //fprintf(stderr, "\nMB line: %d\tSize: %u\tAvg. 
Size: %u", s->mb_y, - // (s->pb.buf_ptr - s->ptr_last_mb_line), s->mb_line_avgsize); - if(s->codec_id!=CODEC_ID_MPEG4) s->first_slice_line = 0; //FIXME clean + s->ptr_last_mb_line = pbBufPtr(&s->pb); } } emms_c(); - if(s->codec_id==CODEC_ID_MPEG4 && s->data_partitioning && s->pict_type!=B_TYPE) + if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame) ff_mpeg4_merge_partitions(s); if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE) msmpeg4_encode_ext_header(s); - if(s->codec_id==CODEC_ID_MPEG4) + if(s->codec_id==CODEC_ID_MPEG4) ff_mpeg4_stuffing(&s->pb); //if (s->gob_number) // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number); - - /* Send the last GOB if RTP */ + + /* Send the last GOB if RTP */ if (s->rtp_mode) { flush_put_bits(&s->pb); pdif = pbBufPtr(&s->pb) - s->ptr_lastgob; @@ -2521,22 +2991,19 @@ static void encode_picture(MpegEncContext *s, int picture_number) } } -static int dct_quantize_c(MpegEncContext *s, +static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow) { int i, j, level, last_non_zero, q; const int *qmat; + const UINT8 *scantable= s->intra_scantable.scantable; int bias; int max=0; unsigned int threshold1, threshold2; - + s->fdct (block); - /* we need this permutation so that we correct the IDCT - permutation. will be moved into DCT code */ - block_permute(block); - if (s->mb_intra) { if (!s->h263_aic) { if (n < 4) @@ -2547,7 +3014,7 @@ static int dct_quantize_c(MpegEncContext *s, } else /* For AIC we skip quant/dequant of INTRADC */ q = 1 << 3; - + /* note: block[0] is assumed to be positive */ block[0] = (block[0] + (q >> 1)) / q; i = 1; @@ -2564,7 +3031,7 @@ static int dct_quantize_c(MpegEncContext *s, threshold2= (threshold1<<1); for(;i<64;i++) { - j = zigzag_direct[i]; + j = scantable[i]; level = block[j]; level = level * qmat[j]; @@ -2585,28 +3052,30 @@ static int dct_quantize_c(MpegEncContext *s, } } *overflow= s->max_qcoeff < max; //overflow might have happend + + /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */ + ff_block_permute(block, s->idct_permutation, scantable, last_non_zero); return last_non_zero; } -static void dct_unquantize_mpeg1_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; const UINT16 *quant_matrix; - if(s->alternate_scan) nCoeffs= 64; - else nCoeffs= s->block_last_index[n]+1; - + nCoeffs= s->block_last_index[n]; + if (s->mb_intra) { - if (n < 4) + if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; /* XXX: only mpeg1 */ quant_matrix = s->intra_matrix; - for(i=1;i<nCoeffs;i++) { - int j= zigzag_direct[i]; + for(i=1;i<=nCoeffs;i++) { + int j= s->intra_scantable.permutated[i]; level = block[j]; if (level) { if (level < 0) { @@ -2628,8 +3097,8 @@ static void dct_unquantize_mpeg1_c(MpegEncContext *s, } else { i = 0; quant_matrix = s->inter_matrix; - for(;i<nCoeffs;i++) { - int j= zigzag_direct[i]; + for(;i<=nCoeffs;i++) { + int j= s->intra_scantable.permutated[i]; level = block[j]; if (level) { if (level < 0) { @@ -2653,23 +3122,23 @@ static void dct_unquantize_mpeg1_c(MpegEncContext *s, } } -static void dct_unquantize_mpeg2_c(MpegEncContext *s, +static void dct_unquantize_mpeg2_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; const UINT16 *quant_matrix; - if(s->alternate_scan) nCoeffs= 64; - else nCoeffs= s->block_last_index[n]+1; - + if(s->alternate_scan) nCoeffs= 63; + else nCoeffs= 
s->block_last_index[n]; + if (s->mb_intra) { - if (n < 4) + if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; quant_matrix = s->intra_matrix; - for(i=1;i<nCoeffs;i++) { - int j= zigzag_direct[i]; + for(i=1;i<=nCoeffs;i++) { + int j= s->intra_scantable.permutated[i]; level = block[j]; if (level) { if (level < 0) { @@ -2690,8 +3159,8 @@ static void dct_unquantize_mpeg2_c(MpegEncContext *s, int sum=-1; i = 0; quant_matrix = s->inter_matrix; - for(;i<nCoeffs;i++) { - int j= zigzag_direct[i]; + for(;i<=nCoeffs;i++) { + int j= s->intra_scantable.permutated[i]; level = block[j]; if (level) { if (level < 0) { @@ -2716,33 +3185,33 @@ static void dct_unquantize_mpeg2_c(MpegEncContext *s, } -static void dct_unquantize_h263_c(MpegEncContext *s, +static void dct_unquantize_h263_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, qmul, qadd; int nCoeffs; - + + assert(s->block_last_index[n]>=0); + + qadd = (qscale - 1) | 1; + qmul = qscale << 1; + if (s->mb_intra) { if (!s->h263_aic) { - if (n < 4) + if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; - } + }else + qadd = 0; i = 1; - nCoeffs= 64; //does not allways use zigzag table + nCoeffs= 63; //does not allways use zigzag table } else { i = 0; - nCoeffs= zigzag_end[ s->block_last_index[n] ]; + nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; } - qmul = s->qscale << 1; - if (s->h263_aic && s->mb_intra) - qadd = 0; - else - qadd = (s->qscale - 1) | 1; - - for(;i<nCoeffs;i++) { + for(;i<=nCoeffs;i++) { level = block[i]; if (level) { if (level < 0) { @@ -2759,119 +3228,6 @@ static void dct_unquantize_h263_c(MpegEncContext *s, } } -static void remove_ac(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y) -{ - int dc, dcb, dcr, y, i; - for(i=0; i<4; i++){ - dc= s->dc_val[0][mb_x*2+1 + (i&1) + (mb_y*2+1 + (i>>1))*(s->mb_width*2+2)]; - for(y=0; y<8; y++){ - int x; - for(x=0; x<8; x++){ - dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8; - } - } - } - dcb = s->dc_val[1][mb_x+1 + (mb_y+1)*(s->mb_width+2)]; - dcr= s->dc_val[2][mb_x+1 + (mb_y+1)*(s->mb_width+2)]; - for(y=0; y<8; y++){ - int x; - for(x=0; x<8; x++){ - dest_cb[x + y*(s->uvlinesize)]= dcb/8; - dest_cr[x + y*(s->uvlinesize)]= dcr/8; - } - } -} - -/** - * will conceal past errors, and allso drop b frames if needed - * - */ -void ff_conceal_past_errors(MpegEncContext *s, int unknown_pos) -{ - int mb_x= s->mb_x; - int mb_y= s->mb_y; - int mb_dist=0; - int i, intra_count=0, inter_count=0; - int intra_conceal= s->msmpeg4_version ? 50 : 50; //FIXME finetune - int inter_conceal= s->msmpeg4_version ? 
50 : 50; - - // for last block - if(mb_x>=s->mb_width) mb_x= s->mb_width -1; - if(mb_y>=s->mb_height) mb_y= s->mb_height-1; - - if(s->decoding_error==0 && unknown_pos){ - if(s->data_partitioning && s->pict_type!=B_TYPE) - s->decoding_error= DECODING_AC_LOST; - else - s->decoding_error= DECODING_DESYNC; - } - - if(s->decoding_error==DECODING_DESYNC && s->pict_type!=B_TYPE) s->next_p_frame_damaged=1; - - for(i=mb_x + mb_y*s->mb_width; i>=0; i--){ - if(s->mbintra_table[i]) intra_count++; - else inter_count++; - } - - if(s->decoding_error==DECODING_AC_LOST){ - intra_conceal*=2; - inter_conceal*=2; - }else if(s->decoding_error==DECODING_ACDC_LOST){ - intra_conceal*=2; - inter_conceal*=2; - } - - if(unknown_pos && (intra_count<inter_count)){ - intra_conceal= inter_conceal= s->mb_num; -// printf("%d %d\n",intra_count, inter_count); - } - - fprintf(stderr, "concealing errors\n"); - - /* for all MBs from the current one back until the last resync marker */ - for(; mb_y>=0 && mb_y>=s->resync_mb_y; mb_y--){ - for(; mb_x>=0; mb_x--){ - uint8_t *dest_y = s->current_picture[0] + (mb_y * 16* s->linesize ) + mb_x * 16; - uint8_t *dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; - uint8_t *dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; - int mb_x_backup= s->mb_x; //FIXME pass xy to mpeg_motion - int mb_y_backup= s->mb_y; - s->mb_x=mb_x; - s->mb_y=mb_y; - if(s->mbintra_table[mb_y*s->mb_width + mb_x] && mb_dist<intra_conceal){ - if(s->decoding_error==DECODING_AC_LOST){ - remove_ac(s, dest_y, dest_cb, dest_cr, mb_x, mb_y); -// printf("remove ac to %d %d\n", mb_x, mb_y); - }else{ - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - s->last_picture, 0, 0, put_pixels_tab, - 0/*mx*/, 0/*my*/, 16); - } - } - else if(!s->mbintra_table[mb_y*s->mb_width + mb_x] && mb_dist<inter_conceal){ - int mx=0; - int my=0; - - if(s->decoding_error!=DECODING_DESYNC){ - int xy= mb_x*2+1 + (mb_y*2+1)*(s->mb_width*2+2); - mx= s->motion_val[ xy ][0]; - my= s->motion_val[ xy ][1]; - } - - mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, - s->last_picture, 0, 0, put_pixels_tab, - mx, my, 16); - } - s->mb_x= mb_x_backup; - s->mb_y= mb_y_backup; - - if(mb_x== s->resync_mb_x && mb_y== s->resync_mb_y) return; - if(!s->mbskip_table[mb_x + mb_y*s->mb_width]) mb_dist++; - } - mb_x=s->mb_width-1; - } -} - AVCodec mpeg1video_encoder = { "mpeg1video", CODEC_TYPE_VIDEO, |