diff options
author | Miguel Freitas <miguelfreitas@users.sourceforge.net> | 2003-01-08 13:18:42 +0000 |
---|---|---|
committer | Miguel Freitas <miguelfreitas@users.sourceforge.net> | 2003-01-08 13:18:42 +0000 |
commit | 6f1c8d4eafabd914b87e9171bf4d04f4ef9160ea (patch) | |
tree | e70be493d1222b10f96aa5efac01c0ec0d5bcc97 /src/libffmpeg/libavcodec/mpegvideo.c | |
parent | 1fb58a63872660424777d41389e426dc90f1b660 (diff) | |
download | xine-lib-6f1c8d4eafabd914b87e9171bf4d04f4ef9160ea.tar.gz xine-lib-6f1c8d4eafabd914b87e9171bf4d04f4ef9160ea.tar.bz2 |
Syncing ffmpeg (with some compilation fixes)
- fixes wma bugs
- mace, huffyuv and mp3 decoders imported (but not enabled)
Tested: wma (v1 and v2), mpeg4, msmpeg4 v1, v2 and v3, divx3, divx4, divx5, xvid and
dv decoders. Everything looks fine.
CVS patchset: 3828
CVS date: 2003/01/08 13:18:42
Diffstat (limited to 'src/libffmpeg/libavcodec/mpegvideo.c')
-rw-r--r-- | src/libffmpeg/libavcodec/mpegvideo.c | 914 |
1 file changed, 673 insertions, 241 deletions
diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c index c1a0b0b55..8206df470 100644 --- a/src/libffmpeg/libavcodec/mpegvideo.c +++ b/src/libffmpeg/libavcodec/mpegvideo.c @@ -41,10 +41,9 @@ static void dct_unquantize_h263_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w); static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); +static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c; -static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, - int src_x, int src_y, int w, int h); /* enable all paranoid tests for rounding, overflows, etc... */ @@ -64,8 +63,8 @@ static const uint16_t aanscales[64] = { 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315, 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552, - 8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446, - 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247 + 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446, + 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247 }; /* Input permutation for the simple_idct_mmx */ @@ -87,9 +86,6 @@ static const uint8_t h263_chroma_roundtab[16] = { static UINT16 (*default_mv_penalty)[MAX_MV*2+1]=NULL; static UINT8 default_fcode_tab[MAX_MV*2+1]; -/* default motion estimation */ -int motion_estimation_method = ME_EPZS; - static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64], const UINT16 *quant_matrix, int bias, int qmin, int qmax) { @@ -127,7 +123,8 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905 so 32768 >= (1<<19) / (qscale * 
quant_matrix[i]) >= 67 */ - qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]); + qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j])); +// qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]); qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]); if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1; @@ -233,6 +230,10 @@ int DCT_common_init(MpegEncContext *s) MPV_common_init_ppc(s); #endif + if(s->flags&CODEC_FLAG_TRELLIS_QUANT){ + s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_* + } + switch(s->idct_permutation_type){ case FF_NO_IDCT_PERM: for(i=0; i<64; i++) @@ -268,40 +269,87 @@ int DCT_common_init(MpegEncContext *s) } /** - * allocates various arrays for a Picture structure, except the pixels themself. - * The pixels are allocated/set in te get_buffer() + * allocates a Picture + * The pixels are allocated/set by calling get_buffer() if shared=0 */ -static int alloc_picture(MpegEncContext *s, Picture *pic){ - if (s->encoding) { - CHECKED_ALLOCZ(pic->mb_var , s->mb_num * sizeof(INT16)) - CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16)) - CHECKED_ALLOCZ(pic->mb_mean , s->mb_num * sizeof(INT8)) +static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ + + if(shared){ + assert(pic->data[0]); + assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED); + pic->type= FF_BUFFER_TYPE_SHARED; + }else{ + int r; + + assert(!pic->data[0]); + + r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic); + + if(r<0 || !pic->age || !pic->type || !pic->data[0]){ + fprintf(stderr, "get_buffer() failed (%d %d %d %X)\n", r, pic->age, pic->type, (int)pic->data[0]); + return -1; + } + + if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){ + fprintf(stderr, "get_buffer() failed (stride changed)\n"); + return -1; + } + + if(pic->linesize[1] != pic->linesize[2]){ + fprintf(stderr, "get_buffer() 
failed (uv stride missmatch)\n"); + return -1; + } + + s->linesize = pic->linesize[0]; + s->uvlinesize= pic->linesize[1]; } + + if(pic->qscale_table==NULL){ + if (s->encoding) { + CHECKED_ALLOCZ(pic->mb_var , s->mb_num * sizeof(INT16)) + CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16)) + CHECKED_ALLOCZ(pic->mb_mean , s->mb_num * sizeof(INT8)) + } - CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check - CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8)) - pic->qstride= s->mb_width; + CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check + CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8)) + pic->qstride= s->mb_width; + } return 0; fail: //for the CHECKED_ALLOCZ macro return -1; } +/** + * deallocates a picture + */ static void free_picture(MpegEncContext *s, Picture *pic){ int i; - + + if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){ + s->avctx->release_buffer(s->avctx, (AVFrame*)pic); + } + av_freep(&pic->mb_var); av_freep(&pic->mc_mb_var); av_freep(&pic->mb_mean); av_freep(&pic->mbskip_table); av_freep(&pic->qscale_table); - if(s->avctx->get_buffer == avcodec_default_get_buffer){ + if(pic->type == FF_BUFFER_TYPE_INTERNAL){ for(i=0; i<4; i++){ av_freep(&pic->base[i]); pic->data[i]= NULL; } av_freep(&pic->opaque); + pic->type= 0; + }else if(pic->type == FF_BUFFER_TYPE_SHARED){ + for(i=0; i<4; i++){ + pic->base[i]= + pic->data[i]= NULL; + } + pic->type= 0; } } @@ -334,9 +382,10 @@ int MPV_common_init(MpegEncContext *s) + (toupper((s->avctx->fourcc>>16)&0xFF)<<16) + (toupper((s->avctx->fourcc>>24)&0xFF)<<24); - CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance + CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance + s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17; - s->avctx->coded_picture= 
(AVVideoFrame*)&s->current_picture; + s->avctx->coded_frame= (AVFrame*)&s->current_picture; if (s->encoding) { int mv_table_size= (s->mb_width+2)*(s->mb_height+2); @@ -347,15 +396,13 @@ int MPV_common_init(MpegEncContext *s) CHECKED_ALLOCZ(s->b_back_mv_table , mv_table_size * 2 * sizeof(INT16)) CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(INT16)) CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(INT16)) - CHECKED_ALLOCZ(s->b_direct_forw_mv_table, mv_table_size * 2 * sizeof(INT16)) - CHECKED_ALLOCZ(s->b_direct_back_mv_table, mv_table_size * 2 * sizeof(INT16)) CHECKED_ALLOCZ(s->b_direct_mv_table , mv_table_size * 2 * sizeof(INT16)) //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer() - CHECKED_ALLOCZ(s->me_scratchpad, s->width*2*16*3*sizeof(uint8_t)) + CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t)) - CHECKED_ALLOCZ(s->me_map , ME_MAP_SIZE*sizeof(uint32_t)) - CHECKED_ALLOCZ(s->me_score_map, ME_MAP_SIZE*sizeof(uint16_t)) + CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t)) + CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t)) if(s->codec_id==CODEC_ID_MPEG4){ CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE); @@ -445,20 +492,12 @@ void MPV_common_end(MpegEncContext *s) { int i; - for(i=0; i<MAX_PICTURE_COUNT; i++){ - if(s->picture[i].data[0]){ - s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]); - } - } - av_freep(&s->mb_type); av_freep(&s->p_mv_table); av_freep(&s->b_forw_mv_table); av_freep(&s->b_back_mv_table); av_freep(&s->b_bidir_forw_mv_table); av_freep(&s->b_bidir_back_mv_table); - av_freep(&s->b_direct_forw_mv_table); - av_freep(&s->b_direct_back_mv_table); av_freep(&s->b_direct_mv_table); av_freep(&s->motion_val); av_freep(&s->dc_val[0]); @@ -467,15 +506,15 @@ void MPV_common_end(MpegEncContext *s) av_freep(&s->mbintra_table); av_freep(&s->cbp_table); av_freep(&s->pred_dir_table); - av_freep(&s->me_scratchpad); - av_freep(&s->me_map); 
- av_freep(&s->me_score_map); + av_freep(&s->me.scratchpad); + av_freep(&s->me.map); + av_freep(&s->me.score_map); av_freep(&s->mbskip_table); av_freep(&s->bitstream_buffer); av_freep(&s->tex_pb_buffer); av_freep(&s->pb2_buffer); - av_freep(&s->edge_emu_buffer); + av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL; av_freep(&s->co_located_type_table); av_freep(&s->field_mv_table); av_freep(&s->field_select_table); @@ -525,6 +564,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->chroma_elim_threshold= avctx->chroma_elim_threshold; s->strict_std_compliance= avctx->strict_std_compliance; s->data_partitioning= avctx->flags & CODEC_FLAG_PART; + s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0; s->mpeg_quant= avctx->mpeg_quant; if (s->gop_size <= 1) { @@ -534,12 +574,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->intra_only = 0; } - /* ME algorithm */ - if (avctx->me_method == 0) - /* For compatibility */ - s->me_method = motion_estimation_method; - else - s->me_method = avctx->me_method; + s->me_method = avctx->me_method; /* Fixed QSCALE */ s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE); @@ -572,6 +607,7 @@ int MPV_encode_init(AVCodecContext *avctx) if (mjpeg_init(s) < 0) return -1; avctx->delay=0; + s->low_delay=1; break; case CODEC_ID_H263: if (h263_get_picture_format(s->width, s->height) == 7) { @@ -580,6 +616,7 @@ int MPV_encode_init(AVCodecContext *avctx) } s->out_format = FMT_H263; avctx->delay=0; + s->low_delay=1; break; case CODEC_ID_H263P: s->out_format = FMT_H263; @@ -591,18 +628,19 @@ int MPV_encode_init(AVCodecContext *avctx) s->umvplus = 0; s->umvplus_dec = 0; avctx->delay=0; + s->low_delay=1; break; case CODEC_ID_RV10: s->out_format = FMT_H263; s->h263_rv10 = 1; avctx->delay=0; + s->low_delay=1; break; case CODEC_ID_MPEG4: s->out_format = FMT_H263; s->h263_pred = 1; s->unrestricted_mv = 1; - s->has_b_frames= s->max_b_frames ? 1 : 0; - s->low_delay= !s->has_b_frames; + s->low_delay= s->max_b_frames ? 
0 : 1; avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1); break; case CODEC_ID_MSMPEG4V1: @@ -612,6 +650,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->unrestricted_mv = 1; s->msmpeg4_version= 1; avctx->delay=0; + s->low_delay=1; break; case CODEC_ID_MSMPEG4V2: s->out_format = FMT_H263; @@ -620,6 +659,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->unrestricted_mv = 1; s->msmpeg4_version= 2; avctx->delay=0; + s->low_delay=1; break; case CODEC_ID_MSMPEG4V3: s->out_format = FMT_H263; @@ -628,6 +668,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->unrestricted_mv = 1; s->msmpeg4_version= 3; avctx->delay=0; + s->low_delay=1; break; case CODEC_ID_WMV1: s->out_format = FMT_H263; @@ -636,6 +677,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->unrestricted_mv = 1; s->msmpeg4_version= 4; avctx->delay=0; + s->low_delay=1; break; case CODEC_ID_WMV2: s->out_format = FMT_H263; @@ -644,6 +686,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->unrestricted_mv = 1; s->msmpeg4_version= 5; avctx->delay=0; + s->low_delay=1; break; default: return -1; @@ -664,13 +707,14 @@ int MPV_encode_init(AVCodecContext *avctx) } } } - s->mv_penalty= default_mv_penalty; + s->me.mv_penalty= default_mv_penalty; s->fcode_tab= default_fcode_tab; s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; /* dont use mv_penalty table for crap MV as it would be confused */ - if (s->me_method < ME_EPZS) s->mv_penalty = default_mv_penalty; + //FIXME remove after fixing / removing old ME + if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty; s->encoding = 1; @@ -678,6 +722,8 @@ int MPV_encode_init(AVCodecContext *avctx) if (MPV_common_init(s) < 0) return -1; + ff_init_me(s); + #ifdef CONFIG_ENCODERS if (s->out_format == FMT_H263) h263_encode_init(s); @@ -770,21 +816,41 @@ static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w) } } +static int find_unused_picture(MpegEncContext *s, int shared){ + int i; + + if(shared){ + for(i=0; i<MAX_PICTURE_COUNT; 
i++){ + if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break; + } + }else{ + for(i=0; i<MAX_PICTURE_COUNT; i++){ + if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; + } + for(i=0; i<MAX_PICTURE_COUNT; i++){ + if(s->picture[i].data[0]==NULL) break; + } + } + + assert(i<MAX_PICTURE_COUNT); + return i; +} + /* generic function for encode/decode called before a frame is coded/decoded */ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { - int i, r; - AVVideoFrame *pic; + int i; + AVFrame *pic; s->mb_skiped = 0; /* mark&release old frames */ if (s->pict_type != B_TYPE && s->last_picture.data[0]) { - Picture *pic= NULL; for(i=0; i<MAX_PICTURE_COUNT; i++){ +//printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]); if(s->picture[i].data[0] == s->last_picture.data[0]){ // s->picture[i].reference=0; - avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]); + avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]); break; } } @@ -796,35 +862,20 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) for(i=0; i<MAX_PICTURE_COUNT; i++){ if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){ fprintf(stderr, "releasing zombie picture\n"); - avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]); + avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]); } } } } alloc: if(!s->encoding){ - /* find unused Picture */ - for(i=0; i<MAX_PICTURE_COUNT; i++){ - if(s->picture[i].data[0]==NULL) break; - } - assert(i<MAX_PICTURE_COUNT); + i= find_unused_picture(s, 0); - pic= (AVVideoFrame*)&s->picture[i]; + pic= (AVFrame*)&s->picture[i]; pic->reference= s->pict_type != B_TYPE; pic->coded_picture_number= s->current_picture.coded_picture_number+1; - r= avctx->get_buffer(avctx, pic); - - if(r<0 || (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1]))){ - fprintf(stderr, "get_buffer() failed 
(stride changed), bye bye\n"); - return -1; - } - - s->linesize = pic->linesize[0]; - s->uvlinesize= pic->linesize[1]; - - if(pic->qscale_table==NULL) - alloc_picture(s, (Picture*)pic); + alloc_picture(s, (Picture*)pic, 0); s->current_picture= s->picture[i]; } @@ -860,7 +911,6 @@ alloc: void MPV_frame_end(MpegEncContext *s) { int i; - /* draw edge for correct motion prediction if outside */ if(s->codec_id!=CODEC_ID_SVQ1){ if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) { @@ -874,8 +924,6 @@ void MPV_frame_end(MpegEncContext *s) s->last_pict_type = s->pict_type; if(s->pict_type!=B_TYPE){ s->last_non_b_pict_type= s->pict_type; - s->num_available_buffers++; - if(s->num_available_buffers>2) s->num_available_buffers= 2; } s->current_picture.quality= s->qscale; //FIXME get average of qscale_table @@ -893,86 +941,132 @@ void MPV_frame_end(MpegEncContext *s) /* release non refernce frames */ for(i=0; i<MAX_PICTURE_COUNT; i++){ - if(s->picture[i].data[0] && !s->picture[i].reference) - s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]); + if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/) + s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]); + } + if(s->avctx->debug&FF_DEBUG_SKIP){ + int x,y; + for(y=0; y<s->mb_height; y++){ + for(x=0; x<s->mb_width; x++){ + int count= s->mbskip_table[x + y*s->mb_width]; + if(count>9) count=9; + printf(" %1d", count); + } + printf("\n"); + } + printf("pict type: %d\n", s->pict_type); } } -static int load_input_picture(MpegEncContext *s, AVVideoFrame *pic_arg){ - AVVideoFrame *pic; - int i,r; - const int encoding_delay= s->max_b_frames; - - /* find unused Picture */ - for(i=0; i<MAX_PICTURE_COUNT; i++){ - if(s->picture[i].data[0]==NULL) break; +static int get_sae(uint8_t *src, int ref, int stride){ + int x,y; + int acc=0; + + for(y=0; y<16; y++){ + for(x=0; x<16; x++){ + acc+= ABS(src[x+y*stride] - ref); + } } - 
assert(i<MAX_PICTURE_COUNT); - - pic= (AVVideoFrame*)&s->picture[i]; - pic->reference= 1; -// assert(avctx->get_buffer == default_get_buffer || avctx->get_buffer==NULL); - r= s->avctx->get_buffer(s->avctx, pic); + return acc; +} - if(r<0 || (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1]))){ - fprintf(stderr, "get_buffer() failed (stride changed), bye bye\n"); - return -1; - } +static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){ + int x, y, w, h; + int acc=0; - assert(s->linesize==0 || s->linesize ==pic->linesize[0]); - assert(s->uvlinesize==0 || s->uvlinesize==pic->linesize[1]); - assert(pic->linesize[1] == pic->linesize[2]); - s->linesize = pic->linesize[0]; - s->uvlinesize= pic->linesize[1]; + w= s->width &~15; + h= s->height&~15; - if(pic->qscale_table==NULL) - alloc_picture(s, (Picture*)pic); + for(y=0; y<h; y+=16){ + for(x=0; x<w; x+=16){ + int offset= x + y*stride; + int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride); + int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8; + int sae = get_sae(src + offset, mean, stride); + + acc+= sae + 500 < sad; + } + } + return acc; +} + -// assert(s->input_picture[0]==NULL || s->input_picture[0]->data[0]==NULL); +static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){ + AVFrame *pic; + int i; + const int encoding_delay= s->max_b_frames; + int direct=1; + + if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0; + if(pic_arg->linesize[0] != s->linesize) direct=0; + if(pic_arg->linesize[1] != s->uvlinesize) direct=0; + if(pic_arg->linesize[2] != s->uvlinesize) direct=0; + +// printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize); - if(s->input_picture[encoding_delay]) - pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1; -//printf("dpn2:%d\n", pic->display_picture_number); + if(direct){ + i= find_unused_picture(s, 1); - /* 
shift buffer entries */ - for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++) - s->input_picture[i-1]= s->input_picture[i]; - - s->input_picture[encoding_delay]= (Picture*)pic; - pic->pict_type= pic_arg->pict_type; - pic->quality= pic_arg->quality; + pic= (AVFrame*)&s->picture[i]; + pic->reference= 1; - if( pic->data[0] == pic_arg->data[0] - && pic->data[1] == pic_arg->data[1] - && pic->data[2] == pic_arg->data[2]){ - // empty + for(i=0; i<4; i++){ + pic->data[i]= pic_arg->data[i]; + pic->linesize[i]= pic_arg->linesize[i]; + } + alloc_picture(s, (Picture*)pic, 1); }else{ - int h_chroma_shift, v_chroma_shift; + i= find_unused_picture(s, 0); + + pic= (AVFrame*)&s->picture[i]; + pic->reference= 1; + + alloc_picture(s, (Picture*)pic, 0); + + if( pic->data[0] == pic_arg->data[0] + && pic->data[1] == pic_arg->data[1] + && pic->data[2] == pic_arg->data[2]){ + // empty + }else{ + int h_chroma_shift, v_chroma_shift; - avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); + avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); - for(i=0; i<3; i++){ - int src_stride= pic_arg->linesize[i]; - int dst_stride= i ? s->uvlinesize : s->linesize; - int h_shift= i ? h_chroma_shift : 0; - int v_shift= i ? v_chroma_shift : 0; - int w= s->width >>h_shift; - int h= s->height>>v_shift; - uint8_t *src= pic_arg->data[i]; - uint8_t *dst= pic->data[i] + 16; + for(i=0; i<3; i++){ + int src_stride= pic_arg->linesize[i]; + int dst_stride= i ? s->uvlinesize : s->linesize; + int h_shift= i ? h_chroma_shift : 0; + int v_shift= i ? 
v_chroma_shift : 0; + int w= s->width >>h_shift; + int h= s->height>>v_shift; + uint8_t *src= pic_arg->data[i]; + uint8_t *dst= pic->data[i]; - if(src_stride==dst_stride) - memcpy(dst, src, src_stride*h); - else{ - while(h--){ - memcpy(dst, src, w); - dst += dst_stride; - src += src_stride; + if(src_stride==dst_stride) + memcpy(dst, src, src_stride*h); + else{ + while(h--){ + memcpy(dst, src, w); + dst += dst_stride; + src += src_stride; + } } } } } + pic->quality= pic_arg->quality; + pic->pict_type= pic_arg->pict_type; + pic->pts = pic_arg->pts; + + if(s->input_picture[encoding_delay]) + pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1; + + /* shift buffer entries */ + for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++) + s->input_picture[i-1]= s->input_picture[i]; + + s->input_picture[encoding_delay]= (Picture*)pic; return 0; } @@ -984,70 +1078,119 @@ static void select_input_picture(MpegEncContext *s){ if(s->reordered_input_picture[0]) coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1; -//printf("cpn:%d\n", coded_pic_num); + for(i=1; i<MAX_PICTURE_COUNT; i++) s->reordered_input_picture[i-1]= s->reordered_input_picture[i]; s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL; /* set next picture types & ordering */ if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){ - if(s->input_picture[0]->pict_type){ - /* user selected pict_type */ - if(s->input_picture[0]->pict_type == I_TYPE){ - s->reordered_input_picture[0]= s->input_picture[0]; - s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; - }else{ - int b_frames; + if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture.data[0]==NULL || s->intra_only){ + s->reordered_input_picture[0]= s->input_picture[0]; + s->reordered_input_picture[0]->pict_type= I_TYPE; + s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; + }else{ + int b_frames; + + if(s->flags&CODEC_FLAG_PASS2){ + for(i=0; 
i<s->max_b_frames+1; i++){ + int pict_num= s->input_picture[0]->display_picture_number + i; + int pict_type= s->rc_context.entry[pict_num].new_pict_type; + s->input_picture[i]->pict_type= pict_type; + + if(i + 1 >= s->rc_context.num_entries) break; + } + } + if(s->input_picture[0]->pict_type){ + /* user selected pict_type */ for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){ if(s->input_picture[b_frames]->pict_type!=B_TYPE) break; } - + if(b_frames > s->max_b_frames){ fprintf(stderr, "warning, too many bframes in a row\n"); b_frames = s->max_b_frames; - s->input_picture[b_frames]->pict_type= I_TYPE; } + }else if(s->b_frame_strategy==0){ + b_frames= s->max_b_frames; + }else if(s->b_frame_strategy==1){ + for(i=1; i<s->max_b_frames+1; i++){ + if(s->input_picture[i]->b_frame_score==0){ + s->input_picture[i]->b_frame_score= + get_intra_count(s, s->input_picture[i ]->data[0], + s->input_picture[i-1]->data[0], s->linesize) + 1; + } + } + for(i=0; i<s->max_b_frames; i++){ + if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break; + } + + b_frames= FFMAX(0, i-1); - s->reordered_input_picture[0]= s->input_picture[b_frames]; - s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; - for(i=0; i<b_frames; i++){ - coded_pic_num++; - s->reordered_input_picture[i+1]= s->input_picture[i]; - s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num; - } + /* reset scores */ + for(i=0; i<b_frames+1; i++){ + s->input_picture[i]->b_frame_score=0; + } + }else{ + fprintf(stderr, "illegal b frame strategy\n"); + b_frames=0; } - }else{ - if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture.data[0]==NULL || s->intra_only){ - s->reordered_input_picture[0]= s->input_picture[0]; + + emms_c(); +//static int b_count=0; +//b_count+= b_frames; +//printf("b_frames: %d\n", b_count); + + s->reordered_input_picture[0]= s->input_picture[b_frames]; + if( s->picture_in_gop_number + b_frames >= s->gop_size + || s->reordered_input_picture[0]->pict_type== 
I_TYPE) s->reordered_input_picture[0]->pict_type= I_TYPE; - s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; - }else{ - s->reordered_input_picture[0]= s->input_picture[s->max_b_frames]; - if(s->picture_in_gop_number + s->max_b_frames >= s->gop_size) - s->reordered_input_picture[0]->pict_type= I_TYPE; - else - s->reordered_input_picture[0]->pict_type= P_TYPE; - s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; - for(i=0; i<s->max_b_frames; i++){ - coded_pic_num++; - s->reordered_input_picture[i+1]= s->input_picture[i]; - s->reordered_input_picture[i+1]->pict_type= B_TYPE; - s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num; - } + else + s->reordered_input_picture[0]->pict_type= P_TYPE; + s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; + for(i=0; i<b_frames; i++){ + coded_pic_num++; + s->reordered_input_picture[i+1]= s->input_picture[i]; + s->reordered_input_picture[i+1]->pict_type= B_TYPE; + s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num; } } } if(s->reordered_input_picture[0]){ - if(s->reordered_input_picture[0]->pict_type==B_TYPE){ - s->reordered_input_picture[0]->reference=0; + s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE; + + if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){ + int i= find_unused_picture(s, 0); + Picture *pic= &s->picture[i]; + + s->new_picture= *s->reordered_input_picture[0]; + + /* mark us unused / free shared pic */ + for(i=0; i<4; i++) + s->reordered_input_picture[0]->data[i]= NULL; + s->reordered_input_picture[0]->type= 0; + + pic->pict_type = s->reordered_input_picture[0]->pict_type; + pic->quality = s->reordered_input_picture[0]->quality; + pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number; + pic->reference = s->reordered_input_picture[0]->reference; + + alloc_picture(s, pic, 0); + + s->current_picture= *pic; + }else{ + assert( 
s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER + || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL); + + s->new_picture= *s->reordered_input_picture[0]; + + for(i=0; i<4; i++){ + s->reordered_input_picture[0]->data[i]-=16; //FIXME dirty + } + s->current_picture= *s->reordered_input_picture[0]; } - s->current_picture= *s->reordered_input_picture[0]; - s->new_picture= s->current_picture; - s->new_picture.data[0]+=16; - s->new_picture.data[1]+=16; - s->new_picture.data[2]+=16; s->picture_number= s->new_picture.display_picture_number; //printf("dpn:%d\n", s->picture_number); @@ -1060,7 +1203,7 @@ int MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data) { MpegEncContext *s = avctx->priv_data; - AVVideoFrame *pic_arg = data; + AVFrame *pic_arg = data; int i; init_put_bits(&s->pb, buf, buf_size, NULL, NULL); @@ -1151,7 +1294,7 @@ static inline void gmc1_motion(MpegEncContext *s, if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&15) + 16 > s->h_edge_pos || src_y + (motion_y&15) + 16 > s->v_edge_pos){ - emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos); + ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer; emu=1; } @@ -1189,14 +1332,14 @@ static inline void gmc1_motion(MpegEncContext *s, offset = (src_y * uvlinesize) + src_x + (src_offset>>1); ptr = ref_picture[1] + offset; if(emu){ - emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding); ptr = ref_picture[2] + offset; if(emu){ - emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, 
src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding); @@ -1268,7 +1411,7 @@ static inline void gmc_motion(MpegEncContext *s, } -static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, +void ff_emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h){ int x, y; int start_y, start_x, end_y, end_x; @@ -1368,7 +1511,7 @@ if(s->quarter_sample) if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos || src_y + (motion_y&1) + h > v_edge_pos){ - emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, + ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer + src_offset; emu=1; @@ -1405,7 +1548,7 @@ if(s->quarter_sample) offset = (src_y * uvlinesize) + src_x + (src_offset >> 1); ptr = ref_picture[1] + offset; if(emu){ - emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, + ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer + (src_offset >> 1); } @@ -1413,7 +1556,7 @@ if(s->quarter_sample) ptr = ref_picture[2] + offset; if(emu){ - emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, + ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer + (src_offset >> 1); } @@ -1453,7 +1596,7 @@ static inline void qpel_motion(MpegEncContext *s, if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos || src_y + (motion_y&3) + h > v_edge_pos){ - emulated_edge_mc(s, ptr - 
src_offset, s->linesize, 17, 17+field_based, + ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer + src_offset; emu=1; @@ -1498,7 +1641,7 @@ static inline void qpel_motion(MpegEncContext *s, offset = (src_y * uvlinesize) + src_x + (src_offset >> 1); ptr = ref_picture[1] + offset; if(emu){ - emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, + ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer + (src_offset >> 1); } @@ -1506,7 +1649,7 @@ static inline void qpel_motion(MpegEncContext *s, ptr = ref_picture[2] + offset; if(emu){ - emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, + ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer + (src_offset >> 1); } @@ -1542,6 +1685,10 @@ static inline void MPV_motion(MpegEncContext *s, ref_picture, 0, 0, pix_op, qpix_op, s->mv[dir][0][0], s->mv[dir][0][1], 16); + }else if(s->mspel){ + ff_mspel_motion(s, dest_y, dest_cb, dest_cr, + ref_picture, pix_op, + s->mv[dir][0][0], s->mv[dir][0][1], 16); }else{ mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, ref_picture, 0, @@ -1573,7 +1720,7 @@ static inline void MPV_motion(MpegEncContext *s, if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos || src_y + (motion_y&3) + 8 > s->v_edge_pos){ - emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos); + ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer; } } @@ -1604,7 +1751,7 @@ static inline void MPV_motion(MpegEncContext *s, if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 
> s->h_edge_pos || src_y + (motion_y&1) + 8 > s->v_edge_pos){ - emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos); + ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos); ptr= s->edge_emu_buffer; } } @@ -1651,7 +1798,7 @@ static inline void MPV_motion(MpegEncContext *s, if(s->flags&CODEC_FLAG_EMU_EDGE){ if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1 || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){ - emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; emu=1; } @@ -1660,7 +1807,7 @@ static inline void MPV_motion(MpegEncContext *s, ptr = ref_picture[2] + offset; if(emu){ - emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); ptr= s->edge_emu_buffer; } pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8); @@ -1795,7 +1942,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */ if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here - + //FIXME a lot of thet is only needed for !low_delay const int wrap = s->block_wrap[0]; const int xy = s->block_index[0]; const int mb_index= s->mb_x + s->mb_y*s->mb_width; @@ -1931,7 +2078,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize); add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize); } - } else { + } else if(s->codec_id != CODEC_ID_WMV2){ add_dct(s, block[0], 0, dest_y, dct_linesize); add_dct(s, block[1], 1, dest_y + 8, dct_linesize); add_dct(s, block[2], 2, dest_y + dct_offset, 
dct_linesize); @@ -1941,6 +2088,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) add_dct(s, block[4], 4, dest_cb, s->uvlinesize); add_dct(s, block[5], 5, dest_cr, s->uvlinesize); } + } else{ + ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr); } } else { /* dct only in intra block */ @@ -2174,7 +2323,7 @@ static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to d void ff_draw_horiz_band(MpegEncContext *s){ if ( s->avctx->draw_horiz_band - && (s->num_available_buffers>=1 || (!s->has_b_frames)) ) { + && (s->last_picture.data[0] || s->low_delay) ) { UINT8 *src_ptr[3]; int y, h, offset; y = s->mb_y * 16; @@ -2187,7 +2336,7 @@ void ff_draw_horiz_band(MpegEncContext *s){ else offset = y * s->linesize; - if(s->pict_type==B_TYPE || (!s->has_b_frames)){ + if(s->pict_type==B_TYPE || s->low_delay){ src_ptr[0] = s->current_picture.data[0] + offset; src_ptr[1] = s->current_picture.data[1] + (offset >> 2); src_ptr[2] = s->current_picture.data[2] + (offset >> 2); @@ -2243,7 +2392,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ - emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); + ff_emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); ptr= s->edge_emu_buffer; emu=1; } @@ -2275,14 +2424,14 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) int wrap_c = s->uvlinesize; ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8; if(emu){ - emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); + ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr= s->edge_emu_buffer; } s->dsp.get_pixels(s->block[4], ptr, wrap_c); ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8; if(emu){ - emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, 
s->height>>1); + ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr= s->edge_emu_buffer; } s->dsp.get_pixels(s->block[5], ptr, wrap_c); @@ -2322,7 +2471,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) } if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ - emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); + ff_emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height); ptr_y= s->edge_emu_buffer; emu=1; } @@ -2354,12 +2503,12 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) skip_dct[5]= 1; }else{ if(emu){ - emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); + ff_emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr_cb= s->edge_emu_buffer; } s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c); if(emu){ - emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); + ff_emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr_cr= s->edge_emu_buffer; } s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); @@ -2441,21 +2590,25 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) #ifdef CONFIG_ENCODERS /* huffman encode */ - switch(s->out_format) { - case FMT_MPEG1: - mpeg1_encode_mb(s, s->block, motion_x, motion_y); - break; - case FMT_H263: - if (s->h263_msmpeg4) - msmpeg4_encode_mb(s, s->block, motion_x, motion_y); - else if(s->h263_pred) - mpeg4_encode_mb(s, s->block, motion_x, motion_y); - else - h263_encode_mb(s, s->block, motion_x, motion_y); - break; - case FMT_MJPEG: - mjpeg_encode_mb(s, s->block); - break; + switch(s->codec_id){ //FIXME funct ptr could be slightly faster + case CODEC_ID_MPEG1VIDEO: + mpeg1_encode_mb(s, s->block, motion_x, motion_y); break; + case CODEC_ID_MPEG4: + mpeg4_encode_mb(s, s->block, motion_x, motion_y); break; + case CODEC_ID_MSMPEG4V2: + 
case CODEC_ID_MSMPEG4V3: + case CODEC_ID_WMV1: + msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break; + case CODEC_ID_WMV2: + ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break; + case CODEC_ID_MJPEG: + mjpeg_encode_mb(s, s->block); break; + case CODEC_ID_H263: + case CODEC_ID_H263P: + case CODEC_ID_RV10: + h263_encode_mb(s, s->block, motion_x, motion_y); break; + default: + assert(0); } #endif } @@ -2571,13 +2724,18 @@ static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, in int x,y; if(w==16 && h==16) - return s->dsp.pix_norm(src1, src2, stride); + return s->dsp.sse[0](NULL, src1, src2, stride); + else if(w==8 && h==8) + return s->dsp.sse[1](NULL, src1, src2, stride); for(y=0; y<h; y++){ for(x=0; x<w; x++){ acc+= sq[src1[x + y*stride] - src2[x + y*stride]]; } } + + assert(acc>=0); + return acc; } @@ -2618,9 +2776,37 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->scene_change_score=0; s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... 
stuff for ME ratedistoration - + + if(s->msmpeg4_version){ + if(s->pict_type==I_TYPE) + s->no_rounding=1; + else if(s->flipflop_rounding) + s->no_rounding ^= 1; + }else if(s->out_format == FMT_H263){ + if(s->pict_type==I_TYPE) + s->no_rounding=0; + else if(s->pict_type!=B_TYPE) + s->no_rounding ^= 1; + } /* Estimate motion for every MB */ if(s->pict_type != I_TYPE){ + if(s->pict_type != B_TYPE){ + if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){ + s->me.pre_pass=1; + s->me.dia_size= s->avctx->pre_dia_size; + + for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) { + for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) { + s->mb_x = mb_x; + s->mb_y = mb_y; + ff_pre_estimate_p_frame_motion(s, mb_x, mb_y); + } + } + s->me.pre_pass=0; + } + } + + s->me.dia_size= s->avctx->dia_size; for(mb_y=0; mb_y < s->mb_height; mb_y++) { s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1; s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1); @@ -2633,13 +2819,12 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->block_index[1]+=2; s->block_index[2]+=2; s->block_index[3]+=2; - + /* compute motion vector & mb_type and store in context */ if(s->pict_type==B_TYPE) ff_estimate_b_frame_motion(s, mb_x, mb_y); else ff_estimate_p_frame_motion(s, mb_x, mb_y); -// s->mb_type[mb_y*s->mb_width + mb_x]=MB_TYPE_INTER; } } }else /* if(s->pict_type == I_TYPE) */{ @@ -2734,7 +2919,9 @@ static void encode_picture(MpegEncContext *s, int picture_number) mjpeg_picture_header(s); break; case FMT_H263: - if (s->h263_msmpeg4) + if (s->codec_id == CODEC_ID_WMV2) + ff_wmv2_encode_picture_header(s, picture_number); + else if (s->h263_msmpeg4) msmpeg4_encode_picture_header(s, picture_number); else if (s->h263_pred) mpeg4_encode_picture_header(s, picture_number); @@ -2916,15 +3103,14 @@ static void encode_picture(MpegEncContext *s, int picture_number) &dmin, &next_block, 0, 0); } if(mb_type&MB_TYPE_DIRECT){ + int mx= s->b_direct_mv_table[xy][0]; + int my= 
s->b_direct_mv_table[xy][1]; + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; - s->mv_type = MV_TYPE_16X16; //FIXME s->mb_intra= 0; - s->mv[0][0][0] = s->b_direct_forw_mv_table[xy][0]; - s->mv[0][0][1] = s->b_direct_forw_mv_table[xy][1]; - s->mv[1][0][0] = s->b_direct_back_mv_table[xy][0]; - s->mv[1][0][1] = s->b_direct_back_mv_table[xy][1]; + ff_mpeg4_set_direct_mv(s, mx, my); encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, - &dmin, &next_block, s->b_direct_mv_table[xy][0], s->b_direct_mv_table[xy][1]); + &dmin, &next_block, mx, my); } if(mb_type&MB_TYPE_INTRA){ s->mv_dir = MV_DIR_FORWARD; @@ -2989,10 +3175,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mb_intra= 0; motion_x=s->b_direct_mv_table[xy][0]; motion_y=s->b_direct_mv_table[xy][1]; - s->mv[0][0][0] = s->b_direct_forw_mv_table[xy][0]; - s->mv[0][0][1] = s->b_direct_forw_mv_table[xy][1]; - s->mv[1][0][0] = s->b_direct_back_mv_table[xy][0]; - s->mv[1][0][1] = s->b_direct_back_mv_table[xy][1]; + ff_mpeg4_set_direct_mv(s, motion_x, motion_y); break; case MB_TYPE_BIDIR: s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; @@ -3037,7 +3220,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; - + s->current_picture.error[0] += sse( s, s->new_picture .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, @@ -3094,6 +3277,254 @@ static void encode_picture(MpegEncContext *s, int picture_number) } } +static int dct_quantize_trellis_c(MpegEncContext *s, + DCTELEM *block, int n, + int qscale, int *overflow){ + const int *qmat; + const UINT8 *scantable= s->intra_scantable.scantable; + int max=0; + unsigned int threshold1, threshold2; + int bias=0; + int run_tab[65]; + int level_tab[65]; + int score_tab[65]; + int last_run=0; + int last_level=0; + int last_score= 0; + int last_i= 0; + int coeff[3][64]; + int coeff_count[64]; + int lambda, qmul, 
qadd, start_i, last_non_zero, i; + const int esc_length= s->ac_esc_length; + uint8_t * length; + uint8_t * last_length; + int score_limit=0; + int left_limit= 0; + + s->fdct (block); + + qmul= qscale*16; + qadd= ((qscale-1)|1)*8; + + if (s->mb_intra) { + int q; + if (!s->h263_aic) { + if (n < 4) + q = s->y_dc_scale; + else + q = s->c_dc_scale; + q = q << 3; + } else{ + /* For AIC we skip quant/dequant of INTRADC */ + q = 1 << 3; + qadd=0; + } + + /* note: block[0] is assumed to be positive */ + block[0] = (block[0] + (q >> 1)) / q; + start_i = 1; + last_non_zero = 0; + qmat = s->q_intra_matrix[qscale]; + if(s->mpeg_quant || s->codec_id== CODEC_ID_MPEG1VIDEO) + bias= 1<<(QMAT_SHIFT-1); + length = s->intra_ac_vlc_length; + last_length= s->intra_ac_vlc_last_length; + } else { + start_i = 0; + last_non_zero = -1; + qmat = s->q_inter_matrix[qscale]; + length = s->inter_ac_vlc_length; + last_length= s->inter_ac_vlc_last_length; + } + + threshold1= (1<<QMAT_SHIFT) - bias - 1; + threshold2= (threshold1<<1); + + for(i=start_i; i<64; i++) { + const int j = scantable[i]; + const int k= i-start_i; + int level = block[j]; + level = level * qmat[j]; + +// if( bias+level >= (1<<(QMAT_SHIFT - 3)) +// || bias-level >= (1<<(QMAT_SHIFT - 3))){ + if(((unsigned)(level+threshold1))>threshold2){ + if(level>0){ + level= (bias + level)>>QMAT_SHIFT; + coeff[0][k]= level; + coeff[1][k]= level-1; +// coeff[2][k]= level-2; + }else{ + level= (bias - level)>>QMAT_SHIFT; + coeff[0][k]= -level; + coeff[1][k]= -level+1; +// coeff[2][k]= -level+2; + } + coeff_count[k]= FFMIN(level, 2); + max |=level; + last_non_zero = i; + }else{ + coeff[0][k]= (level>>31)|1; + coeff_count[k]= 1; + } + } + + *overflow= s->max_qcoeff < max; //overflow might have happend + + if(last_non_zero < start_i){ + memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM)); + return last_non_zero; + } + + lambda= (qscale*qscale*64*82 + 50)/100; //FIXME finetune + + score_tab[0]= 0; + for(i=0; i<=last_non_zero - start_i; i++){ + 
int level_index, run, j; + const int dct_coeff= block[ scantable[i + start_i] ]; + const int zero_distoration= dct_coeff*dct_coeff; + int best_score=256*256*256*120; + + last_score += zero_distoration; + for(level_index=0; level_index < coeff_count[i]; level_index++){ + int distoration; + int level= coeff[level_index][i]; + int unquant_coeff; + + assert(level); + + if(s->out_format == FMT_H263){ + if(level>0){ + unquant_coeff= level*qmul + qadd; + }else{ + unquant_coeff= level*qmul - qadd; + } + }else{ //MPEG1 + j= s->idct_permutation[ scantable[i + start_i] ]; //FIXME optimize + if(s->mb_intra){ + if (level < 0) { + unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3; + unquant_coeff = -((unquant_coeff - 1) | 1); + } else { + unquant_coeff = (int)( level * qscale * s->intra_matrix[j]) >> 3; + unquant_coeff = (unquant_coeff - 1) | 1; + } + }else{ + if (level < 0) { + unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4; + unquant_coeff = -((unquant_coeff - 1) | 1); + } else { + unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4; + unquant_coeff = (unquant_coeff - 1) | 1; + } + } + unquant_coeff<<= 3; + } + + distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff); + level+=64; + if((level&(~127)) == 0){ + for(run=0; run<=i - left_limit; run++){ + int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda; + score += score_tab[i-run]; + + if(score < best_score){ + best_score= + score_tab[i+1]= score; + run_tab[i+1]= run; + level_tab[i+1]= level-64; + } + } + + if(s->out_format == FMT_H263){ + for(run=0; run<=i - left_limit; run++){ + int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda; + score += score_tab[i-run]; + if(score < last_score){ + last_score= score; + last_run= run; + last_level= level-64; + last_i= i+1; + } + } + } + }else{ + distoration += esc_length*lambda; + for(run=0; run<=i - left_limit; run++){ + int score= distoration + 
score_tab[i-run]; + + if(score < best_score){ + best_score= + score_tab[i+1]= score; + run_tab[i+1]= run; + level_tab[i+1]= level-64; + } + } + + if(s->out_format == FMT_H263){ + for(run=0; run<=i - left_limit; run++){ + int score= distoration + score_tab[i-run]; + if(score < last_score){ + last_score= score; + last_run= run; + last_level= level-64; + last_i= i+1; + } + } + } + } + } + + for(j=left_limit; j<=i; j++){ + score_tab[j] += zero_distoration; + } + score_limit+= zero_distoration; + if(score_tab[i+1] < score_limit) + score_limit= score_tab[i+1]; + + //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level + while(score_tab[ left_limit ] > score_limit + lambda) left_limit++; + } + + //FIXME add some cbp penalty + + if(s->out_format != FMT_H263){ + last_score= 256*256*256*120; + for(i= left_limit; i<=last_non_zero - start_i + 1; i++){ + int score= score_tab[i]; + if(i) score += lambda*2; //FIXME exacter? + + if(score < last_score){ + last_score= score; + last_i= i; + last_level= level_tab[i]; + last_run= run_tab[i]; + } + } + } + + last_non_zero= last_i - 1 + start_i; + memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM)); + + if(last_non_zero < start_i) + return last_non_zero; + + i= last_i; + assert(last_level); +//FIXME use permutated scantable + block[ s->idct_permutation[ scantable[last_non_zero] ] ]= last_level; + i -= last_run + 1; + + for(;i>0 ; i -= run_tab[i] + 1){ + const int j= s->idct_permutation[ scantable[i - 1 + start_i] ]; + + block[j]= level_tab[i]; + assert(block[j]); + } + + return last_non_zero; +} + static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow) @@ -3332,6 +3763,16 @@ static void dct_unquantize_h263_c(MpegEncContext *s, } } +char ff_get_pict_type_char(int pict_type){ + switch(pict_type){ + case I_TYPE: return 'I'; + case P_TYPE: return 'P'; + case B_TYPE: return 'B'; + case S_TYPE: return 'S'; + default: return '?'; + } +} + 
AVCodec mpeg1video_encoder = { "mpeg1video", CODEC_TYPE_VIDEO, @@ -3432,12 +3873,3 @@ AVCodec wmv1_encoder = { MPV_encode_end, }; -AVCodec wmv2_encoder = { - "wmv2", - CODEC_TYPE_VIDEO, - CODEC_ID_WMV2, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, -}; |