Diffstat (limited to 'src')
34 files changed, 1199 insertions, 562 deletions
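The central API change in this patch set is the replacement of AVCodecContext.frame_rate/frame_rate_base with a single AVRational time_base, together with the new AV_TIME_BASE_Q constant and the av_rescale_q() helper declared in avcodec.h below. As a rough illustration only (not part of the patch), the standalone C sketch that follows mirrors the documented a*bq/cq behaviour of av_rescale_q() using hypothetical *_demo names, and shows how a frame index counted in a decoder's time_base would translate into AV_TIME_BASE (microsecond) timestamps; it builds against nothing but the standard library.

/* Standalone sketch, NOT part of the patch: the *_demo types and helpers
 * only mimic the AVRational / av_rescale_q() semantics introduced below. */
#include <stdint.h>
#include <stdio.h>

typedef struct { int num, den; } rational_demo;        /* stand-in for AVRational */

#define TIME_BASE_DEMO 1000000                          /* mirrors AV_TIME_BASE   */
static const rational_demo time_base_q_demo = {1, TIME_BASE_DEMO}; /* AV_TIME_BASE_Q */

/* rescale a by bq/cq, i.e. the a*bq/cq operation av_rescale_q() is meant to do */
static int64_t rescale_q_demo(int64_t a, rational_demo bq, rational_demo cq)
{
    return a * (int64_t)bq.num * cq.den / ((int64_t)bq.den * cq.num);
}

int main(void)
{
    rational_demo time_base = {1001, 30000};  /* e.g. an NTSC 30000/1001 fps stream */
    int64_t frame;

    /* frame n has pts == n in time_base units; convert to microseconds */
    for (frame = 0; frame < 4; frame++) {
        int64_t pts_us = rescale_q_demo(frame, time_base, time_base_q_demo);
        printf("frame %lld -> pts %lld us\n", (long long)frame, (long long)pts_us);
    }
    return 0;
}

With the real API the same conversion would presumably be written as av_rescale_q(frame, avctx->time_base, AV_TIME_BASE_Q), matching how the diff below replaces the old frame_rate arithmetic in h261.c and h263.c.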
diff --git a/src/libffmpeg/libavcodec/4xm.c b/src/libffmpeg/libavcodec/4xm.c index 6932d52ab..0b4b72fac 100644 --- a/src/libffmpeg/libavcodec/4xm.c +++ b/src/libffmpeg/libavcodec/4xm.c @@ -564,7 +564,7 @@ static int decode_i_frame(FourXContext *f, uint8_t *buf, int length){ if(prestream_size + bitstream_size + 12 != length || bitstream_size > (1<<26) || prestream_size > (1<<26)){ - av_log(f->avctx, AV_LOG_ERROR, "size missmatch %d %d %d\n", prestream_size, bitstream_size, length); + av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", prestream_size, bitstream_size, length); return -1; } @@ -591,7 +591,7 @@ static int decode_i_frame(FourXContext *f, uint8_t *buf, int length){ } if(get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3) != 256) - av_log(f->avctx, AV_LOG_ERROR, "end missmatch\n"); + av_log(f->avctx, AV_LOG_ERROR, "end mismatch\n"); return 0; } @@ -607,7 +607,7 @@ static int decode_frame(AVCodecContext *avctx, frame_4cc= get32(buf); if(buf_size != get32(buf+4)+8){ - av_log(f->avctx, AV_LOG_ERROR, "size missmatch %d %d\n", buf_size, get32(buf+4)); + av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n", buf_size, get32(buf+4)); } if(frame_4cc == ff_get_fourcc("cfrm")){ @@ -643,7 +643,7 @@ static int decode_frame(AVCodecContext *avctx, frame_size= cfrm->size; if(id != avctx->frame_number){ - av_log(f->avctx, AV_LOG_ERROR, "cframe id missmatch %d %d\n", id, avctx->frame_number); + av_log(f->avctx, AV_LOG_ERROR, "cframe id mismatch %d %d\n", id, avctx->frame_number); } cfrm->size= cfrm->id= 0; diff --git a/src/libffmpeg/libavcodec/Makefile.am b/src/libffmpeg/libavcodec/Makefile.am index 8b4ae4fef..edfc2ad07 100644 --- a/src/libffmpeg/libavcodec/Makefile.am +++ b/src/libffmpeg/libavcodec/Makefile.am @@ -43,6 +43,7 @@ libavcodec_la_SOURCES = \ huffyuv.c \ idcinvideo.c \ imgconvert.c \ + indeo2.c \ indeo3.c \ integer.c \ interplayvideo.c \ @@ -115,6 +116,7 @@ noinst_HEADERS = \ fastmemcpy.h \ golomb.h \ imgconvert_template.h \ + indeo2data.h \ indeo3data.h \ integer.h \ h261data.h \ diff --git a/src/libffmpeg/libavcodec/asv1.c b/src/libffmpeg/libavcodec/asv1.c index 4ab2518ab..13976db61 100644 --- a/src/libffmpeg/libavcodec/asv1.c +++ b/src/libffmpeg/libavcodec/asv1.c @@ -557,6 +557,7 @@ static int decode_init(AVCodecContext *avctx){ common_init(avctx); init_vlcs(a); ff_init_scantable(a->dsp.idct_permutation, &a->scantable, scantab); + avctx->pix_fmt= PIX_FMT_YUV420P; a->inv_qscale= ((uint8_t*)avctx->extradata)[0]; if(a->inv_qscale == 0){ diff --git a/src/libffmpeg/libavcodec/avcodec.h b/src/libffmpeg/libavcodec/avcodec.h index 064f58df2..4de1dd573 100644 --- a/src/libffmpeg/libavcodec/avcodec.h +++ b/src/libffmpeg/libavcodec/avcodec.h @@ -17,7 +17,7 @@ extern "C" { #define FFMPEG_VERSION_INT 0x000409 #define FFMPEG_VERSION "0.4.9-pre1" -#define LIBAVCODEC_BUILD 4752 +#define LIBAVCODEC_BUILD 4754 #define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT #define LIBAVCODEC_VERSION FFMPEG_VERSION @@ -28,6 +28,7 @@ extern "C" { #define AV_NOPTS_VALUE int64_t_C(0x8000000000000000) #define AV_TIME_BASE 1000000 +#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE} /* FIXME: We cannot use ffmpeg's XvMC capabilities, since that would require * linking the ffmpeg plugin against XvMC libraries, which is a bad thing, @@ -114,6 +115,7 @@ enum CodecID { CODEC_ID_LOCO, CODEC_ID_WNV1, CODEC_ID_AASC, + CODEC_ID_INDEO2, /* various pcm "codecs" */ CODEC_ID_PCM_S16LE= 0x10000, @@ -212,6 +214,7 @@ enum CodecType { * to run on the IBM VGA graphics adapter use 6-bit palette components. 
*/ enum PixelFormat { + PIX_FMT_NONE= -1, PIX_FMT_YUV420P, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples) PIX_FMT_YUV422, ///< Packed pixel, Y0 Cb Y1 Cr PIX_FMT_RGB24, ///< Packed pixel, 3 bytes per pixel, RGBRGB... @@ -447,8 +450,8 @@ typedef struct AVPanScan{ int pict_type;\ \ /**\ - * presentation timestamp in AV_TIME_BASE (=micro seconds currently) (time when frame should be shown to user)\ - * if AV_NOPTS_VALUE then the frame_rate will be used as reference\ + * presentation timestamp in time_base units (time when frame should be shown to user)\ + * if AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed\ * - encoding: MUST be set by user\ * - decoding: set by lavc\ */\ @@ -726,13 +729,11 @@ typedef struct AVCodecContext { /* video only */ /** - * frames per sec multiplied by frame_rate_base. - * for variable fps this is the precission, so if the timestamps - * can be specified in msec precssion then this is 1000*frame_rate_base + * time base in which the timestamps are specified. * - encoding: MUST be set by user - * - decoding: set by lavc. 0 or the frame_rate if available + * - decoding: set by lavc. */ - int frame_rate; + AVRational time_base; /** * picture width / height. @@ -1426,15 +1427,6 @@ typedef struct AVCodecContext { int me_range; /** - * frame_rate_base. - * for variable fps this is 1 - * - encoding: set by user. - * - decoding: set by lavc. - * @todo move this after frame_rate - */ - - int frame_rate_base; - /** * intra quantizer bias. * - encoding: set by user. * - decoding: unused @@ -1697,14 +1689,14 @@ typedef struct AVCodecContext { int nsse_weight; /** - * number of macroblock rows at the top which are skiped. + * number of macroblock rows at the top which are skipped. * - encoding: unused * - decoding: set by user */ int skip_top; /** - * number of macroblock rows at the bottom which are skiped. + * number of macroblock rows at the bottom which are skipped. * - encoding: unused * - decoding: set by user */ @@ -2026,6 +2018,7 @@ extern AVCodec wnv1_decoder; extern AVCodec aasc_decoder; extern AVCodec alac_decoder; extern AVCodec ws_snd1_decoder; +extern AVCodec indeo2_decoder; /* pcm codecs */ #define PCM_CODEC(id, name) \ @@ -2248,6 +2241,11 @@ int64_t av_rescale(int64_t a, int64_t b, int64_t c); */ int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding); +/** + * rescale a 64bit integer by 2 rational numbers. + */ +int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq); + /* frame parsing */ typedef struct AVCodecParserContext { void *priv_data; diff --git a/src/libffmpeg/libavcodec/common.h b/src/libffmpeg/libavcodec/common.h index b9e89be46..9feb68e1f 100644 --- a/src/libffmpeg/libavcodec/common.h +++ b/src/libffmpeg/libavcodec/common.h @@ -508,8 +508,6 @@ tend= rdtsc();\ #define STOP_TIMER(id) {} #endif -#define CLAMP_TO_8BIT(d) ((d > 0xff) ? 0xff : (d < 0) ? 
0 : d) - /* avoid usage of various functions */ #define malloc please_use_av_malloc #define free please_use_av_free diff --git a/src/libffmpeg/libavcodec/dpcm.c b/src/libffmpeg/libavcodec/dpcm.c index 487203ae9..78ab8cb34 100644 --- a/src/libffmpeg/libavcodec/dpcm.c +++ b/src/libffmpeg/libavcodec/dpcm.c @@ -282,9 +282,6 @@ static int dpcm_decode_frame(AVCodecContext *avctx, } } break; - - default: - break; } *data_size = out * sizeof(short); diff --git a/src/libffmpeg/libavcodec/dsputil.c b/src/libffmpeg/libavcodec/dsputil.c index 926832ff1..2db9c6631 100644 --- a/src/libffmpeg/libavcodec/dsputil.c +++ b/src/libffmpeg/libavcodec/dsputil.c @@ -292,6 +292,7 @@ static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) static inline int w_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int w, int h, int type){ +#ifdef CONFIG_SNOW_ENCODER //idwt is in snow.c int s, i, j; const int dec_count= w==8 ? 3 : 4; int tmp[16*16]; @@ -338,6 +339,7 @@ static inline int w_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, in pix1 += line_size; pix2 += line_size; } + ff_spatial_dwt(tmp, w, h, 16, type, dec_count); s=0; @@ -369,6 +371,7 @@ static inline int w_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, in assert(s>=0); return s>>2; +#endif } static int w53_8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h){ @@ -2640,6 +2643,92 @@ static void h261_loop_filter_c(uint8_t *src, int stride){ } } +static inline void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int *tc0) +{ + int i, d; + for( i = 0; i < 4; i++ ) { + if( tc0[i] < 0 ) { + pix += 4*ystride; + continue; + } + for( d = 0; d < 4; d++ ) { + const int p0 = pix[-1*xstride]; + const int p1 = pix[-2*xstride]; + const int p2 = pix[-3*xstride]; + const int q0 = pix[0]; + const int q1 = pix[1*xstride]; + const int q2 = pix[2*xstride]; + + if( ABS( p0 - q0 ) < alpha && + ABS( p1 - p0 ) < beta && + ABS( q1 - q0 ) < beta ) { + + int tc = tc0[i]; + int i_delta; + + if( ABS( p2 - p0 ) < beta ) { + pix[-2*xstride] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0[i], tc0[i] ); + tc++; + } + if( ABS( q2 - q0 ) < beta ) { + pix[xstride] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0[i], tc0[i] ); + tc++; + } + + i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); + pix[-xstride] = clip_uint8( p0 + i_delta ); /* p0' */ + pix[0] = clip_uint8( q0 - i_delta ); /* q0' */ + } + pix += ystride; + } + } +} +static void h264_v_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + h264_loop_filter_luma_c(pix, stride, 1, alpha, beta, tc0); +} +static void h264_h_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + h264_loop_filter_luma_c(pix, 1, stride, alpha, beta, tc0); +} + +static inline void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int *tc0) +{ + int i, d; + for( i = 0; i < 4; i++ ) { + const int tc = tc0[i]; + if( tc <= 0 ) { + pix += 2*ystride; + continue; + } + for( d = 0; d < 2; d++ ) { + const int p0 = pix[-1*xstride]; + const int p1 = pix[-2*xstride]; + const int q0 = pix[0]; + const int q1 = pix[1*xstride]; + + if( ABS( p0 - q0 ) < alpha && + ABS( p1 - p0 ) < beta && + ABS( q1 - q0 ) < beta ) { + + int delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); + + pix[-xstride] = clip_uint8( p0 + delta ); /* p0' */ + pix[0] = clip_uint8( q0 - delta ); /* q0' */ + } + pix += ystride; + } + } 
+} +static void h264_v_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + h264_loop_filter_chroma_c(pix, stride, 1, alpha, beta, tc0); +} +static void h264_h_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + h264_loop_filter_chroma_c(pix, 1, stride, alpha, beta, tc0); +} + static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; @@ -3739,6 +3828,11 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->diff_bytes= diff_bytes_c; c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c; c->bswap_buf= bswap_buf; + + c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_c; + c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_c; + c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_c; + c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_c; c->h263_h_loop_filter= h263_h_loop_filter_c; c->h263_v_loop_filter= h263_v_loop_filter_c; diff --git a/src/libffmpeg/libavcodec/dsputil.h b/src/libffmpeg/libavcodec/dsputil.h index c728a24d6..10d2d072a 100644 --- a/src/libffmpeg/libavcodec/dsputil.h +++ b/src/libffmpeg/libavcodec/dsputil.h @@ -277,6 +277,11 @@ typedef struct DSPContext { */ void (*sub_hfyu_median_prediction)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top); void (*bswap_buf)(uint32_t *dst, uint32_t *src, int w); + + void (*h264_v_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int *tc0); + void (*h264_h_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int *tc0); + void (*h264_v_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int *tc0); + void (*h264_h_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int *tc0); void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale); void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale); diff --git a/src/libffmpeg/libavcodec/error_resilience.c b/src/libffmpeg/libavcodec/error_resilience.c index b0d22ddf9..2bb2276cd 100644 --- a/src/libffmpeg/libavcodec/error_resilience.c +++ b/src/libffmpeg/libavcodec/error_resilience.c @@ -346,7 +346,7 @@ static void guess_mv(MpegEncContext *s){ s->mv_dir = MV_DIR_FORWARD; s->mb_intra=0; s->mv_type = MV_TYPE_16X16; - s->mb_skiped=0; + s->mb_skipped=0; s->dsp.clear_blocks(s->block[0]); @@ -474,7 +474,7 @@ int score_sum=0; s->mv_dir = MV_DIR_FORWARD; s->mb_intra=0; s->mv_type = MV_TYPE_16X16; - s->mb_skiped=0; + s->mb_skipped=0; s->dsp.clear_blocks(s->block[0]); @@ -858,7 +858,7 @@ void ff_er_frame_end(MpegEncContext *s){ s->mv_dir = MV_DIR_FORWARD; s->mb_intra=0; - s->mb_skiped=0; + s->mb_skipped=0; if(IS_8X8(mb_type)){ int mb_index= mb_x*2 + mb_y*2*s->b8_stride; int j; @@ -897,7 +897,7 @@ void ff_er_frame_end(MpegEncContext *s){ s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD; s->mb_intra=0; s->mv_type = MV_TYPE_16X16; - s->mb_skiped=0; + s->mb_skipped=0; if(s->pp_time){ int time_pp= s->pp_time; diff --git a/src/libffmpeg/libavcodec/ffv1.c b/src/libffmpeg/libavcodec/ffv1.c index 6a4c6ed3f..107eab1bc 100644 --- a/src/libffmpeg/libavcodec/ffv1.c +++ b/src/libffmpeg/libavcodec/ffv1.c @@ -551,7 +551,7 @@ static int encode_init(AVCodecContext *avctx) int i; if(avctx->strict_std_compliance >= 0){ - av_log(avctx, AV_LOG_ERROR, "this codec is under development, files encoded with it wont be decodeable with future versions!!!\n" + av_log(avctx, AV_LOG_ERROR, "this codec is under development, files encoded with it may not be decodeable with future versions!!!\n" "use vstrict=-1 / -strict -1 to use 
it anyway\n"); return -1; } diff --git a/src/libffmpeg/libavcodec/flac.c b/src/libffmpeg/libavcodec/flac.c index 894da9384..17082e432 100644 --- a/src/libffmpeg/libavcodec/flac.c +++ b/src/libffmpeg/libavcodec/flac.c @@ -556,7 +556,7 @@ static int decode_frame(FLACContext *s) skip_bits(&s->gb, 8); crc8= get_crc8(s->gb.buffer, get_bits_count(&s->gb)/8); if(crc8){ - av_log(s->avctx, AV_LOG_ERROR, "header crc missmatch crc=%2X\n", crc8); + av_log(s->avctx, AV_LOG_ERROR, "header crc mismatch crc=%2X\n", crc8); return -1; } diff --git a/src/libffmpeg/libavcodec/h261.c b/src/libffmpeg/libavcodec/h261.c index aceebaa38..f9e148455 100644 --- a/src/libffmpeg/libavcodec/h261.c +++ b/src/libffmpeg/libavcodec/h261.c @@ -103,8 +103,8 @@ void ff_h261_encode_picture_header(MpegEncContext * s, int picture_number){ put_bits(&s->pb, 20, 0x10); /* PSC */ - temp_ref= s->picture_number * (int64_t)30000 * s->avctx->frame_rate_base / - (1001 * (int64_t)s->avctx->frame_rate); + temp_ref= s->picture_number * (int64_t)30000 * s->avctx->time_base.num / + (1001 * (int64_t)s->avctx->time_base.den); //FIXME maybe this should use a timestamp put_bits(&s->pb, 5, temp_ref & 0x1f); /* TemporalReference */ put_bits(&s->pb, 1, 0); /* split screen off */ @@ -531,7 +531,6 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 ) xy = s->mb_x + s->mb_y * s->mb_stride; ff_init_block_index(s); ff_update_block_index(s); - s->dsp.clear_blocks(s->block[0]); for(j=0;j<6;j++) s->block_last_index[j] = -1; @@ -541,7 +540,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 ) s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - s->mb_skiped = 1; + s->mb_skipped = 1; h->mtype &= ~MB_TYPE_H261_FIL; MPV_decode_mb(s, s->block); @@ -606,7 +605,6 @@ static int h261_decode_mb(H261Context *h){ xy = s->mb_x + s->mb_y * s->mb_stride; ff_init_block_index(s); ff_update_block_index(s); - s->dsp.clear_blocks(s->block[0]); // Read mtype h->mtype = get_vlc2(&s->gb, h261_mtype_vlc.table, H261_MTYPE_VLC_BITS, 2); @@ -661,12 +659,16 @@ static int h261_decode_mb(H261Context *h){ intra: /* decode each block */ if(s->mb_intra || HAS_CBP(h->mtype)){ + s->dsp.clear_blocks(s->block[0]); for (i = 0; i < 6; i++) { if (h261_decode_block(h, s->block[i], i, cbp&32) < 0){ return SLICE_ERROR; } cbp+=cbp; } + }else{ + for (i = 0; i < 6; i++) + s->block_last_index[i]= -1; } MPV_decode_mb(s, s->block); diff --git a/src/libffmpeg/libavcodec/h263.c b/src/libffmpeg/libavcodec/h263.c index 81c3648f1..80a283bb5 100644 --- a/src/libffmpeg/libavcodec/h263.c +++ b/src/libffmpeg/libavcodec/h263.c @@ -110,7 +110,7 @@ max run: 29/41 */ #endif -#if 0 //3IV1 is quite rare and tis slows things down a tiny bit +#if 0 //3IV1 is quite rare and it slows things down a tiny bit #define IS_3IV1 s->avctx->codec_tag == ff_get_fourcc("3IV1") #else #define IS_3IV1 0 @@ -160,8 +160,8 @@ void ff_flv_encode_picture_header(MpegEncContext * s, int picture_number) put_bits(&s->pb, 17, 1); put_bits(&s->pb, 5, (s->h263_flv-1)); /* 0: h263 escape codes 1: 11-bit escape codes */ - put_bits(&s->pb, 8, (((int64_t)s->picture_number * 30 * s->avctx->frame_rate_base) / - s->avctx->frame_rate) & 0xff); /* TemporalReference */ + put_bits(&s->pb, 8, (((int64_t)s->picture_number * 30 * s->avctx->time_base.num) / //FIXME use timestamp + s->avctx->time_base.den) & 0xff); /* TemporalReference */ if (s->width == 352 && s->height == 288) format = 2; else if (s->width == 176 && s->height == 144) @@ -208,9 +208,9 @@ void 
h263_encode_picture_header(MpegEncContext * s, int picture_number) if(s->h263_plus){ for(i=0; i<2; i++){ int div, error; - div= (s->avctx->frame_rate_base*1800000LL + 500LL*s->avctx->frame_rate) / ((1000LL+i)*s->avctx->frame_rate); + div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den); div= clip(1, div, 127); - error= ABS(s->avctx->frame_rate_base*1800000LL - (1000LL+i)*s->avctx->frame_rate*div); + error= ABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div); if(error < best_error){ best_error= error; best_divisor= div; @@ -227,8 +227,8 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number) /* Update the pointer to last GOB */ s->ptr_lastgob = pbBufPtr(&s->pb); put_bits(&s->pb, 22, 0x20); /* PSC */ - temp_ref= s->picture_number * (int64_t)coded_frame_rate * s->avctx->frame_rate_base / - (coded_frame_rate_base * (int64_t)s->avctx->frame_rate); + temp_ref= s->picture_number * (int64_t)coded_frame_rate * s->avctx->time_base.num / //FIXME use timestamp + (coded_frame_rate_base * (int64_t)s->avctx->time_base.den); put_bits(&s->pb, 8, temp_ref & 0xff); /* TemporalReference */ put_bits(&s->pb, 1, 1); /* marker */ @@ -523,8 +523,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){ if(s->pict_type== B_TYPE){ int odd=0; - /* ok, come on, this isnt funny anymore, theres more code for handling this mpeg4 mess than - for the actual adaptive quantization */ + /* ok, come on, this isn't funny anymore, there's more code for handling this mpeg4 mess than for the actual adaptive quantization */ for(i=0; i<s->mb_num; i++){ int mb_xy= s->mb_index2xy[i]; @@ -615,11 +614,11 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){ void ff_h263_update_motion_val(MpegEncContext * s){ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; - //FIXME a lot of thet is only needed for !low_delay + //FIXME a lot of that is only needed for !low_delay const int wrap = s->b8_stride; const int xy = s->block_index[0]; - s->current_picture.mbskip_table[mb_xy]= s->mb_skiped; + s->current_picture.mbskip_table[mb_xy]= s->mb_skipped; if(s->mv_type != MV_TYPE_8X8){ int motion_x, motion_y; @@ -855,16 +854,16 @@ void mpeg4_encode_mb(MpegEncContext * s, assert((s->dquant&1)==0); assert(mb_type>=0); - /* nothing to do if this MB was skiped in the next P Frame */ + /* nothing to do if this MB was skipped in the next P Frame */ if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ... s->skip_count++; s->mv[0][0][0]= s->mv[0][0][1]= s->mv[1][0][0]= s->mv[1][0][1]= 0; - s->mv_dir= MV_DIR_FORWARD; //doesnt matter + s->mv_dir= MV_DIR_FORWARD; //doesn't matter s->qscale -= s->dquant; -// s->mb_skiped=1; +// s->mb_skipped=1; return; } @@ -887,7 +886,7 @@ void mpeg4_encode_mb(MpegEncContext * s, put_bits(&s->pb, 1, 0); /* mb coded modb1=0 */ put_bits(&s->pb, 1, cbp ? 
0 : 1); /* modb2 */ //FIXME merge - put_bits(&s->pb, mb_type+1, 1); // this table is so simple that we dont need it :) + put_bits(&s->pb, mb_type+1, 1); // this table is so simple that we don't need it :) if(cbp) put_bits(&s->pb, 6, cbp); if(cbp && mb_type){ @@ -901,7 +900,7 @@ void mpeg4_encode_mb(MpegEncContext * s, if(!s->progressive_sequence){ if(cbp) put_bits(&s->pb, 1, s->interlaced_dct); - if(mb_type) // not diect mode + if(mb_type) // not direct mode put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD); } @@ -976,7 +975,7 @@ void mpeg4_encode_mb(MpegEncContext * s, if ((cbp | motion_x | motion_y | s->dquant) == 0 && s->mv_type==MV_TYPE_16X16) { /* check if the B frames can skip it too, as we must skip it if we skip here - why didnt they just compress the skip-mb bits instead of reusing them ?! */ + why didn't they just compress the skip-mb bits instead of reusing them ?! */ if(s->max_b_frames>0){ int i; int x,y, offset; @@ -990,7 +989,7 @@ void mpeg4_encode_mb(MpegEncContext * s, offset= x + y*s->linesize; p_pic= s->new_picture.data[0] + offset; - s->mb_skiped=1; + s->mb_skipped=1; for(i=0; i<s->max_b_frames; i++){ uint8_t *b_pic; int diff; @@ -1001,14 +1000,14 @@ void mpeg4_encode_mb(MpegEncContext * s, b_pic= pic->data[0] + offset + 16; //FIXME +16 diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16); if(diff>s->qscale*70){ //FIXME check that 70 is optimal - s->mb_skiped=0; + s->mb_skipped=0; break; } } }else - s->mb_skiped=1; + s->mb_skipped=1; - if(s->mb_skiped==1){ + if(s->mb_skipped==1){ /* skip macroblock */ put_bits(&s->pb, 1, 1); @@ -1623,7 +1622,7 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir, A = mot_val[ - 1]; /* special case for first (slice) line */ if (s->first_slice_line && block<3) { - // we cant just change some MVs to simulate that as we need them for the B frames (and ME) + // we can't just change some MVs to simulate that as we need them for the B frames (and ME) // and if we ever support non rectangular objects than we need to do a few ifs here anyway :( if(block==0){ //most common case if(s->mb_x == s->resync_mb_x){ //rare @@ -2054,7 +2053,7 @@ void h263_encode_init(MpegEncContext *s) s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; break; - default: //nothing needed default table allready set in mpegvideo.c + default: //nothing needed - default table already set in mpegvideo.c s->min_qcoeff= -127; s->max_qcoeff= 127; s->y_dc_scale_table= @@ -2210,10 +2209,10 @@ void ff_set_mpeg4_time(MpegEncContext * s, int picture_number){ int time_div, time_mod; assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE); - s->time= (s->current_picture_ptr->pts*s->time_increment_resolution + AV_TIME_BASE/2)/AV_TIME_BASE; + s->time= s->current_picture_ptr->pts*s->avctx->time_base.num; - time_div= s->time/s->time_increment_resolution; - time_mod= s->time%s->time_increment_resolution; + time_div= s->time/s->avctx->time_base.den; + time_mod= s->time%s->avctx->time_base.den; if(s->pict_type==B_TYPE){ s->pb_time= s->pp_time - (s->last_non_b_time - s->time); @@ -2237,9 +2236,9 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){ time= s->current_picture_ptr->pts; if(s->reordered_input_picture[1]) time= FFMIN(time, s->reordered_input_picture[1]->pts); - time= (time*s->time_increment_resolution + AV_TIME_BASE/2)/AV_TIME_BASE; + time= time*s->avctx->time_base.num; - seconds= time/s->time_increment_resolution; + seconds= time/s->avctx->time_base.den; minutes= seconds/60; seconds %= 60; hours= minutes/60; minutes %= 60; hours%=24; @@ -2252,7 
+2251,7 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){ put_bits(&s->pb, 1, !!(s->flags&CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); //broken link == NO - s->last_time_base= time / s->time_increment_resolution; + s->last_time_base= time / s->avctx->time_base.den; ff_mpeg4_stuffing(&s->pb); } @@ -2349,7 +2348,7 @@ static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_n put_bits(&s->pb, 2, RECT_SHAPE); /* vol shape= rectangle */ put_bits(&s->pb, 1, 1); /* marker bit */ - put_bits(&s->pb, 16, s->time_increment_resolution); + put_bits(&s->pb, 16, s->avctx->time_base.den); if (s->time_increment_bits < 1) s->time_increment_bits = 1; put_bits(&s->pb, 1, 1); /* marker bit */ @@ -2420,14 +2419,14 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number) s->partitioned_frame= s->data_partitioning && s->pict_type!=B_TYPE; -//printf("num:%d rate:%d base:%d\n", s->picture_number, s->frame_rate, FRAME_RATE_BASE); +//printf("num:%d rate:%d base:%d\n", s->picture_number, s->time_base.den, FRAME_RATE_BASE); put_bits(&s->pb, 16, 0); /* vop header */ put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */ put_bits(&s->pb, 2, s->pict_type - 1); /* pict type: I = 0 , P = 1 */ - time_div= s->time/s->time_increment_resolution; - time_mod= s->time%s->time_increment_resolution; + time_div= s->time/s->avctx->time_base.den; + time_mod= s->time%s->avctx->time_base.den; time_incr= time_div - s->last_time_base; assert(time_incr >= 0); while(time_incr--) @@ -2509,7 +2508,7 @@ static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, int level, int *di b = dc_val[ - 1 - wrap]; c = dc_val[ - wrap]; - /* outside slice handling (we cant do that by memset as we need the dc for error resilience) */ + /* outside slice handling (we can't do that by memset as we need the dc for error resilience) */ if(s->first_slice_line && n!=3){ if(n!=2) b=c= 1024; if(n!=1 && s->mb_x == s->resync_mb_x) b=a= 1024; @@ -3211,7 +3210,7 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s) } if(s->pict_type == B_TYPE){ while(s->next_picture.mbskip_table[ s->mb_index2xy[ mb_num ] ]) mb_num++; - if(mb_num >= s->mb_num) return -1; // slice contains just skiped MBs which where allready decoded + if(mb_num >= s->mb_num) return -1; // slice contains just skipped MBs which where allready decoded } s->mb_x= mb_num % s->mb_width; @@ -3242,7 +3241,7 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s) if(s->shape != BIN_ONLY_SHAPE){ skip_bits(&s->gb, 3); /* intra dc vlc threshold */ -//FIXME dont just ignore everything +//FIXME don't just ignore everything if(s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ mpeg4_decode_sprite_trajectory(s, &s->gb); av_log(s->avctx, AV_LOG_ERROR, "untested\n"); @@ -3293,7 +3292,7 @@ void ff_mpeg4_clean_buffers(MpegEncContext *s) memset(s->ac_val[2] + c_xy, 0, (c_wrap +1)*16*sizeof(int16_t)); /* clean MV */ - // we cant clear the MVs as they might be needed by a b frame + // we can't clear the MVs as they might be needed by a b frame // memset(s->motion_val + l_xy, 0, (l_wrap*2+1)*2*sizeof(int16_t)); // memset(s->motion_val, 0, 2*sizeof(int16_t)*(2 + s->mb_width*2)*(2 + s->mb_height*2)); s->last_mv[0][0][0]= @@ -3322,7 +3321,7 @@ int ff_h263_resync(MpegEncContext *s){ if(ret>=0) return 0; } - //ok, its not where its supposed to be ... + //ok, it's not where its supposed to be ... 
s->gb= s->last_resync_gb; align_get_bits(&s->gb); left= s->gb.size_in_bits - get_bits_count(&s->gb); @@ -3729,10 +3728,10 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) s->mv_type = MV_TYPE_16X16; if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ s->mcsel=1; - s->mb_skiped = 0; + s->mb_skipped = 0; }else{ s->mcsel=0; - s->mb_skiped = 1; + s->mb_skipped = 1; } }else if(s->mb_intra){ s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]); @@ -3753,6 +3752,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) if (!IS_SKIP(mb_type)) { int i; + s->dsp.clear_blocks(s->block[0]); /* decode each block */ for (i = 0; i < 6; i++) { if(mpeg4_decode_block(s, block[i], i, cbp&32, s->mb_intra, s->rvlc) < 0){ @@ -3910,7 +3910,7 @@ int ff_h263_decode_mb(MpegEncContext *s, s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - s->mb_skiped = !(s->obmc | s->loop_filter); + s->mb_skipped = !(s->obmc | s->loop_filter); goto end; } cbpc = get_vlc2(&s->gb, inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); @@ -3921,6 +3921,8 @@ int ff_h263_decode_mb(MpegEncContext *s, } }while(cbpc == 20); + s->dsp.clear_blocks(s->block[0]); + dquant = cbpc & 8; s->mb_intra = ((cbpc & 4) != 0); if (s->mb_intra) goto intra; @@ -4024,6 +4026,7 @@ int ff_h263_decode_mb(MpegEncContext *s, s->mb_intra = IS_INTRA(mb_type); if(HAS_CBP(mb_type)){ + s->dsp.clear_blocks(s->block[0]); cbpc = get_vlc2(&s->gb, cbpc_b_vlc.table, CBPC_B_VLC_BITS, 1); if(s->mb_intra){ dquant = IS_QUANT(mb_type); @@ -4102,6 +4105,8 @@ int ff_h263_decode_mb(MpegEncContext *s, } }while(cbpc == 8); + s->dsp.clear_blocks(s->block[0]); + dquant = cbpc & 4; s->mb_intra = 1; intra: @@ -4175,13 +4180,13 @@ int ff_mpeg4_decode_mb(MpegEncContext *s, s->mv[0][0][0]= get_amv(s, 0); s->mv[0][0][1]= get_amv(s, 1); - s->mb_skiped = 0; + s->mb_skipped = 0; }else{ s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel=0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - s->mb_skiped = 1; + s->mb_skipped = 1; } goto end; } @@ -4193,6 +4198,7 @@ int ff_mpeg4_decode_mb(MpegEncContext *s, } }while(cbpc == 20); + s->dsp.clear_blocks(s->block[0]); dquant = cbpc & 8; s->mb_intra = ((cbpc & 4) != 0); if (s->mb_intra) goto intra; @@ -4294,9 +4300,9 @@ int ff_mpeg4_decode_mb(MpegEncContext *s, } /* if we skipped it in the future P Frame than skip it now too */ - s->mb_skiped= s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC + s->mb_skipped= s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC - if(s->mb_skiped){ + if(s->mb_skipped){ /* skip mb */ for(i=0;i<6;i++) s->block_last_index[i] = -1; @@ -4324,7 +4330,10 @@ int ff_mpeg4_decode_mb(MpegEncContext *s, } mb_type= mb_type_b_map[ mb_type ]; if(modb2) cbp= 0; - else cbp= get_bits(&s->gb, 6); + else{ + s->dsp.clear_blocks(s->block[0]); + cbp= get_bits(&s->gb, 6); + } if ((!IS_DIRECT(mb_type)) && cbp) { if(get_bits1(&s->gb)){ @@ -4442,6 +4451,7 @@ intra: if(!s->progressive_sequence) s->interlaced_dct= get_bits1(&s->gb); + s->dsp.clear_blocks(s->block[0]); /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(s, block[i], i, cbp&32, 1, 0) < 0) @@ -4556,6 +4566,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, } else if (s->mb_intra) { /* DC coef */ if(s->codec_id == CODEC_ID_RV10){ +#ifdef CONFIG_RV10_DECODER if (s->rv10_version == 3 && s->pict_type 
== I_TYPE) { int component, diff; component = (n <= 3 ? 0 : n - 4 + 1); @@ -4575,6 +4586,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, if (level == 255) level = 128; } +#endif }else{ level = get_bits(&s->gb, 8); if((level&0x7F) == 0){ @@ -4638,7 +4650,7 @@ retry: i += run; if (i >= 64){ if(s->alt_inter_vlc && rl == &rl_inter && !s->mb_intra){ - //looks like a hack but no, its the way its supposed to work ... + //looks like a hack but no, it's the way its supposed to work ... rl = &rl_intra_aic; i = 0; s->gb= gb; @@ -5043,8 +5055,7 @@ int h263_decode_picture_header(MpegEncContext *s) s->width = width; s->height = height; s->avctx->sample_aspect_ratio= (AVRational){12,11}; - s->avctx->frame_rate = 30000; - s->avctx->frame_rate_base= 1001; + s->avctx->time_base= (AVRational){1001, 30000}; } else { int ufep; @@ -5139,20 +5150,19 @@ int h263_decode_picture_header(MpegEncContext *s) if(s->custom_pcf){ int gcd; - s->avctx->frame_rate= 1800000; - s->avctx->frame_rate_base= 1000 + get_bits1(&s->gb); - s->avctx->frame_rate_base*= get_bits(&s->gb, 7); - if(s->avctx->frame_rate_base == 0){ + s->avctx->time_base.den= 1800000; + s->avctx->time_base.num= 1000 + get_bits1(&s->gb); + s->avctx->time_base.num*= get_bits(&s->gb, 7); + if(s->avctx->time_base.num == 0){ av_log(s, AV_LOG_ERROR, "zero framerate\n"); return -1; } - gcd= ff_gcd(s->avctx->frame_rate, s->avctx->frame_rate_base); - s->avctx->frame_rate /= gcd; - s->avctx->frame_rate_base /= gcd; -// av_log(s->avctx, AV_LOG_DEBUG, "%d/%d\n", s->avctx->frame_rate, s->avctx->frame_rate_base); + gcd= ff_gcd(s->avctx->time_base.den, s->avctx->time_base.num); + s->avctx->time_base.den /= gcd; + s->avctx->time_base.num /= gcd; +// av_log(s->avctx, AV_LOG_DEBUG, "%d/%d\n", s->avctx->time_base.den, s->avctx->time_base.num); }else{ - s->avctx->frame_rate = 30000; - s->avctx->frame_rate_base= 1001; + s->avctx->time_base= (AVRational){1001, 30000}; } } @@ -5223,7 +5233,7 @@ int h263_decode_picture_header(MpegEncContext *s) s->modified_quant ? " MQ" : "", s->loop_filter ? " LOOP" : "", s->h263_slice_structured ? " SS" : "", - s->avctx->frame_rate, s->avctx->frame_rate_base + s->avctx->time_base.den, s->avctx->time_base.num ); } #if 1 @@ -5287,7 +5297,7 @@ static void mpeg4_decode_sprite_trajectory(MpegEncContext * s, GetBitContext *gb w2= 1<<alpha; h2= 1<<beta; -// Note, the 4th point isnt used for GMC +// Note, the 4th point isn't used for GMC if(s->divx_version==500 && s->divx_build==413){ sprite_ref[0][0]= a*vop_ref[0][0] + d[0][0]; sprite_ref[0][1]= a*vop_ref[0][1] + d[0][1]; @@ -5512,7 +5522,7 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ skip_bits1(gb); /* marker */ } }else{ - // set low delay flag only once so the smart? low delay detection wont be overriden + // set low delay flag only once the smartest? 
low delay detection won't be overriden if(s->picture_number==0) s->low_delay=0; } @@ -5526,18 +5536,19 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ check_marker(gb, "before time_increment_resolution"); - s->time_increment_resolution = get_bits(gb, 16); + s->avctx->time_base.den = get_bits(gb, 16); - s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1; + s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1; if (s->time_increment_bits < 1) s->time_increment_bits = 1; check_marker(gb, "before fixed_vop_rate"); if (get_bits1(gb) != 0) { /* fixed_vop_rate */ - skip_bits(gb, s->time_increment_bits); - } - + s->avctx->time_base.num = get_bits(gb, s->time_increment_bits); + }else + s->avctx->time_base.num = 1; + s->t_frame=0; if (s->shape != BIN_ONLY_SHAPE) { @@ -5586,7 +5597,7 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ if (get_bits1(gb) == 1) { /* not_8_bit */ s->quant_precision = get_bits(gb, 4); /* quant_precision */ if(get_bits(gb, 4)!=8) av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n"); /* bits_per_pixel */ - if(s->quant_precision!=5) av_log(s->avctx, AV_LOG_ERROR, "quant precission %d\n", s->quant_precision); + if(s->quant_precision!=5) av_log(s->avctx, AV_LOG_ERROR, "quant precision %d\n", s->quant_precision); } else { s->quant_precision = 5; } @@ -5778,7 +5789,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ s->pict_type = get_bits(gb, 2) + I_TYPE; /* pict type: I = 0 , P = 1 */ if(s->pict_type==B_TYPE && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){ - av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set, but shouldnt, clearing it\n"); + av_log(s->avctx, AV_LOG_ERROR, "low_delay flag incorrectly, clearing it\n"); s->low_delay=0; } @@ -5788,8 +5799,8 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ else s->decode_mb= ff_mpeg4_decode_mb; - if(s->time_increment_resolution==0){ - s->time_increment_resolution=1; + if(s->avctx->time_base.den==0){ + s->avctx->time_base.den=1; // fprintf(stderr, "time_increment_resolution is illegal\n"); } time_incr=0; @@ -5799,7 +5810,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ check_marker(gb, "before time_increment"); if(s->time_increment_bits==0){ - av_log(s->avctx, AV_LOG_ERROR, "hmm, seems the headers arnt complete, trying to guess time_increment_bits\n"); + av_log(s->avctx, AV_LOG_ERROR, "hmm, seems the headers are not complete, trying to guess time_increment_bits\n"); for(s->time_increment_bits=1 ;s->time_increment_bits<16; s->time_increment_bits++){ if(show_bits(gb, s->time_increment_bits+1)&1) break; @@ -5816,22 +5827,22 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ if(s->pict_type!=B_TYPE){ s->last_time_base= s->time_base; s->time_base+= time_incr; - s->time= s->time_base*s->time_increment_resolution + time_increment; + s->time= s->time_base*s->avctx->time_base.den + time_increment; if(s->workaround_bugs&FF_BUG_UMP4){ if(s->time < s->last_non_b_time){ // fprintf(stderr, "header is not mpeg4 compatible, broken encoder, trying to workaround\n"); s->time_base++; - s->time+= s->time_increment_resolution; + s->time+= s->avctx->time_base.den; } } s->pp_time= s->time - s->last_non_b_time; s->last_non_b_time= s->time; }else{ - s->time= (s->last_time_base + time_incr)*s->time_increment_resolution + time_increment; + s->time= (s->last_time_base + time_incr)*s->avctx->time_base.den + time_increment; s->pb_time= s->pp_time - 
(s->last_non_b_time - s->time); if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){ -// printf("messed up order, seeking?, skiping current b frame\n"); - return FRAME_SKIPED; +// printf("messed up order, maybe after seeking? skipping current b frame\n"); + return FRAME_SKIPPED; } if(s->t_frame==0) s->t_frame= s->pb_time; @@ -5843,9 +5854,9 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ } //av_log(s->avctx, AV_LOG_DEBUG, "last nonb %Ld last_base %d time %Ld pp %d pb %d t %d ppf %d pbf %d\n", s->last_non_b_time, s->last_time_base, s->time, s->pp_time, s->pb_time, s->t_frame, s->pp_field_time, s->pb_field_time); - s->current_picture_ptr->pts= s->time*(int64_t)AV_TIME_BASE / s->time_increment_resolution; + s->current_picture_ptr->pts= (s->time + s->avctx->time_base.num/2) / s->avctx->time_base.num; if(s->avctx->debug&FF_DEBUG_PTS) - av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %f\n", s->current_picture_ptr->pts/(float)AV_TIME_BASE); + av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %f\n", s->current_picture_ptr->pts); check_marker(gb, "before vop_coded"); @@ -5853,9 +5864,9 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ if (get_bits1(gb) != 1){ if(s->avctx->debug&FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n"); - return FRAME_SKIPED; + return FRAME_SKIPPED; } -//printf("time %d %d %d || %Ld %Ld %Ld\n", s->time_increment_bits, s->time_increment_resolution, s->time_base, +//printf("time %d %d %d || %Ld %Ld %Ld\n", s->time_increment_bits, s->avctx->time_base.den, s->time_base, //s->time, s->last_non_b_time, s->last_non_b_time - s->pp_time); if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == P_TYPE || (s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) { @@ -5951,20 +5962,20 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ if(s->enhancement_type){ int load_backward_shape= get_bits1(gb); if(load_backward_shape){ - av_log(s->avctx, AV_LOG_ERROR, "load backward shape isnt supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "load backward shape isn't supported\n"); } } skip_bits(gb, 2); //ref_select_code } } - /* detect buggy encoders which dont set the low_delay flag (divx4/xvid/opendivx)*/ - // note we cannot detect divx5 without b-frames easyly (allthough its buggy too) + /* detect buggy encoders which don't set the low_delay flag (divx4/xvid/opendivx)*/ + // note we cannot detect divx5 without b-frames easily (although it's buggy too) if(s->vo_type==0 && s->vol_control_parameters==0 && s->divx_version==0 && s->picture_number==0){ av_log(s->avctx, AV_LOG_ERROR, "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n"); s->low_delay=1; } - s->picture_number++; // better than pic number==0 allways ;) + s->picture_number++; // better than pic number==0 always ;) s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table; //FIXME add short header support s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table; @@ -6003,7 +6014,7 @@ int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb) if(get_bits_count(gb) >= gb->size_in_bits){ if(gb->size_in_bits==8 && (s->divx_version || s->xvid_build)){ av_log(s->avctx, AV_LOG_ERROR, "frame skip %d\n", gb->size_in_bits); - return FRAME_SKIPED; //divx bug + return FRAME_SKIPPED; //divx bug }else return -1; //end of stream } @@ -6032,11 +6043,11 @@ int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb) else if(startcode==0x1BB) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start"); 
else if(startcode==0x1BC) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start"); else if(startcode==0x1BD) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start"); - else if(startcode==0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Textutre Object start"); - else if(startcode==0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Textutre Spatial Layer start"); - else if(startcode==0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Textutre SNR Layer start"); - else if(startcode==0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Textutre Tile start"); - else if(startcode==0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Textutre Shape Layer start"); + else if(startcode==0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start"); + else if(startcode==0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start"); + else if(startcode==0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start"); + else if(startcode==0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start"); + else if(startcode==0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start"); else if(startcode==0x1C3) av_log(s->avctx, AV_LOG_DEBUG, "stuffing start"); else if(startcode<=0x1C5) av_log(s->avctx, AV_LOG_DEBUG, "reserved"); else if(startcode<=0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start"); diff --git a/src/libffmpeg/libavcodec/h263dec.c b/src/libffmpeg/libavcodec/h263dec.c index da2bd54a2..04c1e2726 100644 --- a/src/libffmpeg/libavcodec/h263dec.c +++ b/src/libffmpeg/libavcodec/h263dec.c @@ -195,11 +195,10 @@ static int decode_slice(MpegEncContext *s){ } /* DCT & quantize */ - s->dsp.clear_blocks(s->block[0]); - + s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; -// s->mb_skiped = 0; +// s->mb_skipped = 0; //printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24)); ret= s->decode_mb(s, s->block); @@ -451,7 +450,7 @@ uint64_t time= rdtsc(); }else if(s->codec_id==CODEC_ID_H263){ next= h263_find_frame_end(&s->parse_context, buf, buf_size); }else{ - av_log(s->avctx, AV_LOG_ERROR, "this codec doesnt support truncated bitstreams\n"); + av_log(s->avctx, AV_LOG_ERROR, "this codec does not support truncated bitstreams\n"); return -1; } @@ -503,7 +502,7 @@ retry: ret = h263_decode_picture_header(s); } - if(ret==FRAME_SKIPED) return get_consumed_bytes(s, buf_size); + if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size); /* skip if the header was thrashed */ if (ret < 0){ diff --git a/src/libffmpeg/libavcodec/h264.c b/src/libffmpeg/libavcodec/h264.c index 39ee4c6a2..9048886bd 100644 --- a/src/libffmpeg/libavcodec/h264.c +++ b/src/libffmpeg/libavcodec/h264.c @@ -162,7 +162,7 @@ typedef struct H264Context{ int chroma_qp; //QPc - int prev_mb_skiped; //FIXME remove (IMHO not used) + int prev_mb_skipped; //FIXME remove (IMHO not used) //prediction stuff int chroma_pred_mode; @@ -209,8 +209,8 @@ typedef struct H264Context{ */ int block_offset[2*(16+8)]; - uint16_t *mb2b_xy; //FIXME are these 4 a good idea? - uint16_t *mb2b8_xy; + uint32_t *mb2b_xy; //FIXME are these 4 a good idea? + uint32_t *mb2b8_xy; int b_stride; //FIXME use s->b4_stride int b8_stride; @@ -227,7 +227,7 @@ typedef struct H264Context{ /** * current pps */ - PPS pps; //FIXME move tp Picture perhaps? (->no) do we need that? + PPS pps; //FIXME move to Picture perhaps? (->no) do we need that? 
int slice_num; uint8_t *slice_table_base; @@ -370,7 +370,7 @@ static inline void fill_rectangle(void *vp, int w, int h, int stride, uint32_t v stride *= size; assert((((int)vp)&(FFMIN(w, STRIDE_ALIGN)-1)) == 0); -//FIXME check what gcc generates for 64 bit on x86 and possible write a 32 bit ver of it +//FIXME check what gcc generates for 64 bit on x86 and possibly write a 32 bit ver of it if(w==2 && h==2){ *(uint16_t*)(p + 0)= *(uint16_t*)(p + stride)= size==4 ? val : val*0x0101; @@ -428,7 +428,13 @@ static inline void fill_caches(H264Context *h, int mb_type, int for_deblock){ int left_block[8]; int i; - //wow what a mess, why didnt they simplify the interlacing&intra stuff, i cant imagine that these complex rules are worth it + //FIXME deblocking can skip fill_caches much of the time with multiple slices too. + // the actual condition is whether we're on the edge of a slice, + // and even then the intra and nnz parts are unnecessary. + if(for_deblock && h->slice_num == 1) + return; + + //wow what a mess, why didn't they simplify the interlacing&intra stuff, i can't imagine that these complex rules are worth it top_xy = mb_xy - s->mb_stride; topleft_xy = top_xy - 1; @@ -662,10 +668,10 @@ static inline void fill_caches(H264Context *h, int mb_type, int for_deblock){ #if 1 //FIXME direct mb can skip much of this - if(IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)){ + if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){ int list; - for(list=0; list<2; list++){ - if(!USES_LIST(mb_type, list) && !IS_DIRECT(mb_type) && !for_deblock){ + for(list=0; list<1+(h->slice_type==B_TYPE); list++){ + if(!USES_LIST(mb_type, list) && !IS_DIRECT(mb_type) && !h->deblocking_filter){ /*if(!h->mv_cache_clean[list]){ memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all? memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t)); @@ -675,16 +681,6 @@ static inline void fill_caches(H264Context *h, int mb_type, int for_deblock){ } h->mv_cache_clean[list]= 0; - if(IS_INTER(topleft_type)){ - const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride; - const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride; - *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy]; - h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy]; - }else{ - *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0; - h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE; - } - if(IS_INTER(top_type)){ const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride; const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride; @@ -704,16 +700,6 @@ static inline void fill_caches(H264Context *h, int mb_type, int for_deblock){ *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101; } - if(IS_INTER(topright_type)){ - const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride; - const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride; - *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy]; - h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy]; - }else{ - *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0; - h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? 
LIST_NOT_USED : PART_NOT_AVAILABLE; - } - //FIXME unify cleanup or sth if(IS_INTER(left_type[0])){ const int b_xy= h->mb2b_xy[left_xy[0]] + 3; @@ -743,17 +729,38 @@ static inline void fill_caches(H264Context *h, int mb_type, int for_deblock){ h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE; } - if(for_deblock) + if(for_deblock || (IS_DIRECT(mb_type) && !h->direct_spatial_mv_pred)) continue; + if(IS_INTER(topleft_type)){ + const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride; + const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride; + *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy]; + h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy]; + }else{ + *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0; + h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE; + } + + if(IS_INTER(topright_type)){ + const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride; + const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride; + *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy]; + h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy]; + }else{ + *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0; + h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE; + } + + h->ref_cache[list][scan8[5 ]+1] = h->ref_cache[list][scan8[7 ]+1] = - h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewher else) + h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewhere else) h->ref_cache[list][scan8[4 ]] = h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE; *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]= *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]= - *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else) + *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else) *(uint32_t*)h->mv_cache [list][scan8[4 ]]= *(uint32_t*)h->mv_cache [list][scan8[12]]= 0; @@ -796,7 +803,7 @@ static inline void fill_caches(H264Context *h, int mb_type, int for_deblock){ } *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]= *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]= - *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else) + *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else) *(uint32_t*)h->mvd_cache [list][scan8[4 ]]= *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0; @@ -1356,7 +1363,7 @@ static inline void write_back_motion(H264Context *h, int mb_type){ for(list=0; list<2; list++){ int y; if(!USES_LIST(mb_type, list)){ - if(1){ //FIXME skip or never read if mb_type doesnt use it + if(1){ //FIXME skip or never read if mb_type doesn't use it for(y=0; y<4; y++){ *(uint64_t*)s->current_picture.motion_val[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= 0; @@ -1404,7 +1411,7 @@ static inline void write_back_motion(H264Context *h, int mb_type){ * Decodes a network abstraction layer unit. * @param consumed is the number of bytes used as input * @param length is the length of the array - * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp ttailing? + * @param dst_length is the number of decoded bytes FIXME here or a decode rbsp tailing? 
* @returns decoded bytes, might be src+1 if no escapes */ static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *consumed, int length){ @@ -1441,7 +1448,7 @@ static uint8_t *decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *c h->rbsp_buffer= av_fast_realloc(h->rbsp_buffer, &h->rbsp_buffer_size, length); dst= h->rbsp_buffer; -//printf("deoding esc\n"); +//printf("decoding esc\n"); si=di=0; while(si<length){ //remove escapes (very rare 1:2^22) @@ -1729,7 +1736,7 @@ static void h264_diff_dct_c(DCTELEM *block, uint8_t *src1, uint8_t *src2, int st } #endif -//FIXME need to check that this doesnt overflow signed 32 bit for low qp, iam not sure, its very close +//FIXME need to check that this doesnt overflow signed 32 bit for low qp, i am not sure, it's very close //FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away) static inline int quantize_c(DCTELEM *block, uint8_t *scantable, int qscale, int intra, int seperate_dc){ int i; @@ -2635,7 +2642,7 @@ static void free_tables(H264Context *h){ /** * allocates tables. - * needs widzh/height + * needs width/height */ static int alloc_tables(H264Context *h){ MpegEncContext * const s = &h->s; @@ -2660,8 +2667,8 @@ static int alloc_tables(H264Context *h){ memset(h->slice_table_base, -1, big_mb_num * sizeof(uint8_t)); h->slice_table= h->slice_table_base + s->mb_stride + 1; - CHECKED_ALLOCZ(h->mb2b_xy , big_mb_num * sizeof(uint16_t)); - CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint16_t)); + CHECKED_ALLOCZ(h->mb2b_xy , big_mb_num * sizeof(uint32_t)); + CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint32_t)); for(y=0; y<s->mb_height; y++){ for(x=0; x<s->mb_width; x++){ const int mb_xy= x + y*s->mb_stride; @@ -2760,7 +2767,7 @@ static inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src src_cb -= uvlinesize; src_cr -= uvlinesize; - // There is two lines saved, the line above the the top macroblock of a pair, + // There are two lines saved, the line above the the top macroblock of a pair, // and the line above the bottom macroblock h->left_border[0]= h->top_borders[0][s->mb_x][15]; for(i=1; i<17; i++){ @@ -2832,7 +2839,7 @@ static inline void backup_pair_border(H264Context *h, uint8_t *src_y, uint8_t *s src_cb -= 2 * uvlinesize; src_cr -= 2 * uvlinesize; - // There is two lines saved, the line above the the top macroblock of a pair, + // There are two lines saved, the line above the the top macroblock of a pair, // and the line above the bottom macroblock h->left_border[0]= h->top_borders[0][s->mb_x][15]; h->left_border[1]= h->top_borders[1][s->mb_x][15]; @@ -3113,7 +3120,7 @@ static void hl_decode_mb(H264Context *h){ // top s->mb_y--; tprintf("call mbaff filter_mb mb_x:%d mb_y:%d pair_dest_y = %p, dest_y = %p\n", mb_x, mb_y, pair_dest_y, dest_y); - fill_caches(h, mb_type_top, 1); //FIXME dont fill stuff which isnt used by filter_mb + fill_caches(h, mb_type_top, 1); //FIXME don't fill stuff which isn't used by filter_mb filter_mb(h, mb_x, mb_y, pair_dest_y, pair_dest_cb, pair_dest_cr, linesize, uvlinesize); if (tmp != s->current_picture.data[1][384]) { tprintf("modified pixel 8,1 (1)\n"); @@ -3121,7 +3128,7 @@ static void hl_decode_mb(H264Context *h){ // bottom s->mb_y++; tprintf("call mbaff filter_mb\n"); - fill_caches(h, mb_type_bottom, 1); //FIXME dont fill stuff which isnt used by filter_mb + fill_caches(h, mb_type_bottom, 1); //FIXME don't fill stuff which isn't used by filter_mb filter_mb(h, mb_x, mb_y+1, dest_y, dest_cb, dest_cr, linesize, uvlinesize); if 
(tmp != s->current_picture.data[1][384]) { tprintf("modified pixel 8,1 (2)\n"); @@ -3129,7 +3136,7 @@ static void hl_decode_mb(H264Context *h){ } else { tprintf("call filter_mb\n"); backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize); - fill_caches(h, mb_type, 1); //FIXME dont fill stuff which isnt used by filter_mb + fill_caches(h, mb_type, 1); //FIXME don't fill stuff which isn't used by filter_mb filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } } @@ -3258,7 +3265,7 @@ static int decode_ref_pic_list_reordering(H264Context *h){ print_short_term(h); print_long_term(h); - if(h->slice_type==I_TYPE || h->slice_type==SI_TYPE) return 0; //FIXME move beofre func + if(h->slice_type==I_TYPE || h->slice_type==SI_TYPE) return 0; //FIXME move before func for(list=0; list<2; list++){ memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]); @@ -3450,9 +3457,19 @@ static void idr(H264Context *h){ h->short_ref_count=0; } +/* forget old pics after a seek */ +static void flush_dpb(AVCodecContext *avctx){ + H264Context *h= avctx->priv_data; + int i; + for(i=0; i<16; i++) + h->delayed_pic[i]= NULL; + h->delayed_output_pic= NULL; + idr(h); +} + /** * - * @return the removed picture or NULL if an error occures + * @return the removed picture or NULL if an error occurs */ static Picture * remove_short(H264Context *h, int frame_num){ MpegEncContext * const s = &h->s; @@ -3477,7 +3494,7 @@ static Picture * remove_short(H264Context *h, int frame_num){ /** * - * @return the removed picture or NULL if an error occures + * @return the removed picture or NULL if an error occurs */ static Picture * remove_long(H264Context *h, int i){ Picture *pic; @@ -3782,7 +3799,7 @@ static int decode_slice_header(H264Context *h){ } h->slice_type= slice_type; - s->pict_type= h->slice_type; // to make a few old func happy, its wrong though + s->pict_type= h->slice_type; // to make a few old func happy, it's wrong though pps_id= get_ue_golomb(&s->gb); if(pps_id>255){ @@ -3831,8 +3848,7 @@ static int decode_slice_header(H264Context *h){ s->avctx->sample_aspect_ratio.den = 1; if(h->sps.timing_info_present_flag && h->sps.fixed_frame_rate_flag){ - s->avctx->frame_rate = h->sps.time_scale; - s->avctx->frame_rate_base = h->sps.num_units_in_tick; + s->avctx->time_base= (AVRational){h->sps.num_units_in_tick, h->sps.time_scale}; } } @@ -4075,7 +4091,7 @@ static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, in level_code= prefix + get_bits(gb, 4); //part }else if(prefix==15){ level_code= (prefix<<suffix_length) + get_bits(gb, 12); //part - if(suffix_length==0) level_code+=15; //FIXME doesnt make (much)sense + if(suffix_length==0) level_code+=15; //FIXME doesn't make (much)sense }else{ av_log(h->s.avctx, AV_LOG_ERROR, "prefix too large at %d %d\n", s->mb_x, s->mb_y); return -1; @@ -4198,7 +4214,7 @@ static void decode_mb_skip(H264Context *h){ s->current_picture.mb_type[mb_xy]= mb_type|MB_TYPE_SKIP; s->current_picture.qscale_table[mb_xy]= s->qscale; h->slice_table[ mb_xy ]= h->slice_num; - h->prev_mb_skiped= 1; + h->prev_mb_skipped= 1; } /** @@ -4210,7 +4226,7 @@ static int decode_mb_cavlc(H264Context *h){ const int mb_xy= s->mb_x + s->mb_y*s->mb_stride; int mb_type, partition_count, cbp; - s->dsp.clear_blocks(h->mb); //FIXME avoid if allready clear (move after skip handlong? + s->dsp.clear_blocks(h->mb); //FIXME avoid if already clear (move after skip handlong? 
tprintf("pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); cbp = 0; /* avoid warning. FIXME: find a solution without slowing @@ -4225,12 +4241,12 @@ static int decode_mb_cavlc(H264Context *h){ } } if(h->mb_aff_frame){ - if ( ((s->mb_y&1) == 0) || h->prev_mb_skiped) + if ( ((s->mb_y&1) == 0) || h->prev_mb_skipped) h->mb_field_decoding_flag = get_bits1(&s->gb); }else h->mb_field_decoding_flag= (s->picture_structure!=PICT_FRAME); - h->prev_mb_skiped= 0; + h->prev_mb_skipped= 0; mb_type= get_ue_golomb(&s->gb); if(h->slice_type == B_TYPE){ @@ -4297,10 +4313,10 @@ decode_intra_mb: } } - // In deblocking, the quantiser is 0 + // In deblocking, the quantizer is 0 s->current_picture.qscale_table[mb_xy]= 0; h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, 0); - // All coeffs are presents + // All coeffs are present memset(h->non_zero_count[mb_xy], 16, 16); return 0; @@ -4566,7 +4582,7 @@ decode_intra_mb: h->chroma_qp= chroma_qp= get_chroma_qp(h->pps.chroma_qp_index_offset, s->qscale); if(IS_INTRA16x16(mb_type)){ if( decode_residual(h, h->intra_gb_ptr, h->mb, LUMA_DC_BLOCK_INDEX, dc_scan, s->qscale, 16) < 0){ - return -1; //FIXME continue if partotioned and other retirn -1 too + return -1; //FIXME continue if partitioned and other return -1 too } assert((cbp&15) == 0 || (cbp&15) == 15); @@ -5185,7 +5201,7 @@ static int decode_mb_cabac(H264Context *h) { const int mb_xy= s->mb_x + s->mb_y*s->mb_stride; int mb_type, partition_count, cbp = 0; - s->dsp.clear_blocks(h->mb); //FIXME avoid if allready clear (move after skip handlong?) + s->dsp.clear_blocks(h->mb); //FIXME avoid if already clear (move after skip handlong?) tprintf("pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); if( h->slice_type != I_TYPE && h->slice_type != SI_TYPE ) { @@ -5202,12 +5218,12 @@ static int decode_mb_cabac(H264Context *h) { } } if(h->mb_aff_frame){ - if ( ((s->mb_y&1) == 0) || h->prev_mb_skiped) + if ( ((s->mb_y&1) == 0) || h->prev_mb_skipped) h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h); }else h->mb_field_decoding_flag= (s->picture_structure!=PICT_FRAME); - h->prev_mb_skiped = 0; + h->prev_mb_skipped = 0; compute_mb_neighboors(h); if( ( mb_type = decode_cabac_mb_type( h ) ) < 0 ) { @@ -5280,13 +5296,13 @@ decode_intra_mb: ff_init_cabac_decoder(&h->cabac, ptr, h->cabac.bytestream_end - ptr); - // All blocks are presents + // All blocks are present h->cbp_table[mb_xy] = 0x1ef; h->chroma_pred_mode_table[mb_xy] = 0; - // In deblocking, the quantiser is 0 + // In deblocking, the quantizer is 0 s->current_picture.qscale_table[mb_xy]= 0; h->chroma_qp = get_chroma_qp(h->pps.chroma_qp_index_offset, 0); - // All coeffs are presents + // All coeffs are present memset(h->non_zero_count[mb_xy], 16, 16); return 0; } @@ -5617,48 +5633,15 @@ static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4] const int alpha = alpha_table[index_a]; const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )]; - for( i = 0; i < 4; i++ ) { - if( bS[i] == 0 ) { - pix += 4 * stride; - continue; - } - - if( bS[i] < 4 ) { - const int tc0 = tc0_table[index_a][bS[i] - 1]; - /* 4px edge length */ - for( d = 0; d < 4; d++ ) { - const int p0 = pix[-1]; - const int p1 = pix[-2]; - const int p2 = pix[-3]; - const int q0 = pix[0]; - const int q1 = pix[1]; - const int q2 = pix[2]; - - if( ABS( p0 - q0 ) < alpha && - ABS( p1 - p0 ) < beta && - ABS( q1 - q0 ) < beta ) { - int tc = tc0; - int i_delta; - - if( ABS( p2 - p0 ) < beta ) { - pix[-2] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) 
) >> 1, -tc0, tc0 ); - tc++; - } - if( ABS( q2 - q0 ) < beta ) { - pix[1] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 ); - tc++; - } - - i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); - pix[-1] = clip_uint8( p0 + i_delta ); /* p0' */ - pix[0] = clip_uint8( q0 - i_delta ); /* q0' */ - tprintf("filter_mb_edgev i:%d d:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, qp, index_a, alpha, beta, tc, bS[i], pix[-3], p1, p0, q0, q1, pix[2], pix[-2], pix[-1], pix[0], pix[1]); - } - pix += stride; - } - }else{ - /* 4px edge length */ - for( d = 0; d < 4; d++ ) { + if( bS[0] < 4 ) { + int tc[4]; + for(i=0; i<4; i++) + tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] : -1; + h->s.dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc); + } else { + /* 16px edge length, because bS=4 is triggered by being at + * the edge of an intra MB, so all 4 bS are the same */ + for( d = 0; d < 16; d++ ) { const int p0 = pix[-1]; const int p1 = pix[-2]; const int p2 = pix[-3]; @@ -5703,7 +5686,6 @@ static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4] } pix += stride; } - } } } static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) { @@ -5712,35 +5694,14 @@ static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4 const int alpha = alpha_table[index_a]; const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )]; - for( i = 0; i < 4; i++ ) { - if( bS[i] == 0 ) { - pix += 2 * stride; - continue; - } - - if( bS[i] < 4 ) { - const int tc = tc0_table[index_a][bS[i] - 1] + 1; - /* 2px edge length (because we use same bS than the one for luma) */ - for( d = 0; d < 2; d++ ){ - const int p0 = pix[-1]; - const int p1 = pix[-2]; - const int q0 = pix[0]; - const int q1 = pix[1]; - - if( ABS( p0 - q0 ) < alpha && - ABS( p1 - p0 ) < beta && - ABS( q1 - q0 ) < beta ) { - const int i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); - - pix[-1] = clip_uint8( p0 + i_delta ); /* p0' */ - pix[0] = clip_uint8( q0 - i_delta ); /* q0' */ - tprintf("filter_mb_edgecv i:%d d:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, qp, index_a, alpha, beta, tc, bS[i], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); - } - pix += stride; - } - }else{ - /* 2px edge length (because we use same bS than the one for luma) */ - for( d = 0; d < 2; d++ ){ + if( bS[0] < 4 ) { + int tc[4]; + for(i=0; i<4; i++) + tc[i] = bS[i] ? 
tc0_table[index_a][bS[i] - 1] + 1 : 0; + h->s.dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc); + } else { + /* 8px edge length, see filter_mb_edgev */ + for( d = 0; d < 8; d++ ){ const int p0 = pix[-1]; const int p1 = pix[-2]; const int q0 = pix[0]; @@ -5756,7 +5717,6 @@ static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4 } pix += stride; } - } } } @@ -5921,49 +5881,14 @@ static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4] const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )]; const int pix_next = stride; - for( i = 0; i < 4; i++ ) { - if( bS[i] == 0 ) { - pix += 4; - continue; - } - - if( bS[i] < 4 ) { - const int tc0 = tc0_table[index_a][bS[i] - 1]; - /* 4px edge length */ - for( d = 0; d < 4; d++ ) { - const int p0 = pix[-1*pix_next]; - const int p1 = pix[-2*pix_next]; - const int p2 = pix[-3*pix_next]; - const int q0 = pix[0]; - const int q1 = pix[1*pix_next]; - const int q2 = pix[2*pix_next]; - - if( ABS( p0 - q0 ) < alpha && - ABS( p1 - p0 ) < beta && - ABS( q1 - q0 ) < beta ) { - - int tc = tc0; - int i_delta; - - if( ABS( p2 - p0 ) < beta ) { - pix[-2*pix_next] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 ); - tc++; - } - if( ABS( q2 - q0 ) < beta ) { - pix[pix_next] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 ); - tc++; - } - - i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); - pix[-pix_next] = clip_uint8( p0 + i_delta ); /* p0' */ - pix[0] = clip_uint8( q0 - i_delta ); /* q0' */ - tprintf("filter_mb_edgeh i:%d d:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, qp, index_a, alpha, beta, tc, bS[i], p2, p1, p0, q0, q1, q2, pix[-2*pix_next], pix[-pix_next], pix[0], pix[pix_next]); - } - pix++; - } - }else{ - /* 4px edge length */ - for( d = 0; d < 4; d++ ) { + if( bS[0] < 4 ) { + int tc[4]; + for(i=0; i<4; i++) + tc[i] = bS[i] ? 
tc0_table[index_a][bS[i] - 1] : -1; + h->s.dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc); + } else { + /* 16px edge length, see filter_mb_edgev */ + for( d = 0; d < 16; d++ ) { const int p0 = pix[-1*pix_next]; const int p1 = pix[-2*pix_next]; const int p2 = pix[-3*pix_next]; @@ -6006,7 +5931,6 @@ static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4] } pix++; } - } } } @@ -6017,37 +5941,14 @@ static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int bS[4 const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )]; const int pix_next = stride; - for( i = 0; i < 4; i++ ) - { - if( bS[i] == 0 ) { - pix += 2; - continue; - } - - if( bS[i] < 4 ) { - int tc = tc0_table[index_a][bS[i] - 1] + 1; - /* 2px edge length (see deblocking_filter_edgecv) */ - for( d = 0; d < 2; d++ ) { - const int p0 = pix[-1*pix_next]; - const int p1 = pix[-2*pix_next]; - const int q0 = pix[0]; - const int q1 = pix[1*pix_next]; - - if( ABS( p0 - q0 ) < alpha && - ABS( p1 - p0 ) < beta && - ABS( q1 - q0 ) < beta ) { - - int i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); - - pix[-pix_next] = clip_uint8( p0 + i_delta ); /* p0' */ - pix[0] = clip_uint8( q0 - i_delta ); /* q0' */ - tprintf("filter_mb_edgech i:%d d:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, d, qp, index_a, alpha, beta, tc, bS[i], pix[-3*pix_next], p1, p0, q0, q1, pix[2*pix_next], pix[-2*pix_next], pix[-pix_next], pix[0], pix[pix_next]); - } - pix++; - } - }else{ - /* 2px edge length (see deblocking_filter_edgecv) */ - for( d = 0; d < 2; d++ ) { + if( bS[0] < 4 ) { + int tc[4]; + for(i=0; i<4; i++) + tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] + 1 : 0; + h->s.dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc); + } else { + /* 8px edge length, see filter_mb_edgev */ + for( d = 0; d < 8; d++ ) { const int p0 = pix[-1*pix_next]; const int p1 = pix[-2*pix_next]; const int q0 = pix[0]; @@ -6063,7 +5964,6 @@ static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int bS[4 } pix++; } - } } } @@ -6072,6 +5972,10 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 const int mb_xy= mb_x + mb_y*s->mb_stride; int first_vertical_edge_done = 0; int dir; + /* FIXME: A given frame may occupy more than one position in + * the reference list. So ref2frm should be populated with + * frame numbers, not indices. */ + static const int ref2frm[18] = {-1,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; if (h->mb_aff_frame // left mb is in picture @@ -6081,7 +5985,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 // and left mb is in the same slice if deblocking_filter == 2 && (h->deblocking_filter!=2 || h->slice_table[mb_xy-1] == h->slice_table[mb_xy])) { /* First vertical edge is different in MBAFF frames - * There are 8 differents bS to compute and 2 differents Qp + * There are 8 different bS to compute and 2 different Qp */ int bS[8]; int qp[2]; @@ -6103,13 +6007,10 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 h->non_zero_count_cache[bn_idx] != 0 ) { bS[i] = 2; } else { - /* FIXME: A given frame may occupy more than one position in - * the reference list. So we should compare the frame numbers, - * not the indices in the ref list. 
*/ int l; bS[i] = 0; for( l = 0; l < 1 + (h->slice_type == B_TYPE); l++ ) { - if( h->ref_cache[l][b_idx] != h->ref_cache[l][bn_idx] || + if( ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] || ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 || ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= 4 ) { bS[i] = 1; @@ -6119,7 +6020,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 } } if(bS[0]+bS[1]+bS[2]+bS[3] != 0) { - // Do not use s->qscale as luma quantiser because it has not the same + // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. qp[0] = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[h->left_mb_xy[0]] + 1 ) >> 1; chroma_qp[0] = ( get_chroma_qp( h->pps.chroma_qp_index_offset, s->current_picture.qscale_table[mb_xy] ) + @@ -6153,7 +6054,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 /* Calculate bS */ for( edge = start; edge < 4; edge++ ) { - /* mbn_xy: neighbour macroblock */ + /* mbn_xy: neighbor macroblock */ int mbn_xy = edge > 0 ? mb_xy : mbm_xy; int bS[4]; int qp; @@ -6180,7 +6081,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 assert(0); } /* Filter edge */ - // Do not use s->qscale as luma quantiser because it has not the same + // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); @@ -6201,7 +6102,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 assert(0); } /* Filter edge */ - // Do not use s->qscale as luma quantiser because it has not the same + // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); @@ -6242,13 +6143,10 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 } else { - /* FIXME: A given frame may occupy more than one position in - * the reference list. So we should compare the frame numbers, - * not the indices in the ref list. */ int l; bS[i] = 0; for( l = 0; l < 1 + (h->slice_type == B_TYPE); l++ ) { - if( h->ref_cache[l][b_idx] != h->ref_cache[l][bn_idx] || + if( ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] || ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 || ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= 4 ) { bS[i] = 1; @@ -6263,7 +6161,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8 } /* Filter edge */ - // Do not use s->qscale as luma quantiser because it has not the same + // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. 
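/* [illustrative sketch, not part of the patch] The boundary-strength (bS) rule the
 * hunks above implement, restated in plain C for one 4-sample edge segment. The
 * parameters are assumptions for illustration: refs are frame numbers (i.e. already
 * mapped through ref2frm), mvs are in quarter-pel units, nnz are the non-zero
 * coefficient counts of the two neighboring blocks. */
#include <stdlib.h>

static int boundary_strength(int intra_edge, int mb_edge,
                             int nnz_p, int nnz_q, int ref_p, int ref_q,
                             int mvx_p, int mvy_p, int mvx_q, int mvy_q)
{
    if (intra_edge)                     /* either block is intra-coded */
        return mb_edge ? 4 : 3;         /* strongest filtering on macroblock edges */
    if (nnz_p || nnz_q)                 /* residual coefficients on either side */
        return 2;
    if (ref_p != ref_q ||               /* different reference frame, or motion */
        abs(mvx_p - mvx_q) >= 4 ||      /* differing by a full luma sample or more */
        abs(mvy_p - mvy_q) >= 4)
        return 1;
    return 0;                           /* this edge segment is not filtered */
}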
qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; //tprintf("filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp, s->current_picture.qscale_table[mbn_xy]); @@ -6621,7 +6519,7 @@ static inline int decode_seq_parameter_set(H264Context *h){ sps->crop_top = get_ue_golomb(&s->gb); sps->crop_bottom= get_ue_golomb(&s->gb); if(sps->crop_left || sps->crop_top){ - av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completly supported, this could look slightly wrong ...\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completely supported, this could look slightly wrong ...\n"); } }else{ sps->crop_left = @@ -6843,7 +6741,7 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){ switch(h->nal_unit_type){ case NAL_IDR_SLICE: - idr(h); //FIXME ensure we dont loose some frames if there is reordering + idr(h); //FIXME ensure we don't loose some frames if there is reordering case NAL_SLICE: init_get_bits(&s->gb, ptr, bit_length); h->intra_gb_ptr= @@ -6922,12 +6820,12 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){ } /** - * retunrs the number of bytes consumed for building the current frame + * returns the number of bytes consumed for building the current frame */ static int get_consumed_bytes(MpegEncContext *s, int pos, int buf_size){ if(s->flags&CODEC_FLAG_TRUNCATED){ pos -= s->parse_context.last_index; - if(pos<0) pos=0; // FIXME remove (uneeded?) + if(pos<0) pos=0; // FIXME remove (unneeded?) return pos; }else{ @@ -7015,7 +6913,7 @@ static int decode_frame(AVCodecContext *avctx, //FIXME do something with unavailable reference frames -// if(ret==FRAME_SKIPED) return get_consumed_bytes(s, buf_index, buf_size); +// if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_index, buf_size); if(!s->current_picture_ptr){ av_log(h->s.avctx, AV_LOG_DEBUG, "error, NO frame\n"); return -1; @@ -7320,6 +7218,7 @@ AVCodec h264_decoder = { decode_end, decode_frame, /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, + .flush= flush_dpb, }; AVCodecParser h264_parser = { diff --git a/src/libffmpeg/libavcodec/huffyuv.c b/src/libffmpeg/libavcodec/huffyuv.c index 5b496c512..d2f358575 100644 --- a/src/libffmpeg/libavcodec/huffyuv.c +++ b/src/libffmpeg/libavcodec/huffyuv.c @@ -542,7 +542,7 @@ static int encode_init(AVCodecContext *avctx) if(s->interlaced != ( s->height > 288 )) av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n"); }else if(avctx->strict_std_compliance>=0){ - av_log(avctx, AV_LOG_ERROR, "This codec is under development; files encoded with it may not be decodeable with future versions!!! Set vstrict=-1 / -strict -1 to use it anyway.\n"); + av_log(avctx, AV_LOG_ERROR, "This codec is under development; files encoded with it may not be decodable with future versions!!! 
Set vstrict=-1 / -strict -1 to use it anyway.\n"); return -1; } @@ -827,7 +827,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8 p->data[0][1]= get_bits(&s->gb, 8); p->data[0][0]= get_bits(&s->gb, 8); - av_log(avctx, AV_LOG_ERROR, "YUY2 output isnt implemenetd yet\n"); + av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n"); return -1; }else{ @@ -996,14 +996,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8 } } } - draw_slice(s, height); // just 1 large slice as this isnt possible in reverse order + draw_slice(s, height); // just 1 large slice as this is not possible in reverse order break; default: av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n"); } }else{ - av_log(avctx, AV_LOG_ERROR, "BGR24 output isnt implemenetd yet\n"); + av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n"); return -1; } } diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c index 550122673..1d1f2b213 100644 --- a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c +++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c @@ -39,6 +39,7 @@ static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x00 static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL; static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL; +static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL; static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL; static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL; static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL; @@ -691,6 +692,204 @@ static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){ ); } +// dst = ABS( a - b ) +#define MMABS_DIFF_MMX2(a,b,dst,z)\ + "movq " #b ", " #dst " \n\t"\ + "movq " #a ", " #z " \n\t"\ + "psubusw " #b ", " #z " \n\t"\ + "psubusw " #a ", " #dst " \n\t"\ + "pmaxsw " #z ", " #dst " \n\t" + +// a = clip( a, -tc, tc ) +#define CLIP_MMX2(a,tc,z)\ + "pxor " #z ", " #z " \n\t"\ + "psubw " #tc ", " #z " \n\t"\ + "pmaxsw " #z ", " #a " \n\t"\ + "pminsw " #tc ", " #a " \n\t" + +// in: mm0=p1, mm1=p0, mm2=q0, mm3=q1 +// out: mm7 = do we filter this pixel? 
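/* [illustrative sketch, not part of the patch] Scalar equivalent of what the MMX2
 * macros defined just below compute per pixel position: H264_DEBLOCK_THRESH evaluates
 * the filter-on condition, H264_DEBLOCK_P0_Q0 applies the clipped p0/q0 correction.
 * ABS(), clip() and clip_uint8() are the helpers already used by the C filter code
 * earlier in this diff. */
static void deblock_p0_q0_scalar(uint8_t *pp0, uint8_t *pq0, int p1, int q1,
                                 int alpha, int beta, int tc)
{
    const int p0 = *pp0, q0 = *pq0;
    if (ABS(p0 - q0) < alpha && ABS(p1 - p0) < beta && ABS(q1 - q0) < beta) {
        const int delta = clip((((q0 - p0) << 2) + (p1 - q1) + 4) >> 3, -tc, tc);
        *pp0 = clip_uint8(p0 + delta);   /* p0' */
        *pq0 = clip_uint8(q0 - delta);   /* q0' */
    }
}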
+#define H264_DEBLOCK_THRESH(alpha,beta)\ + "pxor %%mm7, %%mm7 \n\t"\ + "punpcklbw %%mm7, %%mm0 \n\t"\ + "punpcklbw %%mm7, %%mm1 \n\t"\ + "punpcklbw %%mm7, %%mm2 \n\t"\ + "punpcklbw %%mm7, %%mm3 \n\t"\ + MMABS_DIFF_MMX2(%%mm1, %%mm2, %%mm5, %%mm4)\ + "movd " #alpha ", %%mm6 \n\t"\ + "pshufw $0, %%mm6, %%mm6 \n\t"\ + "pcmpgtw %%mm5, %%mm6 \n\t" /* ABS(p0-q0) < alpha */\ + MMABS_DIFF_MMX2(%%mm0, %%mm1, %%mm5, %%mm4)\ + MMABS_DIFF_MMX2(%%mm3, %%mm2, %%mm7, %%mm4)\ + "pmaxsw %%mm7, %%mm5 \n\t"\ + "movd " #beta ", %%mm7 \n\t"\ + "pshufw $0, %%mm7, %%mm7 \n\t"\ + "movq %%mm7, %%mm4 \n\t"\ + "pcmpgtw %%mm5, %%mm7 \n\t" /* ABS(p1-p0) < beta && ABS(q1-q0) < beta */\ + "pand %%mm6, %%mm7 \n\t" + +// in: mm0=p1, mm1=p0, mm2=q0, mm3=q1, mm6=tc +// out: mm1=p0', mm2=q0' +#define H264_DEBLOCK_P0_Q0(pw4)\ + "movq " #pw4 ", %%mm4 \n\t"\ + "movq %%mm2, %%mm5 \n\t"\ + "paddw %%mm4, %%mm0 \n\t"\ + "psubw %%mm1, %%mm5 \n\t"\ + "psubw %%mm3, %%mm0 \n\t"\ + "psllw $2, %%mm5 \n\t"\ + "paddw %%mm0, %%mm5 \n\t"\ + "psraw $3, %%mm5 \n\t" /* mm5 = (((q0 - p0) << 2) + (p1 - q1) + 4) >> 3 */\ + CLIP_MMX2(%%mm5, %%mm6, %%mm4) /* delta = clip( mm5, -tc, tc ) */\ + "paddw %%mm5, %%mm1 \n\t" /* p0 += delta */\ + "psubw %%mm5, %%mm2 \n\t" /* q0 -= delta */ + +// in: mm1=p0, mm2=q0, mm6=tc0 +// out: mm5=delta +#define H264_DEBLOCK_DELTA_PQ1(p1,p2,z)\ + "movq %%mm1, %%mm5 \n\t"\ + "pavgb %%mm2, %%mm5 \n\t"\ + "paddw " #p2 ", %%mm5 \n\t"\ + "psraw $1, %%mm5 \n\t"\ + "psubw " #p1 ", %%mm5 \n\t" /* ( ( q2 + ((p0+q0+1)>>1) ) >> 1 ) - q1 */\ + CLIP_MMX2(%%mm5, %%mm6, z) + +static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int tc0) +{ + uint64_t tmp0, tmp1; + asm volatile( + "movd (%2,%4), %%mm0 \n\t" //p1 + "movd (%2,%4,2), %%mm1 \n\t" //p0 + "movd (%3), %%mm2 \n\t" //q0 + "movd (%3,%4), %%mm3 \n\t" //q1 + H264_DEBLOCK_THRESH(%6,%7) + "movq %%mm7, %0 \n\t" + +// filter p1 if ABS(p2-p0) < beta + "movd (%2), %%mm3 \n\t" + "pxor %%mm6, %%mm6 \n\t" + "punpcklbw %%mm6, %%mm3 \n\t" //p2 + MMABS_DIFF_MMX2(%%mm1, %%mm3, %%mm5, %%mm6) + "pcmpgtw %%mm5, %%mm4 \n\t" + "pand %%mm7, %%mm4 \n\t" // mm4 = ( ABS( p2 - p0 ) < beta && filterp ) + "movd %5, %%mm6 \n\t" + "pshufw $0, %%mm6, %%mm6 \n\t" //tc + + H264_DEBLOCK_DELTA_PQ1(%%mm0, %%mm3, %%mm7) // delta = clip( ( p2 + ((p0+q0+1)>>1) ) >> 1 ) - p1 ) + "pand %%mm4, %%mm5 \n\t" + "paddw %%mm0, %%mm5 \n\t" + "packuswb %%mm5, %%mm5 \n\t" + "movd %%mm5, (%2,%4) \n\t" // *p1 += delta + "psrlw $15, %%mm4 \n\t" + "paddw %%mm6, %%mm4 \n\t" // tc++ + "movq %%mm4, %1 \n\t" + +// filter q1 if ABS(q2-q0) < beta + "pxor %%mm7, %%mm7 \n\t" + "movd (%3,%4), %%mm3 \n\t" //q1 + "movd (%3,%4,2), %%mm4 \n\t" //q2 + "punpcklbw %%mm7, %%mm3 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + MMABS_DIFF_MMX2(%%mm2, %%mm4, %%mm5, %%mm7) + "movd %7, %%mm7 \n\t" + "pshufw $0, %%mm7, %%mm7 \n\t" + "pcmpgtw %%mm5, %%mm7 \n\t" + + H264_DEBLOCK_DELTA_PQ1(%%mm3, %%mm4, %%mm4) // delta = clip( ( q2 + ((p0+q0+1)>>1) ) >> 1 ) - q1 ) + "movq %0, %%mm4 \n\t" + "pand %%mm4, %%mm7 \n\t" // mm7 = ( ABS( q2 - q0 ) < beta && filterp ) + "pand %%mm7, %%mm5 \n\t" + "paddw %%mm3, %%mm5 \n\t" + "packuswb %%mm5, %%mm5 \n\t" + "movd %%mm5, (%3,%4) \n\t" // *q1 += delta + "movq %1, %%mm6 \n\t" + "psrlw $15, %%mm7 \n\t" + "paddw %%mm7, %%mm6 \n\t" // tc++ + "movq %0, %%mm4 \n\t" + "pand %%mm4, %%mm6 \n\t" + + H264_DEBLOCK_P0_Q0(%8) + "packuswb %%mm1, %%mm1 \n\t" + "packuswb %%mm2, %%mm2 \n\t" + "movd %%mm1, (%2,%4,2) \n\t" + "movd %%mm2, (%3) \n\t" + + : "=m"(tmp0), "=m"(tmp1) + : "r"(pix-3*stride), 
"r"(pix), "r"((long)stride), + "r"(tc0), "r"(alpha), "r"(beta), "m"(ff_pw_4) + ); +} + +static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + int i; + for(i=0; i<4; i++, pix+=4) { + if(tc0[i] < 0) + continue; + h264_loop_filter_luma_mmx2(pix, stride, alpha, beta, tc0[i]); + } +} + +static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + uint8_t trans[4*8]; + int i; + for(i=0; i<4; i++, pix+=4*stride) { + if(tc0[i] < 0) + continue; + //FIXME: could cut some load/stores by merging transpose with filter + transpose4x4(trans, pix-4, 4, stride); + transpose4x4(trans+4*4, pix, 4, stride); + h264_loop_filter_luma_mmx2(trans+4*4, 4, alpha, beta, tc0[i]); + transpose4x4(pix-2, trans+2*4, stride, 4); + } +} + +static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + asm volatile( + "movd (%0), %%mm0 \n\t" + "movd (%0,%2), %%mm1 \n\t" + "movd (%1), %%mm2 \n\t" + "movd (%1,%2), %%mm3 \n\t" + H264_DEBLOCK_THRESH(%4,%5) + "movd %3, %%mm6 \n\t" + "pshufw $0x50, %%mm6, %%mm6 \n\t" // mm6 = tc[1], tc[1], tc[0], tc[0] + "pand %%mm7, %%mm6 \n\t" + H264_DEBLOCK_P0_Q0(%6) + "packuswb %%mm1, %%mm1 \n\t" + "packuswb %%mm2, %%mm2 \n\t" + "movd %%mm1, (%0,%2) \n\t" + "movd %%mm2, (%1) \n\t" + :: "r"(pix-2*stride), "r"(pix), "r"((long)stride), + "r"(tc0[1]<<16 | tc0[0]), + "r"(alpha), "r"(beta), "m"(ff_pw_4) + ); +} + +static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + int i; + for(i=0; i<2; i++) { + h264_loop_filter_chroma_mmx2(pix, stride, alpha, beta, tc0); + pix += 4; + tc0 += 2; + } +} + +static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int *tc0) +{ + uint8_t trans[4*4]; + int i; + for(i=0; i<2; i++) { + //FIXME: could cut some load/stores by merging transpose with filter + transpose4x4(trans, pix-2, 4, stride); + h264_loop_filter_chroma_mmx2(trans+2*4, 4, alpha, beta, tc0); + transpose4x4(pix-2, trans, stride, 4); + pix += 4*stride; + tc0 += 2; + } +} + #ifdef CONFIG_ENCODERS static int pix_norm1_mmx(uint8_t *pix, int line_size) { int tmp; @@ -3184,6 +3383,11 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) dspfunc(avg_h264_qpel, 2, 4); #undef dspfunc + c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2; + c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2; + c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2; + c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2; + #ifdef CONFIG_ENCODERS c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2; #endif //CONFIG_ENCODERS diff --git a/src/libffmpeg/libavcodec/indeo2.c b/src/libffmpeg/libavcodec/indeo2.c new file mode 100644 index 000000000..1cee019dd --- /dev/null +++ b/src/libffmpeg/libavcodec/indeo2.c @@ -0,0 +1,216 @@ +/* + * Indel Indeo 2 codec + * Copyright (c) 2005 Konstantin Shishkov + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +/** + * @file indeo2.c + * Intel Indeo 2 decoder. + */ + +#include "avcodec.h" +#include "bitstream.h" +#include "indeo2data.h" + +typedef struct Ir2Context{ + AVCodecContext *avctx; + AVFrame picture; + GetBitContext gb; + int decode_delta; +} Ir2Context; + +#define CODE_VLC_BITS 14 +static VLC ir2_vlc; + +/* Indeo 2 codes are in range 0x01..0x7F and 0x81..0x90 */ +static inline int ir2_get_code(GetBitContext *gb) +{ + return get_vlc2(gb, ir2_vlc.table, CODE_VLC_BITS, 1) + 1; +} + +static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst, int stride, + const uint8_t *table) +{ + int i; + int j; + int out = 0; + int c; + int t; + + if(width&1) + return -1; + + /* first line contain absolute values, other lines contain deltas */ + while (out < width){ + c = ir2_get_code(&ctx->gb); + if(c >= 0x80) { /* we have a run */ + c -= 0x7F; + if(out + c*2 > width) + return -1; + for (i = 0; i < c * 2; i++) + dst[out++] = 0x80; + } else { /* copy two values from table */ + dst[out++] = table[c * 2]; + dst[out++] = table[(c * 2) + 1]; + } + } + dst += stride; + + for (j = 1; j < height; j++){ + out = 0; + while (out < width){ + c = ir2_get_code(&ctx->gb); + if(c >= 0x80) { /* we have a skip */ + c -= 0x7F; + if(out + c*2 > width) + return -1; + for (i = 0; i < c * 2; i++) { + dst[out] = dst[out - stride]; + out++; + } + } else { /* add two deltas from table */ + t = dst[out - stride] + (table[c * 2] - 128); + t= clip_uint8(t); + dst[out] = t; + out++; + t = dst[out - stride] + (table[(c * 2) + 1] - 128); + t= clip_uint8(t); + dst[out] = t; + out++; + } + } + dst += stride; + } + return 0; +} + +static int ir2_decode_plane_inter(Ir2Context *ctx, int width, int height, uint8_t *dst, int stride, + const uint8_t *table) +{ + int j; + int out = 0; + int c; + int t; + + if(width&1) + return -1; + + for (j = 0; j < height; j++){ + out = 0; + while (out < width){ + c = ir2_get_code(&ctx->gb); + if(c >= 0x80) { /* we have a skip */ + c -= 0x7F; + out += c * 2; + } else { /* add two deltas from table */ + t = dst[out] + (table[c * 2] - 128); + t= clip_uint8(t); + dst[out] = t; + out++; + t = dst[out] + (table[(c * 2) + 1] - 128); + t= clip_uint8(t); + dst[out] = t; + out++; + } + } + dst += stride; + } + return 0; +} + +static int ir2_decode_frame(AVCodecContext *avctx, + void *data, int *data_size, + uint8_t *buf, int buf_size) +{ + Ir2Context * const s = avctx->priv_data; + AVFrame *picture = data; + AVFrame * const p= (AVFrame*)&s->picture; + int start; + int i; + + if(p->data[0]) + avctx->release_buffer(avctx, p); + + p->reference = 1; + p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; + if (avctx->reget_buffer(avctx, p)) { + av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); + return -1; + } + + s->decode_delta = buf[18]; + + /* decide whether frame uses deltas or not */ + + for (i = 0; i < buf_size; i++) + buf[i] = ff_reverse[buf[i]]; + + start = 48; /* hardcoded for now */ + + init_get_bits(&s->gb, buf + start, buf_size - start); + + if (s->decode_delta) { /* intraframe */ + ir2_decode_plane(s, avctx->width, avctx->height, + s->picture.data[0], s->picture.linesize[0], ir2_luma_table); + /* swapped U and V */ + ir2_decode_plane(s, avctx->width >> 2, avctx->height >> 2, + s->picture.data[2], 
s->picture.linesize[2], ir2_luma_table); + ir2_decode_plane(s, avctx->width >> 2, avctx->height >> 2, + s->picture.data[1], s->picture.linesize[1], ir2_luma_table); + } else { /* interframe */ + ir2_decode_plane_inter(s, avctx->width, avctx->height, + s->picture.data[0], s->picture.linesize[0], ir2_luma_table); + /* swapped U and V */ + ir2_decode_plane_inter(s, avctx->width >> 2, avctx->height >> 2, + s->picture.data[2], s->picture.linesize[2], ir2_luma_table); + ir2_decode_plane_inter(s, avctx->width >> 2, avctx->height >> 2, + s->picture.data[1], s->picture.linesize[1], ir2_luma_table); + } + + *picture= *(AVFrame*)&s->picture; + *data_size = sizeof(AVPicture); + + return buf_size; +} + +static int ir2_decode_init(AVCodecContext *avctx){ + Ir2Context * const ic = avctx->priv_data; + + ic->avctx = avctx; + + avctx->pix_fmt= PIX_FMT_YUV410P; + + if (!ir2_vlc.table) + init_vlc(&ir2_vlc, CODE_VLC_BITS, IR2_CODES, + &ir2_codes[0][1], 4, 2, + &ir2_codes[0][0], 4, 2, 1); + + return 0; +} + +AVCodec indeo2_decoder = { + "indeo2", + CODEC_TYPE_VIDEO, + CODEC_ID_INDEO2, + sizeof(Ir2Context), + ir2_decode_init, + NULL, + NULL, + ir2_decode_frame, + CODEC_CAP_DR1, +}; diff --git a/src/libffmpeg/libavcodec/indeo2data.h b/src/libffmpeg/libavcodec/indeo2data.h new file mode 100644 index 000000000..f58b8415f --- /dev/null +++ b/src/libffmpeg/libavcodec/indeo2data.h @@ -0,0 +1,74 @@ +#define IR2_CODES 143 +static const uint16_t ir2_codes[IR2_CODES][2] = { + {0x0000, 3}, {0x0001, 3}, {0x0003, 3}, {0x0010, 5}, + {0x0012, 5}, {0x0013, 5}, {0x0016, 5}, {0x0017, 5}, + {0x0031, 6}, {0x0032, 6}, {0x0033, 6}, {0x0034, 6}, + {0x0035, 6}, {0x0036, 6}, {0x00E0, 8}, {0x00E1, 8}, + {0x00E4, 8}, {0x00E5, 8}, {0x00E6, 8}, {0x00E7, 8}, + {0x00E9, 8}, {0x00EA, 8}, {0x00EC, 8}, {0x00ED, 8}, + {0x00EF, 8}, {0x01E0, 9}, {0x01E2, 9}, {0x01E3, 9}, + {0x01E5, 9}, {0x01E6, 9}, {0x01E8, 9}, {0x01E9, 9}, + {0x01EB, 9}, {0x01EC, 9}, {0x01EE, 9}, {0x01EF, 9}, + {0x03E0, 10}, {0x03E1, 10}, {0x03E2, 10}, {0x03E3, 10}, + {0x03E4, 10}, {0x03E5, 10}, {0x03E6, 10}, {0x03E7, 10}, + {0x03E8, 10}, {0x03E9, 10}, {0x03EA, 10}, {0x03EB, 10}, + {0x03EC, 10}, {0x03ED, 10}, {0x03EE, 10}, {0x03EF, 10}, + {0x1F80, 13}, {0x1F81, 13}, {0x1F82, 13}, {0x1F83, 13}, + {0x1F84, 13}, {0x1F85, 13}, {0x1F86, 13}, {0x1F87, 13}, + {0x1F88, 13}, {0x1F89, 13}, {0x1F8A, 13}, {0x1F8B, 13}, + {0x1F8C, 13}, {0x1F8D, 13}, {0x1F8E, 13}, {0x1F8F, 13}, + {0x1F90, 13}, {0x1F91, 13}, {0x1F92, 13}, {0x1F93, 13}, + {0x1F94, 13}, {0x1F95, 13}, {0x1F96, 13}, {0x1F97, 13}, + {0x1F98, 13}, {0x1F99, 13}, {0x1F9A, 13}, {0x1F9B, 13}, + {0x1F9C, 13}, {0x1F9D, 13}, {0x1F9E, 13}, {0x1F9F, 13}, + {0x1FA0, 13}, {0x1FA1, 13}, {0x1FA2, 13}, {0x1FA3, 13}, + {0x1FA4, 13}, {0x1FA5, 13}, {0x1FA6, 13}, {0x1FA7, 13}, + {0x1FA8, 13}, {0x1FA9, 13}, {0x1FAA, 13}, {0x1FAB, 13}, + {0x1FAC, 13}, {0x1FAD, 13}, {0x1FAE, 13}, {0x1FAF, 13}, + {0x1FB0, 13}, {0x1FB1, 13}, {0x1FB2, 13}, {0x1FB3, 13}, + {0x1FB4, 13}, {0x1FB5, 13}, {0x1FB6, 13}, {0x1FB7, 13}, + {0x1FB8, 13}, {0x1FB9, 13}, {0x1FBA, 13}, {0x1FBB, 13}, + {0x1FBC, 13}, {0x1FBD, 13}, {0x1FBE, 13}, {0x1FBF, 13}, + {0x3F80, 14}, {0x3F81, 14}, {0x3F82, 14}, {0x3F83, 14}, + {0x3F84, 14}, {0x3F85, 14}, {0x3F86, 14}, {0x3F87, 14}, + {0x3F88, 14}, {0x3F89, 14}, {0x3F8A, 14}, {0x0002, 3}, + {0x0011, 5}, {0x0014, 5}, {0x0015, 5}, {0x0030, 6}, + {0x0037, 6}, {0x00E2, 8}, {0x00E3, 8}, {0x00E8, 8}, + {0x00EB, 8}, {0x00EE, 8}, {0x01E1, 9}, {0x01E4, 9}, + {0x01E7, 9}, {0x01EA, 9}, {0x01ED, 9} +}; + +static const uint8_t ir2_luma_table[256] = { + 0x80, 0x80, 
0x84, 0x84, 0x7C, 0x7C, 0x7F, 0x85, + 0x81, 0x7B, 0x85, 0x7F, 0x7B, 0x81, 0x8C, 0x8C, + 0x74, 0x74, 0x83, 0x8D, 0x7D, 0x73, 0x8D, 0x83, + 0x73, 0x7D, 0x77, 0x89, 0x89, 0x77, 0x89, 0x77, + 0x77, 0x89, 0x8C, 0x95, 0x74, 0x6B, 0x95, 0x8C, + 0x6B, 0x74, 0x7C, 0x90, 0x84, 0x70, 0x90, 0x7C, + 0x70, 0x84, 0x96, 0x96, 0x6A, 0x6A, 0x82, 0x98, + 0x7E, 0x68, 0x98, 0x82, 0x68, 0x7E, 0x97, 0xA2, + 0x69, 0x5E, 0xA2, 0x97, 0x5E, 0x69, 0xA2, 0xA2, + 0x5E, 0x5E, 0x8B, 0xA3, 0x75, 0x5D, 0xA3, 0x8B, + 0x5D, 0x75, 0x71, 0x95, 0x8F, 0x6B, 0x95, 0x71, + 0x6B, 0x8F, 0x78, 0x9D, 0x88, 0x63, 0x9D, 0x78, + 0x63, 0x88, 0x7F, 0xA7, 0x81, 0x59, 0xA7, 0x7F, + 0x59, 0x81, 0xA4, 0xB1, 0x5C, 0x4F, 0xB1, 0xA4, + 0x4F, 0x5C, 0x96, 0xB1, 0x6A, 0x4F, 0xB1, 0x96, + 0x4F, 0x6A, 0xB2, 0xB2, 0x4E, 0x4E, 0x65, 0x9B, + 0x9B, 0x65, 0x9B, 0x65, 0x65, 0x9B, 0x89, 0xB4, + 0x77, 0x4C, 0xB4, 0x89, 0x4C, 0x77, 0x6A, 0xA3, + 0x96, 0x5D, 0xA3, 0x6A, 0x5D, 0x96, 0x73, 0xAC, + 0x8D, 0x54, 0xAC, 0x73, 0x54, 0x8D, 0xB4, 0xC3, + 0x4C, 0x3D, 0xC3, 0xB4, 0x3D, 0x4C, 0xA4, 0xC3, + 0x5C, 0x3D, 0xC3, 0xA4, 0x3D, 0x5C, 0xC4, 0xC4, + 0x3C, 0x3C, 0x96, 0xC6, 0x6A, 0x3A, 0xC6, 0x96, + 0x3A, 0x6A, 0x7C, 0xBA, 0x84, 0x46, 0xBA, 0x7C, + 0x46, 0x84, 0x5B, 0xAB, 0xA5, 0x55, 0xAB, 0x5B, + 0x55, 0xA5, 0x63, 0xB4, 0x9D, 0x4C, 0xB4, 0x63, + 0x4C, 0x9D, 0x86, 0xCA, 0x7A, 0x36, 0xCA, 0x86, + 0x36, 0x7A, 0xB6, 0xD7, 0x4A, 0x29, 0xD7, 0xB6, + 0x29, 0x4A, 0xC8, 0xD7, 0x38, 0x29, 0xD7, 0xC8, + 0x29, 0x38, 0xA4, 0xD8, 0x5C, 0x28, 0xD8, 0xA4, + 0x28, 0x5C, 0x6C, 0xC1, 0x94, 0x3F, 0xC1, 0x6C, + 0x3F, 0x94, 0xD9, 0xD9, 0x27, 0x27, 0x80, 0x80 +}; diff --git a/src/libffmpeg/libavcodec/lcl.c b/src/libffmpeg/libavcodec/lcl.c index a15a10769..b93f31448 100644 --- a/src/libffmpeg/libavcodec/lcl.c +++ b/src/libffmpeg/libavcodec/lcl.c @@ -714,7 +714,7 @@ static int decode_init(AVCodecContext *avctx) break; default: if ((c->compression < Z_NO_COMPRESSION) || (c->compression > Z_BEST_COMPRESSION)) { - av_log(avctx, AV_LOG_ERROR, "Unusupported compression level for ZLIB: (%d).\n", c->compression); + av_log(avctx, AV_LOG_ERROR, "Unsupported compression level for ZLIB: (%d).\n", c->compression); return 1; } av_log(avctx, AV_LOG_INFO, "Compression level for ZLIB: (%d).\n", c->compression); diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c b/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c index d1307caca..0c99260a7 100644 --- a/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c +++ b/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c @@ -2646,7 +2646,7 @@ Switch between * accurate deblock filter */ static always_inline void RENAME(do_a_deblock)(uint8_t *src, int step, int stride, PPContext *c){ - int64_t dc_mask, eq_mask; + int64_t dc_mask, eq_mask, both_masks; int64_t sums[10*8*2]; src+= step*3; // src points to begin of the 8x8 Block //START_TIMER @@ -2755,7 +2755,9 @@ asm volatile( : "%"REG_a ); - if(dc_mask & eq_mask){ + both_masks = dc_mask & eq_mask; + + if(both_masks){ long offset= -8*step; int64_t *temp_sums= sums; @@ -2930,7 +2932,7 @@ asm volatile( " js 1b \n\t" : "+r"(offset), "+r"(temp_sums) - : "r" ((long)step), "r"(src - offset), "m"(dc_mask & eq_mask) + : "r" ((long)step), "r"(src - offset), "m"(both_masks) ); }else src+= step; // src points to begin of the 8x8 Block diff --git a/src/libffmpeg/libavcodec/motion_est.c b/src/libffmpeg/libavcodec/motion_est.c index 9aaad6daa..0136dcdfe 100644 --- a/src/libffmpeg/libavcodec/motion_est.c +++ b/src/libffmpeg/libavcodec/motion_est.c @@ -272,7 +272,7 @@ void 
ff_init_me(MpegEncContext *s){ // 8x8 fullpel search would need a 4x4 chroma compare, which we dont have yet, and even if we had the motion estimation code doesnt expect it if(s->codec_id != CODEC_ID_SNOW){ - if((c->avctx->me_cmp&FF_CMP_CHROMA) && !s->dsp.me_cmp[2]){ + if((c->avctx->me_cmp&FF_CMP_CHROMA)/* && !s->dsp.me_cmp[2]*/){ s->dsp.me_cmp[2]= zero_cmp; } if((c->avctx->me_sub_cmp&FF_CMP_CHROMA) && !s->dsp.me_sub_cmp[2]){ @@ -1303,7 +1303,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, c->sub_motion_search(s, &mx, &my, dmin, 0, 0, 0, 16); if(s->flags&CODEC_FLAG_MV0) if(mx || my) - mb_type |= CANDIDATE_MB_TYPE_SKIPED; //FIXME check difference + mb_type |= CANDIDATE_MB_TYPE_SKIPPED; //FIXME check difference }else{ mx <<=shift; my <<=shift; diff --git a/src/libffmpeg/libavcodec/mpeg12.c b/src/libffmpeg/libavcodec/mpeg12.c index 40ef61503..b74412ba8 100644 --- a/src/libffmpeg/libavcodec/mpeg12.c +++ b/src/libffmpeg/libavcodec/mpeg12.c @@ -79,6 +79,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n); static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n); +static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n); static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred); static void exchange_uv(MpegEncContext *s); @@ -207,8 +208,8 @@ static int find_frame_rate_index(MpegEncContext *s){ int64_t d; for(i=1;i<14;i++) { - int64_t n0= 1001LL/frame_rate_tab[i].den*frame_rate_tab[i].num*s->avctx->frame_rate_base; - int64_t n1= 1001LL*s->avctx->frame_rate; + int64_t n0= 1001LL/frame_rate_tab[i].den*frame_rate_tab[i].num*s->avctx->time_base.num; + int64_t n1= 1001LL*s->avctx->time_base.den; if(s->avctx->strict_std_compliance >= 0 && i>=9) break; d = ABS(n0 - n1); @@ -232,10 +233,10 @@ static int encode_init(AVCodecContext *avctx) if(find_frame_rate_index(s) < 0){ if(s->strict_std_compliance >=0){ - av_log(avctx, AV_LOG_ERROR, "MPEG1/2 doesnt support %d/%d fps\n", avctx->frame_rate, avctx->frame_rate_base); + av_log(avctx, AV_LOG_ERROR, "MPEG1/2 does not support %d/%d fps\n", avctx->time_base.den, avctx->time_base.num); return -1; }else{ - av_log(avctx, AV_LOG_INFO, "MPEG1/2 doesnt support %d/%d fps, there may be AV sync issues\n", avctx->frame_rate, avctx->frame_rate_base); + av_log(avctx, AV_LOG_INFO, "MPEG1/2 does not support %d/%d fps, there may be AV sync issues\n", avctx->time_base.den, avctx->time_base.num); } } @@ -1061,11 +1062,11 @@ static int mpeg_decode_mb(MpegEncContext *s, dprintf("decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y); - assert(s->mb_skiped==0); + assert(s->mb_skipped==0); if (s->mb_skip_run-- != 0) { if(s->pict_type == I_TYPE){ - av_log(s->avctx, AV_LOG_ERROR, "skiped MB in I frame at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -1084,7 +1085,7 @@ static int mpeg_decode_mb(MpegEncContext *s, s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0; s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0; s->field_select[0][0]= s->picture_structure - 1; - s->mb_skiped = 1; + s->mb_skipped = 1; s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; } else { int mb_type; @@ -1107,7 +1108,7 @@ static int mpeg_decode_mb(MpegEncContext *s, // assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8)); if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0) - s->mb_skiped = 1; + 
s->mb_skipped = 1; } return 0; @@ -1146,6 +1147,12 @@ static int mpeg_decode_mb(MpegEncContext *s, dprintf("mb_type=%x\n", mb_type); // motion_type = 0; /* avoid warning */ if (IS_INTRA(mb_type)) { + s->dsp.clear_blocks(s->block[0]); + + if(!s->chroma_y_shift){ + s->dsp.clear_blocks(s->block[6]); + } + /* compute dct type */ if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var? !s->frame_pred_frame_dct) { @@ -1180,9 +1187,15 @@ static int mpeg_decode_mb(MpegEncContext *s, #endif if (s->codec_id == CODEC_ID_MPEG2VIDEO) { - for(i=0;i<mb_block_count;i++) { - if (mpeg2_decode_block_intra(s, s->pblocks[i], i) < 0) - return -1; + if(s->flags2 & CODEC_FLAG2_FAST){ + for(i=0;i<6;i++) { + mpeg2_fast_decode_block_intra(s, s->pblocks[i], i); + } + }else{ + for(i=0;i<mb_block_count;i++) { + if (mpeg2_decode_block_intra(s, s->pblocks[i], i) < 0) + return -1; + } } } else { for(i=0;i<6;i++) { @@ -1357,8 +1370,13 @@ static int mpeg_decode_mb(MpegEncContext *s, } s->mb_intra = 0; - if (HAS_CBP(mb_type)) { + s->dsp.clear_blocks(s->block[0]); + + if(!s->chroma_y_shift){ + s->dsp.clear_blocks(s->block[6]); + } + cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1); if (cbp < 0 || ((cbp == 0) && (s->chroma_format < 2)) ){ av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y); @@ -1571,45 +1589,42 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s, const int qscale= s->qscale; { - int v; OPEN_READER(re, &s->gb); i = -1; /* special case for the first coef. no need to add a second vlc table */ UPDATE_CACHE(re, &s->gb); - v= SHOW_UBITS(re, &s->gb, 2); - if (v & 2) { - LAST_SKIP_BITS(re, &s->gb, 2); + if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { level= (3*qscale*quant_matrix[0])>>5; level= (level-1)|1; - if(v&1) + if(GET_CACHE(re, &s->gb)&0x40000000) level= -level; block[0] = level; i++; + SKIP_BITS(re, &s->gb, 2); + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + goto end; } /* now quantify & encode AC coefs */ for(;;) { - UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); - if(level == 127){ - break; - } else if(level != 0) { + if(level != 0) { i += run; j = scantable[i]; level= ((level*2+1)*qscale*quant_matrix[j])>>5; level= (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); - LAST_SKIP_BITS(re, &s->gb, 1); + SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8); if (level == -128) { - level = SHOW_UBITS(re, &s->gb, 8) - 256; LAST_SKIP_BITS(re, &s->gb, 8); + level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8); } else if (level == 0) { - level = SHOW_UBITS(re, &s->gb, 8) ; LAST_SKIP_BITS(re, &s->gb, 8); + level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8); } i += run; j = scantable[i]; @@ -1629,7 +1644,12 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s, } block[j] = level; + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + break; + UPDATE_CACHE(re, &s->gb); } +end: + LAST_SKIP_BITS(re, &s->gb, 2); CLOSE_READER(re, &s->gb); } s->block_last_index[n] = i; @@ -1644,45 +1664,42 @@ static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *bloc const int qscale= s->qscale; { - int v; OPEN_READER(re, &s->gb); i = -1; /* special case for the first coef. 
no need to add a second vlc table */ UPDATE_CACHE(re, &s->gb); - v= SHOW_UBITS(re, &s->gb, 2); - if (v & 2) { - LAST_SKIP_BITS(re, &s->gb, 2); + if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { level= (3*qscale)>>1; level= (level-1)|1; - if(v&1) + if(GET_CACHE(re, &s->gb)&0x40000000) level= -level; block[0] = level; i++; + SKIP_BITS(re, &s->gb, 2); + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + goto end; } /* now quantify & encode AC coefs */ for(;;) { - UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); - if(level == 127){ - break; - } else if(level != 0) { + if(level != 0) { i += run; j = scantable[i]; level= ((level*2+1)*qscale)>>1; level= (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); - LAST_SKIP_BITS(re, &s->gb, 1); + SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8); if (level == -128) { - level = SHOW_UBITS(re, &s->gb, 8) - 256; LAST_SKIP_BITS(re, &s->gb, 8); + level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8); } else if (level == 0) { - level = SHOW_UBITS(re, &s->gb, 8) ; LAST_SKIP_BITS(re, &s->gb, 8); + level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8); } i += run; j = scantable[i]; @@ -1698,7 +1715,12 @@ static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *bloc } block[j] = level; + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + break; + UPDATE_CACHE(re, &s->gb); } +end: + LAST_SKIP_BITS(re, &s->gb, 2); CLOSE_READER(re, &s->gb); } s->block_last_index[n] = i; @@ -1720,7 +1742,6 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, mismatch = 1; { - int v; OPEN_READER(re, &s->gb); i = -1; if (n < 4) @@ -1730,30 +1751,28 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, /* special case for the first coef. no need to add a second vlc table */ UPDATE_CACHE(re, &s->gb); - v= SHOW_UBITS(re, &s->gb, 2); - if (v & 2) { - LAST_SKIP_BITS(re, &s->gb, 2); + if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { level= (3*qscale*quant_matrix[0])>>5; - if(v&1) + if(GET_CACHE(re, &s->gb)&0x40000000) level= -level; block[0] = level; mismatch ^= level; i++; + SKIP_BITS(re, &s->gb, 2); + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + goto end; } /* now quantify & encode AC coefs */ for(;;) { - UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); - if(level == 127){ - break; - } else if(level != 0) { + if(level != 0) { i += run; j = scantable[i]; level= ((level*2+1)*qscale*quant_matrix[j])>>5; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); - LAST_SKIP_BITS(re, &s->gb, 1); + SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); @@ -1776,7 +1795,12 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, mismatch ^= level; block[j] = level; + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + break; + UPDATE_CACHE(re, &s->gb); } +end: + LAST_SKIP_BITS(re, &s->gb, 2); CLOSE_READER(re, &s->gb); } block[63] ^= (mismatch & 1); @@ -1799,29 +1823,27 @@ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, /* special case for the first coef. 
no need to add a second vlc table */ UPDATE_CACHE(re, &s->gb); - v= SHOW_UBITS(re, &s->gb, 2); - if (v & 2) { - LAST_SKIP_BITS(re, &s->gb, 2); + if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { level= (3*qscale)>>1; - if(v&1) + if(GET_CACHE(re, &s->gb)&0x40000000) level= -level; block[0] = level; i++; + SKIP_BITS(re, &s->gb, 2); + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + goto end; } /* now quantify & encode AC coefs */ for(;;) { - UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); - if(level == 127){ - break; - } else if(level != 0) { + if(level != 0) { i += run; j = scantable[i]; level= ((level*2+1)*qscale)>>1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); - LAST_SKIP_BITS(re, &s->gb, 1); + SKIP_BITS(re, &s->gb, 1); } else { /* escape */ run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); @@ -1839,7 +1861,12 @@ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, } block[j] = level; + if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) + break; + UPDATE_CACHE(re, &s->gb); } +end: + LAST_SKIP_BITS(re, &s->gb, 2); CLOSE_READER(re, &s->gb); s->block_last_index[n] = i; return 0; @@ -1926,6 +1953,76 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, return 0; } +static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, + DCTELEM *block, + int n) +{ + int level, dc, diff, j, run; + int component; + RLTable *rl; + uint8_t * scantable= s->intra_scantable.permutated; + const uint16_t *quant_matrix; + const int qscale= s->qscale; + + /* DC coef */ + if (n < 4){ + quant_matrix = s->intra_matrix; + component = 0; + }else{ + quant_matrix = s->chroma_intra_matrix; + component = (n&1) + 1; + } + diff = decode_dc(&s->gb, component); + if (diff >= 0xffff) + return -1; + dc = s->last_dc[component]; + dc += diff; + s->last_dc[component] = dc; + block[0] = dc << (3 - s->intra_dc_precision); + if (s->intra_vlc_format) + rl = &rl_mpeg2; + else + rl = &rl_mpeg1; + + { + OPEN_READER(re, &s->gb); + /* now quantify & encode AC coefs */ + for(;;) { + UPDATE_CACHE(re, &s->gb); + GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); + + if(level == 127){ + break; + } else if(level != 0) { + scantable += run; + j = *scantable; + level= (level*qscale*quant_matrix[j])>>4; + level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); + LAST_SKIP_BITS(re, &s->gb, 1); + } else { + /* escape */ + run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); + UPDATE_CACHE(re, &s->gb); + level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); + scantable += run; + j = *scantable; + if(level<0){ + level= (-level*qscale*quant_matrix[j])>>4; + level= -level; + }else{ + level= (level*qscale*quant_matrix[j])>>4; + } + } + + block[j] = level; + } + CLOSE_READER(re, &s->gb); + } + + s->block_last_index[n] = scantable - s->intra_scantable.permutated; + return 0; +} + typedef struct Mpeg1Context { MpegEncContext mpeg_enc_ctx; int mpeg_enc_ctx_allocated; /* true if decoding context allocated */ @@ -2009,8 +2106,8 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){ if(avctx->sub_id==1){//s->codec_id==avctx->codec_id==CODEC_ID //mpeg1 fps - avctx->frame_rate = frame_rate_tab[s->frame_rate_index].num; - avctx->frame_rate_base= frame_rate_tab[s->frame_rate_index].den; + avctx->time_base.den = frame_rate_tab[s->frame_rate_index].num; + avctx->time_base.num= frame_rate_tab[s->frame_rate_index].den; //mpeg1 aspect avctx->sample_aspect_ratio= 
av_d2q( 1.0/mpeg1_aspect[s->aspect_ratio_info], 255); @@ -2018,8 +2115,8 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){ }else{//mpeg2 //mpeg2 fps av_reduce( - &s->avctx->frame_rate, - &s->avctx->frame_rate_base, + &s->avctx->time_base.den, + &s->avctx->time_base.num, frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num, frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den, 1<<30); @@ -2499,10 +2596,6 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y, XVMC_init_block(s);//set s->block #endif - s->dsp.clear_blocks(s->block[0]); - if(!s->chroma_y_shift){ - s->dsp.clear_blocks(s->block[6]); - } ret = mpeg_decode_mb(s, s->block); s->chroma_qscale= s->qscale; diff --git a/src/libffmpeg/libavcodec/mpegaudiodec.c b/src/libffmpeg/libavcodec/mpegaudiodec.c index 196d77d2a..106c9fe68 100644 --- a/src/libffmpeg/libavcodec/mpegaudiodec.c +++ b/src/libffmpeg/libavcodec/mpegaudiodec.c @@ -2628,7 +2628,10 @@ static int decode_frame(AVCodecContext * avctx, } s->inbuf_ptr = s->inbuf; s->frame_size = 0; - *data_size = out_size; + if(out_size>=0) + *data_size = out_size; + else + av_log(avctx, AV_LOG_DEBUG, "Error while decoding mpeg audio frame\n"); //FIXME return -1 / but also return the number of bytes consumed break; } } diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c index 6eecd0259..0bdfd6304 100644 --- a/src/libffmpeg/libavcodec/mpegvideo.c +++ b/src/libffmpeg/libavcodec/mpegvideo.c @@ -280,7 +280,7 @@ int DCT_common_init(MpegEncContext *s) #endif //CONFIG_ENCODERS /* load & permutate scantables - note: only wmv uses differnt ones + note: only wmv uses different ones */ if(s->alternate_scan){ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); @@ -320,7 +320,7 @@ static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *sr if(!src->ref_index[0]) av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n"); if(src->motion_subsample_log2 != dst->motion_subsample_log2) - av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesnt match! (%d!=%d)\n", + av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesn't match! 
(%d!=%d)\n", src->motion_subsample_log2, dst->motion_subsample_log2); memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0])); @@ -417,7 +417,7 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1); s->prev_pict_types[0]= s->pict_type; if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE) - pic->age= INT_MAX; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway + pic->age= INT_MAX; // skipped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway return 0; fail: //for the CHECKED_ALLOCZ macro @@ -971,7 +971,7 @@ int MPV_encode_init(AVCodecContext *avctx) } if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){ - av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n"); + av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n"); } if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){ @@ -998,7 +998,7 @@ int MPV_encode_init(AVCodecContext *avctx) } if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){ - av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decission\n"); + av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n"); return -1; } @@ -1029,7 +1029,7 @@ int MPV_encode_init(AVCodecContext *avctx) } if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too - av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n"); + av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n"); return -1; } @@ -1058,7 +1058,7 @@ int MPV_encode_init(AVCodecContext *avctx) if(s->avctx->thread_count > 1) s->rtp_mode= 1; - if(!avctx->frame_rate || !avctx->frame_rate_base){ + if(!avctx->time_base.den || !avctx->time_base.num){ av_log(avctx, AV_LOG_ERROR, "framerate not set\n"); return -1; } @@ -1073,11 +1073,11 @@ int MPV_encode_init(AVCodecContext *avctx) return -1; } - i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base); + i= ff_gcd(avctx->time_base.den, avctx->time_base.num); if(i > 1){ av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n"); - avctx->frame_rate /= i; - avctx->frame_rate_base /= i; + avctx->time_base.den /= i; + avctx->time_base.num /= i; // return -1; } @@ -1099,8 +1099,11 @@ int MPV_encode_init(AVCodecContext *avctx) avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift); - av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1); - s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1; + if(s->avctx->time_base.den > (1<<16)-1){ + av_log(avctx, AV_LOG_ERROR, "timebase not supported by mpeg 4 standard\n"); + return -1; + } + s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1; switch(avctx->codec->id) { case CODEC_ID_MPEG1VIDEO: @@ -1268,8 +1271,10 @@ int MPV_encode_init(AVCodecContext *avctx) /* xine: do not need this for decode or MPEG-1 encoding modes */ #if 0 +#ifdef CONFIG_H261_ENCODER if (s->out_format == FMT_H261) ff_h261_encode_init(s); +#endif if (s->out_format == FMT_H263) h263_encode_init(s); if(s->msmpeg4_version) @@ -1462,7 +1467,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { int i; AVFrame *pic; - s->mb_skiped = 0; + s->mb_skipped = 0; assert(s->last_picture_ptr==NULL || s->out_format != 
FMT_H264 || s->codec_id == CODEC_ID_SVQ3); @@ -2024,8 +2029,8 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){ if(pts != AV_NOPTS_VALUE){ if(s->user_specified_pts != AV_NOPTS_VALUE){ - int64_t time= av_rescale(pts, s->avctx->frame_rate, s->avctx->frame_rate_base*(int64_t)AV_TIME_BASE); - int64_t last= av_rescale(s->user_specified_pts, s->avctx->frame_rate, s->avctx->frame_rate_base*(int64_t)AV_TIME_BASE); + int64_t time= pts; + int64_t last= s->user_specified_pts; if(time <= last){ av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%Ld, last=%Ld\n", pts, s->user_specified_pts); @@ -2036,10 +2041,10 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){ }else{ if(s->user_specified_pts != AV_NOPTS_VALUE){ s->user_specified_pts= - pts= s->user_specified_pts + AV_TIME_BASE*(int64_t)s->avctx->frame_rate_base / s->avctx->frame_rate; + pts= s->user_specified_pts + 1; av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pts); }else{ - pts= av_rescale(pic_arg->display_picture_number*(int64_t)s->avctx->frame_rate_base, AV_TIME_BASE, s->avctx->frame_rate); + pts= pic_arg->display_picture_number; } } } @@ -2235,7 +2240,7 @@ static void select_input_picture(MpegEncContext *s){ b_frames= i; } if(s->input_picture[b_frames]->pict_type == B_TYPE && b_frames == s->max_b_frames){ - av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n"); + av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n"); } if(s->picture_in_gop_number + b_frames >= s->gop_size){ @@ -2839,9 +2844,11 @@ if(s->quarter_sample) pix_op[s->chroma_x_shift][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift); pix_op[s->chroma_x_shift][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift); } +#if defined(CONFIG_H261_ENCODER) || defined(CONFIG_H261_DECODER) if(s->out_format == FMT_H261){ ff_h261_loop_filter(s); } +#endif } /* apply one mpeg motion vector to the three components */ @@ -3248,7 +3255,7 @@ static inline void MPV_motion(MpegEncContext *s, const int mot_stride= s->b8_stride; const int mot_xy= mb_x*2 + mb_y*2*mot_stride; - assert(!s->mb_skiped); + assert(!s->mb_skipped); memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4); memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); @@ -3707,7 +3714,7 @@ static always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM bloc int dct_linesize, dct_offset; op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; - const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics + const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics const int uvlinesize= s->current_picture.linesize[1]; const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag; const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8; @@ -3720,11 +3727,11 @@ static always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM bloc assert(age); - if (s->mb_skiped) { - s->mb_skiped= 0; + if (s->mb_skipped) { + s->mb_skipped= 0; assert(s->pict_type!=I_TYPE); - (*mbskip_ptr) ++; /* indicate that this time we skiped it */ + (*mbskip_ptr) ++; /* indicate that this time we skipped it */ if(*mbskip_ptr >99) *mbskip_ptr= 99; /* if previous was skipped too, then nothing to do ! 
*/ @@ -3754,7 +3761,7 @@ static always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM bloc if (!s->mb_intra) { /* motion handling */ - /* decoding or more than one mb_type (MC was allready done otherwise) */ + /* decoding or more than one mb_type (MC was already done otherwise) */ if(!s->encoding){ if(lowres_flag){ h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab; @@ -3964,7 +3971,7 @@ static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index } if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE) - av_log(s->avctx, AV_LOG_INFO, "warning, cliping %d dct coefficents to %d..%d\n", overflow, minlevel, maxlevel); + av_log(s->avctx, AV_LOG_INFO, "warning, clipping %d dct coefficients to %d..%d\n", overflow, minlevel, maxlevel); } #endif //CONFIG_ENCODERS @@ -4309,8 +4316,10 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_WMV2: ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break; +#ifdef CONFIG_H261_ENCODER case CODEC_ID_H261: ff_h261_encode_mb(s, s->block, motion_x, motion_y); break; +#endif case CODEC_ID_H263: case CODEC_ID_H263P: case CODEC_ID_FLV1: @@ -4398,7 +4407,7 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext d->misc_bits= s->misc_bits; d->last_bits= 0; - d->mb_skiped= 0; + d->mb_skipped= 0; d->qscale= s->qscale; d->dquant= s->dquant; } @@ -4425,7 +4434,7 @@ static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext * d->misc_bits= s->misc_bits; d->mb_intra= s->mb_intra; - d->mb_skiped= s->mb_skiped; + d->mb_skipped= s->mb_skipped; d->mv_type= s->mv_type; d->mv_dir= s->mv_dir; d->pb= s->pb; @@ -4714,11 +4723,13 @@ static int encode_thread(AVCodecContext *c, void *arg){ s->mb_y = mb_y; // moved into loop, can get changed by H.261 ff_update_block_index(s); +#ifdef CONFIG_H261_ENCODER if(s->codec_id == CODEC_ID_H261){ ff_h261_reorder_mb_index(s); xy= s->mb_y*s->mb_stride + s->mb_x; mb_type= s->mb_type[xy]; } +#endif /* write gob / video packet header */ if(s->rtp_mode){ @@ -4815,7 +4826,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ s->first_slice_line=0; } - s->mb_skiped=0; + s->mb_skipped=0; s->dquant=0; //only for QP_RD if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible or CODEC_FLAG_QP_RD @@ -4852,13 +4863,13 @@ static int encode_thread(AVCodecContext *c, void *arg){ encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } - if(mb_type&CANDIDATE_MB_TYPE_SKIPED){ + if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPED, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } if(mb_type&CANDIDATE_MB_TYPE_INTER4V){ @@ -5166,7 +5177,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ encode_mb(s, motion_x, motion_y); - // RAL: Update last macrobloc type + // RAL: Update last macroblock type s->last_mv_dir = s->mv_dir; if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) @@ -5206,9 +5217,9 @@ static int encode_thread(AVCodecContext *c, void *arg){ } } + //not beautiful here but we must write it before flushing so it has to be here /* xine: do not need this for decode or MPEG-1 encoding modes */ #if 0 - 
//not beautifull here but we must write it before flushing so it has to be here if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE) msmpeg4_encode_ext_header(s); #endif /* #if 0 */ @@ -5294,7 +5305,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->no_rounding ^= 1; } - s->mb_intra=0; //for the rate distoration & bit compare functions + s->mb_intra=0; //for the rate distortion & bit compare functions for(i=1; i<s->avctx->thread_count; i++){ ff_update_duplicate_context(s->thread_context[i], s); } @@ -5423,7 +5434,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) update_qscale(s); if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE)) - s->qscale= 3; //reduce cliping problems + s->qscale= 3; //reduce clipping problems if (s->out_format == FMT_MJPEG) { /* for mjpeg, we do include qscale in the matrix */ @@ -5431,7 +5442,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) for(i=1;i<64;i++){ int j= s->dsp.idct_permutation[i]; - s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3); + s->intra_matrix[j] = clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3); } convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, s->intra_matrix, s->intra_quant_bias, 8, 8, 1); @@ -5454,9 +5465,11 @@ static void encode_picture(MpegEncContext *s, int picture_number) case FMT_MJPEG: mjpeg_picture_header(s); break; +#ifdef CONFIG_H261_ENCODER case FMT_H261: ff_h261_encode_picture_header(s, picture_number); break; +#endif case FMT_H263: if (s->codec_id == CODEC_ID_WMV2) ff_wmv2_encode_picture_header(s, picture_number); @@ -5464,10 +5477,14 @@ static void encode_picture(MpegEncContext *s, int picture_number) msmpeg4_encode_picture_header(s, picture_number); else if (s->h263_pred) mpeg4_encode_picture_header(s, picture_number); +#ifdef CONFIG_RV10_ENCODER else if (s->codec_id == CODEC_ID_RV10) rv10_encode_picture_header(s, picture_number); +#endif +#ifdef CONFIG_RV20_ENCODER else if (s->codec_id == CODEC_ID_RV20) rv20_encode_picture_header(s, picture_number); +#endif else if (s->codec_id == CODEC_ID_FLV1) ff_flv_encode_picture_header(s, picture_number); else @@ -5631,7 +5648,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s, } } - *overflow= s->max_qcoeff < max; //overflow might have happend + *overflow= s->max_qcoeff < max; //overflow might have happened if(last_non_zero < start_i){ memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM)); @@ -5904,7 +5921,7 @@ static int messed_sign=0; #endif dc += (1<<(RECON_SHIFT-1)); for(i=0; i<64; i++){ - rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly insteadof copying to rem[] + rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[] } #ifdef REFINE_STATS STOP_TIMER("memset rem[]")} @@ -6283,7 +6300,7 @@ static int dct_quantize_c(MpegEncContext *s, block[j]=0; } } - *overflow= s->max_qcoeff < max; //overflow might have happend + *overflow= s->max_qcoeff < max; //overflow might have happened /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */ if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM) diff --git a/src/libffmpeg/libavcodec/mpegvideo.h b/src/libffmpeg/libavcodec/mpegvideo.h index a1c459e97..df67a469c 100644 --- a/src/libffmpeg/libavcodec/mpegvideo.h +++ b/src/libffmpeg/libavcodec/mpegvideo.h @@ -29,7 +29,7 @@ #include "dsputil.h" #include "bitstream.h" -#define FRAME_SKIPED 100 ///< return 
value for header parsers if frame is not coded +#define FRAME_SKIPPED 100 ///< return value for header parsers if frame is not coded enum OutputFormat { FMT_MPEG1, @@ -203,7 +203,7 @@ struct MpegEncContext; */ typedef struct MotionEstContext{ AVCodecContext *avctx; - int skip; ///< set if ME is skiped for the current MB + int skip; ///< set if ME is skipped for the current MB int co_located_mv[4][2]; ///< mv from last p frame for direct mode ME int direct_basis_mv[4][2]; uint8_t *scratchpad; ///< data area for the me algo, so that the ME doesnt need to malloc/free @@ -350,7 +350,7 @@ typedef struct MpegEncContext { int ac_pred; uint8_t *prev_pict_types; ///< previous picture types in bitstream order, used for mb skip #define PREV_PICT_TYPES_BUFFER_SIZE 256 - int mb_skiped; ///< MUST BE SET only during DECODING + int mb_skipped; ///< MUST BE SET only during DECODING uint8_t *mbskip_table; /**< used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encoding & decoding (contains skip table of next P Frame) */ uint8_t *mbintra_table; ///< used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding @@ -378,7 +378,7 @@ typedef struct MpegEncContext { /* motion compensation */ int unrestricted_mv; ///< mv can point outside of the coded picture int h263_long_vectors; ///< use horrible h263v1 long vector mode - int decode; ///< if 0 then decoding will be skiped (for encoding b frames for example) + int decode; ///< if 0 then decoding will be skipped (for encoding b frames for example) DSPContext dsp; ///< pointers for accelerated dsp fucntions int f_code; ///< forward MV resolution @@ -427,7 +427,7 @@ typedef struct MpegEncContext { int no_rounding; /**< apply no rounding to motion compensation (MPEG4, msmpeg4, ...) 
for b-frames rounding mode is allways 0 */ - int hurry_up; /**< when set to 1 during decoding, b frames will be skiped + int hurry_up; /**< when set to 1 during decoding, b frames will be skipped when set to 2 idct/dequant will be skipped too */ /* macroblock layer */ @@ -438,7 +438,7 @@ typedef struct MpegEncContext { #define CANDIDATE_MB_TYPE_INTRA 0x01 #define CANDIDATE_MB_TYPE_INTER 0x02 #define CANDIDATE_MB_TYPE_INTER4V 0x04 -#define CANDIDATE_MB_TYPE_SKIPED 0x08 +#define CANDIDATE_MB_TYPE_SKIPPED 0x08 //#define MB_TYPE_GMC 0x10 #define CANDIDATE_MB_TYPE_DIRECT 0x10 @@ -552,7 +552,6 @@ typedef struct MpegEncContext { int custom_pcf; /* mpeg4 specific */ - int time_increment_resolution; int time_increment_bits; ///< number of bits to represent the fractional part of time int last_time_base; int time_base; ///< time in seconds of last I,P,S Frame diff --git a/src/libffmpeg/libavcodec/msmpeg4.c b/src/libffmpeg/libavcodec/msmpeg4.c index a3140abb8..643ed33c6 100644 --- a/src/libffmpeg/libavcodec/msmpeg4.c +++ b/src/libffmpeg/libavcodec/msmpeg4.c @@ -177,6 +177,7 @@ static void common_init(MpegEncContext * s) s->y_dc_scale_table= wmv1_y_dc_scale_table; s->c_dc_scale_table= wmv1_c_dc_scale_table; break; +#if defined(CONFIG_WMV3_DECODER)||defined(CONFIG_VC9_DECODER) case 6: /* xine: comment this out as WMV3 support is incomplete */ #if 0 @@ -184,6 +185,7 @@ static void common_init(MpegEncContext * s) s->c_dc_scale_table= wmv3_dc_scale_table; #endif /* #if 0 */ break; +#endif } @@ -441,7 +443,7 @@ void msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number) void msmpeg4_encode_ext_header(MpegEncContext * s) { - put_bits(&s->pb, 5, s->avctx->frame_rate / s->avctx->frame_rate_base); //yes 29.97 -> 29 + put_bits(&s->pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29 put_bits(&s->pb, 11, FFMIN(s->bit_rate/1024, 2047)); @@ -1495,7 +1497,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - s->mb_skiped = 1; + s->mb_skipped = 1; return 0; } } @@ -1555,6 +1557,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) } } + s->dsp.clear_blocks(s->block[0]); for (i = 0; i < 6; i++) { if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { @@ -1583,7 +1586,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - s->mb_skiped = 1; + s->mb_skipped = 1; *mb_type_ptr = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; return 0; @@ -1647,6 +1650,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) } } + s->dsp.clear_blocks(s->block[0]); for (i = 0; i < 6; i++) { if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { diff --git a/src/libffmpeg/libavcodec/parser.c b/src/libffmpeg/libavcodec/parser.c index 4725d56c6..93bbf87d1 100644 --- a/src/libffmpeg/libavcodec/parser.c +++ b/src/libffmpeg/libavcodec/parser.c @@ -282,6 +282,7 @@ static const int frame_rate_tab[16] = { 25025, }; +//FIXME move into mpeg12.c static void mpegvideo_extract_headers(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t *buf, int buf_size) @@ -311,8 +312,8 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s, pc->height = ((buf[1] & 0x0f) << 8) | buf[2]; avcodec_set_dimensions(avctx, pc->width, pc->height); frame_rate_index = buf[3] & 0xf; - pc->frame_rate = avctx->frame_rate = frame_rate_tab[frame_rate_index]; - 
avctx->frame_rate_base = MPEG1_FRAME_RATE_BASE; + pc->frame_rate = avctx->time_base.den = frame_rate_tab[frame_rate_index]; + avctx->time_base.num = MPEG1_FRAME_RATE_BASE; avctx->bit_rate = ((buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6))*400; avctx->codec_id = CODEC_ID_MPEG1VIDEO; avctx->sub_id = 1; @@ -336,8 +337,8 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s, pc->height |=( vert_size_ext << 12); avctx->bit_rate += (bit_rate_ext << 18) * 400; avcodec_set_dimensions(avctx, pc->width, pc->height); - avctx->frame_rate = pc->frame_rate * (frame_rate_ext_n + 1); - avctx->frame_rate_base = MPEG1_FRAME_RATE_BASE * (frame_rate_ext_d + 1); + avctx->time_base.den = pc->frame_rate * (frame_rate_ext_n + 1); + avctx->time_base.num = MPEG1_FRAME_RATE_BASE * (frame_rate_ext_d + 1); avctx->codec_id = CODEC_ID_MPEG2VIDEO; avctx->sub_id = 2; /* forces MPEG2 */ } @@ -406,7 +407,7 @@ static int mpegvideo_parse(AVCodecParserContext *s, mpegvideo_extract_headers(s, avctx, buf, buf_size); #if 0 printf("pict_type=%d frame_rate=%0.3f repeat_pict=%d\n", - s->pict_type, (double)avctx->frame_rate / avctx->frame_rate_base, s->repeat_pict); + s->pict_type, (double)avctx->time_base.den / avctx->time_base.num, s->repeat_pict); #endif *poutbuf = (uint8_t *)buf; diff --git a/src/libffmpeg/libavcodec/ratecontrol.c b/src/libffmpeg/libavcodec/ratecontrol.c index 19641d453..0fc9caa31 100644 --- a/src/libffmpeg/libavcodec/ratecontrol.c +++ b/src/libffmpeg/libavcodec/ratecontrol.c @@ -79,7 +79,7 @@ int ff_rate_control_init(MpegEncContext *s) rcc->entry = (RateControlEntry*)av_mallocz(i*sizeof(RateControlEntry)); rcc->num_entries= i; - /* init all to skiped p frames (with b frames we might have a not encoded frame at the end FIXME) */ + /* init all to skipped p frames (with b frames we might have a not encoded frame at the end FIXME) */ for(i=0; i<rcc->num_entries; i++){ RateControlEntry *rce= &rcc->entry[i]; rce->pict_type= rce->new_pict_type=P_TYPE; @@ -166,7 +166,7 @@ int ff_rate_control_init(MpegEncContext *s) bits= rce.i_tex_bits + rce.p_tex_bits; q= get_qscale(s, &rce, rcc->pass1_wanted_bits/rcc->pass1_rc_eq_output_sum, i); - rcc->pass1_wanted_bits+= s->bit_rate/(s->avctx->frame_rate / (double)s->avctx->frame_rate_base); + rcc->pass1_wanted_bits+= s->bit_rate/(1/av_q2d(s->avctx->time_base)); //FIXME missbehaves a little for variable fps } } @@ -199,7 +199,7 @@ static inline double bits2qp(RateControlEntry *rce, double bits){ int ff_vbv_update(MpegEncContext *s, int frame_size){ RateControlContext *rcc= &s->rc_context; - const double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; + const double fps= 1/av_q2d(s->avctx->time_base); const int buffer_size= s->avctx->rc_buffer_size; const double min_rate= s->avctx->rc_min_rate/fps; const double max_rate= s->avctx->rc_max_rate/fps; @@ -400,7 +400,7 @@ static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q, double bits; const int pict_type= rce->new_pict_type; const double buffer_size= s->avctx->rc_buffer_size; - const double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; + const double fps= 1/av_q2d(s->avctx->time_base); const double min_rate= s->avctx->rc_min_rate / fps; const double max_rate= s->avctx->rc_max_rate / fps; @@ -631,7 +631,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) get_qminmax(&qmin, &qmax, s, pict_type); - fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; + fps= 1/av_q2d(s->avctx->time_base); //printf("input_pic_num:%d pic_num:%d frame_rate:%d\n", 
s->input_picture_number, s->picture_number, s->frame_rate); /* update predictors */ if(picture_number>2){ @@ -757,7 +757,7 @@ static int init_pass2(MpegEncContext *s) RateControlContext *rcc= &s->rc_context; AVCodecContext *a= s->avctx; int i; - double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; + double fps= 1/av_q2d(s->avctx->time_base); double complexity[5]={0,0,0,0,0}; // aproximate bits at quant=1 double avg_quantizer[5]; uint64_t const_bits[5]={0,0,0,0,0}; // quantizer idependant bits diff --git a/src/libffmpeg/libavcodec/rpza.c b/src/libffmpeg/libavcodec/rpza.c index 317c240b9..6b1510a4b 100644 --- a/src/libffmpeg/libavcodec/rpza.c +++ b/src/libffmpeg/libavcodec/rpza.c @@ -92,7 +92,7 @@ static void rpza_decode_stream(RpzaContext *s) /* First byte is always 0xe1. Warn if it's different */ if (s->buf[stream_ptr] != 0xe1) - av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0x1e\n", + av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0xe1\n", s->buf[stream_ptr]); /* Get chunk size, ingnoring first byte */ diff --git a/src/libffmpeg/libavcodec/rv10.c b/src/libffmpeg/libavcodec/rv10.c index 884be9c7c..8183391e9 100644 --- a/src/libffmpeg/libavcodec/rv10.c +++ b/src/libffmpeg/libavcodec/rv10.c @@ -440,21 +440,23 @@ static int rv20_decode_picture_header(MpegEncContext *s) } seq= get_bits(&s->gb, 14)<<1; - if(v>1 || (s->avctx->sub_id < 0x20201002 && v>0)){ - f= get_bits(&s->gb, av_log2(v-1)+1); - } - + if(v) + f= get_bits(&s->gb, av_log2(v)); + if(s->avctx->debug & FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, v); } + }else{ + seq= get_bits(&s->gb, 8)*128; + } +// if(s->avctx->sub_id <= 0x20201002){ //0x20201002 definitely needs this + mb_pos= ff_h263_decode_mba(s); +/* }else{ mb_pos= get_bits(&s->gb, av_log2(s->mb_num-1)+1); s->mb_x= mb_pos % s->mb_width; s->mb_y= mb_pos / s->mb_width; - }else{ - seq= get_bits(&s->gb, 8)*128; - mb_pos= ff_h263_decode_mba(s); - } + }*/ //av_log(s->avctx, AV_LOG_DEBUG, "%d\n", seq); seq |= s->time &~0x7FFF; if(seq - s->time > 0x4000) seq -= 0x8000; @@ -468,8 +470,8 @@ static int rv20_decode_picture_header(MpegEncContext *s) s->time= seq; s->pb_time= s->pp_time - (s->last_non_b_time - s->time); if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){ - av_log(s->avctx, AV_LOG_DEBUG, "messed up order, seeking?, skiping current b frame\n"); - return FRAME_SKIPED; + av_log(s->avctx, AV_LOG_DEBUG, "messed up order, possible from seeking? skipping current b frame\n"); + return FRAME_SKIPPED; } } } @@ -535,14 +537,17 @@ static int rv10_decode_init(AVCodecContext *avctx) s->h263_long_vectors=0; s->low_delay=1; break; - case 0x20001000: - case 0x20100001: + case 0x20001000: /* real rv20 decoder fail on this id */ + /*case 0x20100001: case 0x20101001: - case 0x20103001: + case 0x20103001:*/ + case 0x20100000 ... 0x2019ffff: s->low_delay=1; break; - case 0x20200002: + /*case 0x20200002: case 0x20201002: + case 0x20203002:*/ + case 0x20200002 ... 0x202fffff: case 0x30202002: case 0x30203002: s->low_delay=0; @@ -555,7 +560,9 @@ static int rv10_decode_init(AVCodecContext *avctx) if(avctx->debug & FF_DEBUG_PICT_INFO){ av_log(avctx, AV_LOG_DEBUG, "ver:%X ver0:%X\n", avctx->sub_id, avctx->extradata_size >= 4 ? 
((uint32_t*)avctx->extradata)[0] : -1); } - + + avctx->pix_fmt = PIX_FMT_YUV420P; + if (MPV_common_init(s) < 0) return -1; @@ -571,8 +578,6 @@ static int rv10_decode_init(AVCodecContext *avctx) rv_chrom_code, 2, 2, 1); done = 1; } - - avctx->pix_fmt = PIX_FMT_YUV420P; return 0; } @@ -664,7 +669,6 @@ static int rv10_decode_packet(AVCodecContext *avctx, printf("**mb x=%d y=%d\n", s->mb_x, s->mb_y); #endif - s->dsp.clear_blocks(s->block[0]); s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; ret=ff_h263_decode_mb(s, s->block); diff --git a/src/libffmpeg/libavcodec/snow.c b/src/libffmpeg/libavcodec/snow.c index 9cfddfa95..e34cbac0f 100644 --- a/src/libffmpeg/libavcodec/snow.c +++ b/src/libffmpeg/libavcodec/snow.c @@ -2950,7 +2950,7 @@ static void quantize(SnowContext *s, SubBand *b, DWTELEM *src, int stride, int b } } -static void dequantize_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, DWTELEM *src, int stride){ +static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, DWTELEM *src, int stride, int start_y, int end_y){ const int w= b->width; const int h= b->height; const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16); @@ -2961,7 +2961,7 @@ static void dequantize_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, D if(s->qlog == LOSSLESS_QLOG) return; - for(y=0; y<h; y++){ + for(y=start_y; y<end_y; y++){ // DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride)); DWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset; for(x=0; x<w; x++){ @@ -3028,7 +3028,7 @@ static void decorrelate(SnowContext *s, SubBand *b, DWTELEM *src, int stride, in } } -static void correlate_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, DWTELEM *src, int stride, int inverse, int use_median){ +static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, DWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){ const int w= b->width; const int h= b->height; int x,y; @@ -3038,7 +3038,10 @@ static void correlate_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, DW DWTELEM * line; DWTELEM * prev; - for(y=0; y<h; y++){ + if (start_y != 0) + line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset; + + for(y=start_y; y<end_y; y++){ prev = line; // line = slice_buffer_get_line_from_address(sb, src + (y * stride)); line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset; @@ -3338,7 +3341,7 @@ static int encode_init(AVCodecContext *avctx) int plane_index; if(avctx->strict_std_compliance >= 0){ - av_log(avctx, AV_LOG_ERROR, "this codec is under development, files encoded with it wont be decodeable with future versions!!!\n" + av_log(avctx, AV_LOG_ERROR, "this codec is under development, files encoded with it may not be decodable with future versions!!!\n" "use vstrict=-1 / -strict -1 to use it anyway\n"); return -1; } @@ -3649,15 +3652,13 @@ static int decode_init(AVCodecContext *avctx) { SnowContext *s = avctx->priv_data; int block_size; + + avctx->pix_fmt= PIX_FMT_YUV420P; common_init(avctx); block_size = MB_SIZE >> s->block_max_depth; - /* FIXME block_size * 2 is determined empirically. block_size * 1.5 is definitely needed, but I (Robert) cannot figure out why more than that is needed. Perhaps there is a bug, or perhaps I overlooked some demands that are placed on the buffer. */ - /* FIXME The formula is WRONG. For height > 480, the buffer will overflow. 
*/ - /* FIXME For now, I will use a full frame of lines. Fortunately, this should not materially effect cache performance because lines are allocated using a stack, so if in fact only 50 out of 496 lines are needed at a time, the other 446 will sit allocated but never accessed. */ -// slice_buffer_init(s->plane[0].sb, s->plane[0].height, (block_size * 2) + (s->spatial_decomposition_count * s->spatial_decomposition_count), s->plane[0].width, s->spatial_dwt_buffer); - slice_buffer_init(&s->sb, s->plane[0].height, s->plane[0].height, s->plane[0].width, s->spatial_dwt_buffer); + slice_buffer_init(&s->sb, s->plane[0].height, (block_size) + (s->spatial_decomposition_count * (s->spatial_decomposition_count + 2)) + 1, s->plane[0].width, s->spatial_dwt_buffer); return 0; } @@ -3689,7 +3690,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8 int h= p->height; int x, y; int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */ - SubBand * correlate_band; if(s->avctx->debug&2048){ memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h); @@ -3712,12 +3712,6 @@ if(s->avctx->debug&2048){ } STOP_TIMER("unpack coeffs"); } - - /* Handle level 0, orientation 0 specially. It is particularly resistant to slicing but fortunately quite small, so process it in one pass. */ - correlate_band = &p->band[0][0]; - decode_subband_slice_buffered(s, correlate_band, &s->sb, 0, correlate_band->height, decode_state[0][0]); - correlate_buffered(s, &s->sb, correlate_band, correlate_band->buf, correlate_band->stride, 1, 0); - dequantize_buffered(s, &s->sb, correlate_band, correlate_band->buf, correlate_band->stride); {START_TIMER const int mb_h= s->b_height << s->block_max_depth; @@ -3732,23 +3726,43 @@ if(s->avctx->debug&2048){ ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count); for(mb_y=0; mb_y<=mb_h; mb_y++){ - const int slice_starty = block_w*mb_y; - const int slice_h = block_w*(mb_y+1); + int slice_starty = block_w*mb_y; + int slice_h = block_w*(mb_y+1); + if (!(s->keyframe || s->avctx->debug&512)){ + slice_starty = FFMAX(0, slice_starty - (block_w >> 1)); + slice_h -= (block_w >> 1); + } { START_TIMER for(level=0; level<s->spatial_decomposition_count; level++){ - for(orientation=level ? 1 : 1; orientation<4; orientation++){ + for(orientation=level ? 1 : 0; orientation<4; orientation++){ SubBand *b= &p->band[level][orientation]; int start_y; int end_y; int our_mb_start = mb_y; int our_mb_end = (mb_y + 1); - start_y = FFMIN(b->height, (mb_y ? ((block_w * our_mb_start - 4) >> (s->spatial_decomposition_count - level)) + 5 : 0)); - end_y = FFMIN(b->height, (((block_w * our_mb_end - 4) >> (s->spatial_decomposition_count - level)) + 5)); - - if (start_y != end_y) - decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]); + start_y = (mb_y ? 
((block_w * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + 2: 0); + end_y = (((block_w * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + 2); + if (!(s->keyframe || s->avctx->debug&512)){ + start_y = FFMAX(0, start_y - (block_w >> (1+s->spatial_decomposition_count - level))); + end_y = FFMAX(0, end_y - (block_w >> (1+s->spatial_decomposition_count - level))); + } + start_y = FFMIN(b->height, start_y); + end_y = FFMIN(b->height, end_y); + + if (start_y != end_y){ + if (orientation == 0){ + SubBand * correlate_band = &p->band[0][0]; + int correlate_end_y = FFMIN(b->height, end_y + 1); + int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0)); + decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]); + correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->buf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y); + dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->buf, correlate_band->stride, start_y, end_y); + } + else + decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]); + } } } STOP_TIMER("decode_subband_slice"); @@ -3772,16 +3786,8 @@ if(s->avctx->debug&2048){ predict_slice_buffered(s, &s->sb, s->spatial_dwt_buffer, plane_index, 1, mb_y); - /* Nasty hack based empirically on how predict_slice_buffered() hits the buffer. */ - /* FIXME If possible, make predict_slice fit into the slice. As of now, it works on some previous lines (up to slice_height / 2) if the condition on the next line is false. */ - if (s->keyframe || (s->avctx->debug&512)){ - y = FFMIN(p->height, slice_starty); - end_y = FFMIN(p->height, slice_h); - } - else{ - y = FFMAX(0, FFMIN(p->height, slice_starty - (block_w >> 1))); - end_y = FFMAX(0, FFMIN(p->height, slice_h - (block_w >> 1))); - } + y = FFMIN(p->height, slice_starty); + end_y = FFMIN(p->height, slice_h); while(y < end_y) slice_buffer_release(&s->sb, y++); } diff --git a/src/libffmpeg/libavcodec/utils.c b/src/libffmpeg/libavcodec/utils.c index afff2f98a..b5bd17f4a 100644 --- a/src/libffmpeg/libavcodec/utils.c +++ b/src/libffmpeg/libavcodec/utils.c @@ -442,8 +442,7 @@ void avcodec_get_context_defaults(AVCodecContext *s){ s->error_concealment= 3; s->error_resilience= 1; s->workaround_bugs= FF_BUG_AUTODETECT; - s->frame_rate_base= 1; - s->frame_rate = 25; + s->time_base= (AVRational){0,1}; s->gop_size= 50; s->me_method= ME_EPZS; s->get_buffer= avcodec_default_get_buffer; @@ -459,6 +458,7 @@ void avcodec_get_context_defaults(AVCodecContext *s){ s->profile= FF_PROFILE_UNKNOWN; s->level= FF_LEVEL_UNKNOWN; s->me_penalty_compensation= 256; + s->pix_fmt= PIX_FMT_NONE; s->intra_quant_bias= FF_DEFAULT_QUANT_BIAS; s->inter_quant_bias= FF_DEFAULT_QUANT_BIAS; @@ -724,7 +724,7 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) snprintf(buf, buf_size, "Video: %s%s", codec_name, enc->mb_decision ? 
" (hq)" : ""); - if (enc->codec_id == CODEC_ID_RAWVIDEO) { + if (enc->pix_fmt != PIX_FMT_NONE) { snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %s", avcodec_get_pix_fmt_name(enc->pix_fmt)); @@ -733,7 +733,7 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", %dx%d, %0.2f fps", enc->width, enc->height, - (float)enc->frame_rate / enc->frame_rate_base); + 1/av_q2d(enc->time_base)); } if (encode) { snprintf(buf + strlen(buf), buf_size - strlen(buf), @@ -929,6 +929,12 @@ int64_t av_rescale(int64_t a, int64_t b, int64_t c){ return av_rescale_rnd(a, b, c, AV_ROUND_NEAR_INF); } +int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq){ + int64_t b= bq.num * (int64_t)cq.den; + int64_t c= cq.num * (int64_t)bq.den; + return av_rescale_rnd(a, b, c, AV_ROUND_NEAR_INF); +} + int64_t ff_gcd(int64_t a, int64_t b){ if(b) return ff_gcd(b, a%b); else return a; diff --git a/src/libffmpeg/libavcodec/wmv2.c b/src/libffmpeg/libavcodec/wmv2.c index b6376d80b..949d7c640 100644 --- a/src/libffmpeg/libavcodec/wmv2.c +++ b/src/libffmpeg/libavcodec/wmv2.c @@ -68,7 +68,7 @@ static int encode_ext_header(Wmv2Context *w){ init_put_bits(&pb, s->avctx->extradata, s->avctx->extradata_size); - put_bits(&pb, 5, s->avctx->frame_rate / s->avctx->frame_rate_base); //yes 29.97 -> 29 + put_bits(&pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); //yes 29.97 -> 29 put_bits(&pb, 11, FFMIN(s->bit_rate/1024, 2047)); put_bits(&pb, 1, w->mspel_bit=1); @@ -474,7 +474,7 @@ s->picture_number++; //FIXME ? // return wmv2_decode_j_picture(w); //FIXME if(w->j_type){ - av_log(s->avctx, AV_LOG_ERROR, "J-type picture isnt supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "J-type picture is not supported\n"); return -1; } @@ -723,7 +723,7 @@ static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) s->mv_type = MV_TYPE_16X16; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - s->mb_skiped = 1; + s->mb_skipped = 1; w->hshift=0; return 0; } @@ -760,6 +760,7 @@ static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) wmv2_pred_motion(w, &mx, &my); if(cbp){ + s->dsp.clear_blocks(s->block[0]); if(s->per_mb_rl_table){ s->rl_table_index = decode012(&s->gb); s->rl_chroma_table_index = s->rl_table_index; @@ -802,6 +803,7 @@ static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) s->rl_chroma_table_index = s->rl_table_index; } + s->dsp.clear_blocks(s->block[0]); for (i = 0; i < 6; i++) { if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { |