Diffstat (limited to 'src'): 82 files changed, 8089 insertions, 3790 deletions
diff --git a/src/libffmpeg/libavcodec/4xm.c b/src/libffmpeg/libavcodec/4xm.c index 03e4a175c..6544d88d0 100644 --- a/src/libffmpeg/libavcodec/4xm.c +++ b/src/libffmpeg/libavcodec/4xm.c @@ -333,7 +333,7 @@ static int decode_p_frame(FourXContext *f, uint8_t *buf, int length){ const int wordstream_size= get32(buf+12); if(bitstream_size+ bytestream_size+ wordstream_size + 20 != length) - printf("lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size, + av_log(f->avctx, AV_LOG_ERROR, "lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size, bitstream_size+ bytestream_size+ wordstream_size - length); f->bitstream_buffer= av_fast_realloc(f->bitstream_buffer, &f->bitstream_buffer_size, bitstream_size + FF_INPUT_BUFFER_PADDING_SIZE); @@ -354,7 +354,7 @@ static int decode_p_frame(FourXContext *f, uint8_t *buf, int length){ } if(bitstream_size != (get_bits_count(&f->gb)+31)/32*4) - printf(" %d %d %d bytes left\n", + av_log(f->avctx, AV_LOG_ERROR, " %d %d %d bytes left\n", bitstream_size - (get_bits_count(&f->gb)+31)/32*4, bytestream_size - (f->bytestream - (buf + 20 + bitstream_size + wordstream_size)), wordstream_size - (((uint8_t*)f->wordstream) - (buf + 20 + bitstream_size)) @@ -373,7 +373,7 @@ static int decode_i_block(FourXContext *f, DCTELEM *block){ /* DC coef */ val = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3); if (val>>4){ - printf("error dc run != 0\n"); + av_log(f->avctx, AV_LOG_ERROR, "error dc run != 0\n"); } if(val) @@ -396,7 +396,7 @@ static int decode_i_block(FourXContext *f, DCTELEM *block){ level = get_xbits(&f->gb, code & 0xf); i += code >> 4; if (i >= 64) { - printf("run %d oveflow\n", i); + av_log(f->avctx, AV_LOG_ERROR, "run %d oveflow\n", i); return 0; } @@ -535,7 +535,7 @@ static uint8_t *read_huffman_tables(FourXContext *f, uint8_t * const buf){ for(node= j; up[node] != -1; node= up[node]){ bits += flag[node]<<len; len++; - if(len > 31) printf("vlc length overflow\n"); //can this happen at all ? + if(len > 31) av_log(f->avctx, AV_LOG_ERROR, "vlc length overflow\n"); //can this happen at all ? 
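/*
 * A minimal sketch of the av_log() error-reporting pattern this sync applies
 * throughout (printf/fprintf(stderr, ...) -> av_log(avctx, AV_LOG_ERROR, ...)),
 * based only on the declarations the avcodec.h hunk later in this diff adds
 * (av_log, AV_LOG_ERROR/INFO/DEBUG, av_log_get_level, av_log_set_level,
 * av_log_set_callback). The function names check_sizes, my_log_cb and
 * install_logging are illustrative assumptions, not part of the commit.
 */
#include <stdarg.h>
#include <stdio.h>
#include "avcodec.h"

/* decoder side: report through the codec context so the caller can filter or route it */
static int check_sizes(AVCodecContext *avctx, int expected, int got)
{
    if (expected != got) {
        av_log(avctx, AV_LOG_ERROR, "size mismatch %d %d\n", expected, got);
        return -1;
    }
    return 0;
}

/* application side: optional custom sink installed once at startup */
static void my_log_cb(AVCodecContext *avctx, int level, const char *fmt, va_list vl)
{
    /* lower value = more severe: AV_LOG_ERROR(0) < AV_LOG_INFO(1) < AV_LOG_DEBUG(2) */
    if (level <= av_log_get_level())
        vfprintf(stderr, fmt, vl);
}

static void install_logging(void)
{
    av_log_set_level(AV_LOG_INFO);      /* show errors and info, drop debug */
    av_log_set_callback(my_log_cb);
}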
} bits_tab[j]= bits; @@ -561,7 +561,7 @@ static int decode_i_frame(FourXContext *f, uint8_t *buf, int length){ uint8_t *prestream= buf + bitstream_size + 12; if(prestream_size + bitstream_size + 12 != length) - fprintf(stderr, "size missmatch %d %d %d\n", prestream_size, bitstream_size, length); + av_log(f->avctx, AV_LOG_ERROR, "size missmatch %d %d %d\n", prestream_size, bitstream_size, length); prestream= read_huffman_tables(f, prestream); @@ -586,7 +586,7 @@ static int decode_i_frame(FourXContext *f, uint8_t *buf, int length){ } if(get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3) != 256) - printf("end missmatch\n"); + av_log(f->avctx, AV_LOG_ERROR, "end missmatch\n"); return 0; } @@ -609,7 +609,7 @@ static int decode_frame(AVCodecContext *avctx, frame_4cc= get32(buf); if(buf_size != get32(buf+4)+8){ - fprintf(stderr, "size missmatch %d %d\n", buf_size, get32(buf+4)); + av_log(f->avctx, AV_LOG_ERROR, "size missmatch %d %d\n", buf_size, get32(buf+4)); } if(frame_4cc == ff_get_fourcc("cfrm")){ @@ -621,7 +621,7 @@ static int decode_frame(AVCodecContext *avctx, for(i=0; i<CFRAME_BUFFER_COUNT; i++){ if(f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number) - printf("lost c frame %d\n", f->cfrm[i].id); + av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n", f->cfrm[i].id); } for(i=0; i<CFRAME_BUFFER_COUNT; i++){ @@ -645,7 +645,7 @@ static int decode_frame(AVCodecContext *avctx, frame_size= cfrm->size; if(id != avctx->frame_number){ - printf("cframe id missmatch %d %d\n", id, avctx->frame_number); + av_log(f->avctx, AV_LOG_ERROR, "cframe id missmatch %d %d\n", id, avctx->frame_number); } cfrm->size= cfrm->id= 0; @@ -671,7 +671,7 @@ static int decode_frame(AVCodecContext *avctx, p->reference= 1; if(avctx->get_buffer(avctx, p) < 0){ - fprintf(stderr, "get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } @@ -684,9 +684,9 @@ static int decode_frame(AVCodecContext *avctx, if(decode_p_frame(f, buf, frame_size) < 0) return -1; }else if(frame_4cc == ff_get_fourcc("snd_")){ - printf("ignoring snd_ chunk length:%d\n", buf_size); + av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n", buf_size); }else{ - printf("ignoring unknown chunk length:%d\n", buf_size); + av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", buf_size); } #if 0 diff --git a/src/libffmpeg/libavcodec/Makefile.am b/src/libffmpeg/libavcodec/Makefile.am index bc655aa89..8006d8e94 100644 --- a/src/libffmpeg/libavcodec/Makefile.am +++ b/src/libffmpeg/libavcodec/Makefile.am @@ -13,6 +13,7 @@ noinst_LTLIBRARIES = libavcodec.la libavcodec_la_SOURCES = \ 4xm.c \ + 8bps.c \ adpcm.c \ asv1.c \ cabac.c \ @@ -24,6 +25,8 @@ libavcodec_la_SOURCES = \ dv.c \ error_resilience.c \ eval.c \ + faandct.c \ + flicvideo.c \ fft.c \ golomb.c \ h263.c \ @@ -37,6 +40,7 @@ libavcodec_la_SOURCES = \ jfdctfst.c \ jfdctint.c \ jrevdct.c \ + lcl.c \ mdct.c \ mace.c \ mem.c \ @@ -57,9 +61,12 @@ libavcodec_la_SOURCES = \ rpza.c \ rv10.c \ simple_idct.c \ + smc.c \ svq1.c \ + truemotion1.c \ utils.c \ vcr1.c \ + vmdav.c \ vp3.c \ vqavideo.c \ wmadec.c \ @@ -79,7 +86,6 @@ noinst_HEADERS = \ common.h \ dsputil.h \ dvdata.h \ - fastmemcpy.h \ golomb.h \ imgconvert_template.h \ indeo3data.h \ @@ -101,5 +107,6 @@ noinst_HEADERS = \ sp5x.h \ svq1_cb.h \ svq1_vlc.h \ + truemotion1data.h \ vp3data.h \ wmadata.h diff --git a/src/libffmpeg/libavcodec/adpcm.c b/src/libffmpeg/libavcodec/adpcm.c index a6ecaf543..bced66f19 100644 --- a/src/libffmpeg/libavcodec/adpcm.c +++ b/src/libffmpeg/libavcodec/adpcm.c @@ -129,7 
+129,7 @@ static int adpcm_encode_init(AVCodecContext *avctx) return -1; /* only stereo or mono =) */ switch(avctx->codec->id) { case CODEC_ID_ADPCM_IMA_QT: - fprintf(stderr, "ADPCM: codec admcp_ima_qt unsupported for encoding !\n"); + av_log(avctx, AV_LOG_ERROR, "ADPCM: codec adpcm_ima_qt unsupported for encoding !\n"); avctx->frame_size = 64; /* XXX: can multiple of avctx->channels * 64 (left and right blocks are interleaved) */ return -1; break; @@ -140,7 +140,7 @@ static int adpcm_encode_init(AVCodecContext *avctx) /* seems frame_size isn't taken into account... have to buffer the samples :-( */ break; case CODEC_ID_ADPCM_MS: - fprintf(stderr, "ADPCM: codec admcp_ms unsupported for encoding !\n"); + av_log(avctx, AV_LOG_ERROR, "ADPCM: codec adpcm_ms unsupported for encoding !\n"); return -1; break; default: @@ -299,7 +299,7 @@ static int adpcm_decode_init(AVCodecContext * avctx) return 0; } -static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble) +static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift) { int step_index; int predictor; @@ -315,34 +315,7 @@ static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble) /* perform direct multiplication instead of series of jumps proposed by * the reference ADPCM implementation since modern CPUs can do the mults * quickly enough */ - diff = ((2 * delta + 1) * step) >> 3; - predictor = c->predictor; - if (sign) predictor -= diff; - else predictor += diff; - - CLAMP_TO_SHORT(predictor); - c->predictor = predictor; - c->step_index = step_index; - - return (short)predictor; -} - -static inline short adpcm_4xa_expand_nibble(ADPCMChannelStatus *c, char nibble) -{ - int step_index; - int predictor; - int sign, delta, diff, step; - - step = step_table[c->step_index]; - step_index = c->step_index + index_table[(unsigned)nibble]; - if (step_index < 0) step_index = 0; - else if (step_index > 88) step_index = 88; - - sign = nibble & 8; - delta = nibble & 7; - - diff = (delta*step + (step>>1))>>3; // difference to code above - + diff = ((2 * delta + 1) * step) >> shift; predictor = c->predictor; if (sign) predictor -= diff; else predictor += diff; @@ -471,6 +444,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx, int decode_top_nibble_next = 0; int diff_channel; + if (!buf_size) + return 0; + samples = data; src = buf; @@ -496,7 +472,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, cs->step_index = (*src++) & 0x7F; - if (cs->step_index > 88) fprintf(stderr, "ERROR: step_index = %i\n", cs->step_index); + if (cs->step_index > 88) av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index); if (cs->step_index > 88) cs->step_index = 88; cs->step = step_table[cs->step_index]; @@ -504,70 +480,49 @@ static int adpcm_decode_frame(AVCodecContext *avctx, if (st && channel) samples++; - *samples++ = cs->predictor; - samples += st; - for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chuncks of 34 bytes (=64 samples) */ - *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F); + *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3); samples += avctx->channels; - *samples = adpcm_ima_expand_nibble(cs, (src[0] >> 4) & 0x0F); + *samples = adpcm_ima_expand_nibble(cs, (src[0] >> 4) & 0x0F, 3); samples += avctx->channels; src ++; } if(st) { /* handle stereo interlacing */ c->channel = (channel + 1) % 2; /* we get one packet for left, then one for right data */ - if(channel == 0) { /* wait for the other packet before outputing anything */ + 
if(channel == 1) { /* wait for the other packet before outputing anything */ *data_size = 0; return src - buf; } } break; case CODEC_ID_ADPCM_IMA_WAV: - if (buf_size > BLKSIZE) { - if (avctx->block_align != 0) - buf_size = avctx->block_align; - else - buf_size = BLKSIZE; - } - // XXX: do as per-channel loop - cs = &(c->status[0]); - cs->predictor = (*src++) & 0x0FF; - cs->predictor |= ((*src++) << 8) & 0x0FF00; - if(cs->predictor & 0x8000) - cs->predictor -= 0x10000; - CLAMP_TO_SHORT(cs->predictor); + if (avctx->block_align != 0 && buf_size > avctx->block_align) + buf_size = avctx->block_align; - // XXX: is this correct ??: *samples++ = cs->predictor; - - cs->step_index = *src++; - if (cs->step_index < 0) cs->step_index = 0; - if (cs->step_index > 88) cs->step_index = 88; - if (*src++) fprintf(stderr, "unused byte should be null !!\n"); /* unused */ - - if (st) { - cs = &(c->status[1]); - cs->predictor = (*src++) & 0x0FF; - cs->predictor |= ((*src++) << 8) & 0x0FF00; + for(i=0; i<avctx->channels; i++){ + cs = &(c->status[i]); + cs->predictor = *src++; + cs->predictor |= (*src++) << 8; if(cs->predictor & 0x8000) cs->predictor -= 0x10000; CLAMP_TO_SHORT(cs->predictor); - // XXX: is this correct ??: *samples++ = cs->predictor; + // XXX: is this correct ??: *samples++ = cs->predictor; - cs->step_index = *src++; + cs->step_index = *src++; if (cs->step_index < 0) cs->step_index = 0; if (cs->step_index > 88) cs->step_index = 88; - src++; /* if != 0 -> out-of-sync */ + if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null !!\n"); /* unused */ } for(m=4; src < (buf + buf_size);) { - *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] & 0x0F); + *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] & 0x0F, 3); if (st) - *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[4] & 0x0F); - *samples++ = adpcm_ima_expand_nibble(&c->status[0], (src[0] >> 4) & 0x0F); + *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[4] & 0x0F, 3); + *samples++ = adpcm_ima_expand_nibble(&c->status[0], (src[0] >> 4) & 0x0F, 3); if (st) { - *samples++ = adpcm_ima_expand_nibble(&c->status[1], (src[4] >> 4) & 0x0F); + *samples++ = adpcm_ima_expand_nibble(&c->status[1], (src[4] >> 4) & 0x0F, 3); if (!--m) { m=4; src+=4; @@ -586,32 +541,25 @@ static int adpcm_decode_frame(AVCodecContext *avctx, if(st){ c->status[1].step_index= (int16_t)(src[0] + (src[1]<<8)); src+=2; } -// if (cs->step_index < 0) cs->step_index = 0; -// if (cs->step_index > 88) cs->step_index = 88; + if (cs->step_index < 0) cs->step_index = 0; + if (cs->step_index > 88) cs->step_index = 88; m= (buf_size - (src - buf))>>st; -//printf("%d %d %d %d\n", st, m, c->status[0].predictor, c->status[0].step_index); - //FIXME / XXX decode chanels individual & interleave samples for(i=0; i<m; i++) { - *samples++ = adpcm_4xa_expand_nibble(&c->status[0], src[i] & 0x0F); + *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4); if (st) - *samples++ = adpcm_4xa_expand_nibble(&c->status[1], src[i+m] & 0x0F); - *samples++ = adpcm_4xa_expand_nibble(&c->status[0], src[i] >> 4); + *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4); + *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4); if (st) - *samples++ = adpcm_4xa_expand_nibble(&c->status[1], src[i+m] >> 4); + *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4); } src += m<<st; break; case CODEC_ID_ADPCM_MS: - - if (buf_size > BLKSIZE) { - if (avctx->block_align != 0) - buf_size = avctx->block_align; - else - buf_size = 
BLKSIZE; - } + if (avctx->block_align != 0 && buf_size > avctx->block_align) + buf_size = avctx->block_align; n = buf_size - 7 * avctx->channels; if (n < 0) return -1; @@ -656,12 +604,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx, } break; case CODEC_ID_ADPCM_IMA_DK4: - if (buf_size > BLKSIZE) { - if (avctx->block_align != 0) - buf_size = avctx->block_align; - else - buf_size = BLKSIZE; - } + if (avctx->block_align != 0 && buf_size > avctx->block_align) + buf_size = avctx->block_align; + c->status[0].predictor = (src[0] | (src[1] << 8)); c->status[0].step_index = src[2]; src += 4; @@ -680,27 +625,24 @@ static int adpcm_decode_frame(AVCodecContext *avctx, /* take care of the top nibble (always left or mono channel) */ *samples++ = adpcm_ima_expand_nibble(&c->status[0], - (src[0] >> 4) & 0x0F); + (src[0] >> 4) & 0x0F, 3); /* take care of the bottom nibble, which is right sample for * stereo, or another mono sample */ if (st) *samples++ = adpcm_ima_expand_nibble(&c->status[1], - src[0] & 0x0F); + src[0] & 0x0F, 3); else *samples++ = adpcm_ima_expand_nibble(&c->status[0], - src[0] & 0x0F); + src[0] & 0x0F, 3); src++; } break; case CODEC_ID_ADPCM_IMA_DK3: - if (buf_size > BLKSIZE) { - if (avctx->block_align != 0) - buf_size = avctx->block_align; - else - buf_size = BLKSIZE; - } + if (avctx->block_align != 0 && buf_size > avctx->block_align) + buf_size = avctx->block_align; + c->status[0].predictor = (src[10] | (src[11] << 8)); c->status[1].predictor = (src[12] | (src[13] << 8)); c->status[0].step_index = src[14]; @@ -722,11 +664,11 @@ static int adpcm_decode_frame(AVCodecContext *avctx, /* process the first predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); - adpcm_ima_expand_nibble(&c->status[0], nibble); + adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the diff channel predictor */ DK3_GET_NEXT_NIBBLE(); - adpcm_ima_expand_nibble(&c->status[1], nibble); + adpcm_ima_expand_nibble(&c->status[1], nibble, 3); /* process the first pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; @@ -735,7 +677,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, /* process the second predictor of the sum channel */ DK3_GET_NEXT_NIBBLE(); - adpcm_ima_expand_nibble(&c->status[0], nibble); + adpcm_ima_expand_nibble(&c->status[0], nibble, 3); /* process the second pair of stereo PCM samples */ diff_channel = (diff_channel + c->status[1].predictor) / 2; @@ -749,14 +691,14 @@ static int adpcm_decode_frame(AVCodecContext *avctx, if (st) { *samples++ = adpcm_ima_expand_nibble(&c->status[0], - (src[0] >> 4) & 0x0F); + (src[0] >> 4) & 0x0F, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[1], - src[0] & 0x0F); + src[0] & 0x0F, 3); } else { *samples++ = adpcm_ima_expand_nibble(&c->status[0], - (src[0] >> 4) & 0x0F); + (src[0] >> 4) & 0x0F, 3); *samples++ = adpcm_ima_expand_nibble(&c->status[0], - src[0] & 0x0F); + src[0] & 0x0F, 3); } src++; diff --git a/src/libffmpeg/libavcodec/adx.c b/src/libffmpeg/libavcodec/adx.c index 4fdca49e8..e41a75726 100644 --- a/src/libffmpeg/libavcodec/adx.c +++ b/src/libffmpeg/libavcodec/adx.c @@ -199,7 +199,7 @@ static int adx_encode_init(AVCodecContext *avctx) // avctx->bit_rate = avctx->sample_rate*avctx->channels*18*8/32; - printf("adx encode init\n"); fflush(stdout); + av_log(avctx, AV_LOG_DEBUG, "adx encode init\n"); adx_decode_init(avctx); return 0; @@ -213,7 +213,7 @@ static int adx_encode_close(AVCodecContext *avctx) } static int adx_encode_frame(AVCodecContext *avctx, - unsigned char *frame, int buf_size, 
const void *data) + uint8_t *frame, int buf_size, void *data) { ADXContext *c = avctx->priv_data; const short *samples = data; @@ -318,15 +318,15 @@ static void dump(unsigned char *buf,size_t len) { int i; for(i=0;i<len;i++) { - if ((i&15)==0) printf("%04x ",i); - printf("%02x ",buf[i]); - if ((i&15)==15) printf("\n"); + if ((i&15)==0) av_log(NULL, AV_LOG_DEBUG, "%04x ",i); + av_log(NULL, AV_LOG_DEBUG, "%02x ",buf[i]); + if ((i&15)==15) av_log(NULL, AV_LOG_DEBUG, "\n"); } - printf("\n"); + av_log(NULL, AV_LOG_ERROR, "\n"); } static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, - const uint8_t *buf0, int buf_size) + uint8_t *buf0, int buf_size) { ADXContext *c = avctx->priv_data; short *samples = data; diff --git a/src/libffmpeg/libavcodec/alpha/dsputil_alpha.c b/src/libffmpeg/libavcodec/alpha/dsputil_alpha.c index 82ff7db66..496f46120 100644 --- a/src/libffmpeg/libavcodec/alpha/dsputil_alpha.c +++ b/src/libffmpeg/libavcodec/alpha/dsputil_alpha.c @@ -39,11 +39,11 @@ void get_pixels_mvi(DCTELEM *restrict block, const uint8_t *restrict pixels, int line_size); void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride); -int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size); +int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); int pix_abs16x16_mvi_asm(uint8_t *pix1, uint8_t *pix2, int line_size); -int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size); -int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size); -int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size); +int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); +int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); +int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); #if 0 /* These functions were the base for the optimized assembler routines, @@ -290,11 +290,6 @@ static int sad16x16_mvi(void *s, uint8_t *a, uint8_t *b, int stride) return pix_abs16x16_mvi_asm(a, b, stride); } -static int sad8x8_mvi(void *s, uint8_t *a, uint8_t *b, int stride) -{ - return pix_abs8x8_mvi(a, b, stride); -} - void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx) { c->put_pixels_tab[0][0] = put_pixels16_axp_asm; @@ -347,12 +342,13 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx) c->get_pixels = get_pixels_mvi; c->diff_pixels = diff_pixels_mvi; c->sad[0] = sad16x16_mvi; - c->sad[1] = sad8x8_mvi; - c->pix_abs8x8 = pix_abs8x8_mvi; - c->pix_abs16x16 = pix_abs16x16_mvi_asm; - c->pix_abs16x16_x2 = pix_abs16x16_x2_mvi; - c->pix_abs16x16_y2 = pix_abs16x16_y2_mvi; - c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mvi; + c->sad[1] = pix_abs8x8_mvi; +// c->pix_abs[0][0] = pix_abs16x16_mvi_asm; //FIXME function arguments for the asm must be fixed + c->pix_abs[0][0] = sad16x16_mvi; + c->pix_abs[1][0] = pix_abs8x8_mvi; + c->pix_abs[0][1] = pix_abs16x16_x2_mvi; + c->pix_abs[0][2] = pix_abs16x16_y2_mvi; + c->pix_abs[0][3] = pix_abs16x16_xy2_mvi; } put_pixels_clamped_axp_p = c->put_pixels_clamped; diff --git a/src/libffmpeg/libavcodec/alpha/motion_est_alpha.c b/src/libffmpeg/libavcodec/alpha/motion_est_alpha.c index 804e1d2b6..8b8a0a25c 100644 --- a/src/libffmpeg/libavcodec/alpha/motion_est_alpha.c +++ b/src/libffmpeg/libavcodec/alpha/motion_est_alpha.c @@ -84,10 +84,9 @@ static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4) return r1 + r2; } -int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size) 
+int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int result = 0; - int h = 8; if ((size_t) pix2 & 0x7) { /* works only when pix2 is actually unaligned */ @@ -160,10 +159,9 @@ int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size) } #endif -int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size) +int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int result = 0; - int h = 16; uint64_t disalign = (size_t) pix2 & 0x7; switch (disalign) { @@ -234,10 +232,9 @@ int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size) return result; } -int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size) +int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int result = 0; - int h = 16; if ((size_t) pix2 & 0x7) { uint64_t t, p2_l, p2_r; @@ -288,10 +285,9 @@ int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size) return result; } -int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size) +int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int result = 0; - int h = 16; uint64_t p1_l, p1_r; uint64_t p2_l, p2_r, p2_x; diff --git a/src/libffmpeg/libavcodec/alpha/mpegvideo_alpha.c b/src/libffmpeg/libavcodec/alpha/mpegvideo_alpha.c index 6b720373c..f64fb7472 100644 --- a/src/libffmpeg/libavcodec/alpha/mpegvideo_alpha.c +++ b/src/libffmpeg/libavcodec/alpha/mpegvideo_alpha.c @@ -21,7 +21,7 @@ #include "../dsputil.h" #include "../mpegvideo.h" -static void dct_unquantize_h263_axp(MpegEncContext *s, DCTELEM *block, +static void dct_unquantize_h263_intra_axp(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, n_coeffs; @@ -35,19 +35,15 @@ static void dct_unquantize_h263_axp(MpegEncContext *s, DCTELEM *block, /* This mask kills spill from negative subwords to the next subword. */ correction = WORD_VEC((qmul - 1) + 1); /* multiplication / addition */ - if (s->mb_intra) { - if (!s->h263_aic) { - if (n < 4) - block0 = block[0] * s->y_dc_scale; - else - block0 = block[0] * s->c_dc_scale; - } else { - qadd = 0; - } - n_coeffs = 63; // does not always use zigzag table + if (!s->h263_aic) { + if (n < 4) + block0 = block[0] * s->y_dc_scale; + else + block0 = block[0] * s->c_dc_scale; } else { - n_coeffs = s->intra_scantable.raster_end[s->block_last_index[n]]; + qadd = 0; } + n_coeffs = 63; // does not always use zigzag table for(i = 0; i <= n_coeffs; block += 4, i += 4) { uint64_t levels, negmask, zeros, add; @@ -90,7 +86,62 @@ static void dct_unquantize_h263_axp(MpegEncContext *s, DCTELEM *block, orig_block[0] = block0; } +static void dct_unquantize_h263_inter_axp(MpegEncContext *s, DCTELEM *block, + int n, int qscale) +{ + int i, n_coeffs; + uint64_t qmul, qadd; + uint64_t correction; + DCTELEM *orig_block = block; + DCTELEM block0; + + qadd = WORD_VEC((qscale - 1) | 1); + qmul = qscale << 1; + /* This mask kills spill from negative subwords to the next subword. */ + correction = WORD_VEC((qmul - 1) + 1); /* multiplication / addition */ + + n_coeffs = s->intra_scantable.raster_end[s->block_last_index[n]]; + + for(i = 0; i <= n_coeffs; block += 4, i += 4) { + uint64_t levels, negmask, zeros, add; + + levels = ldq(block); + if (levels == 0) + continue; + +#ifdef __alpha_max__ + /* I don't think the speed difference justifies runtime + detection. 
*/ + negmask = maxsw4(levels, -1); /* negative -> ffff (-1) */ + negmask = minsw4(negmask, 0); /* positive -> 0000 (0) */ +#else + negmask = cmpbge(WORD_VEC(0x7fff), levels); + negmask &= (negmask >> 1) | (1 << 7); + negmask = zap(-1, negmask); +#endif + + zeros = cmpbge(0, levels); + zeros &= zeros >> 1; + /* zeros |= zeros << 1 is not needed since qadd <= 255, so + zapping the lower byte suffices. */ + + levels *= qmul; + levels -= correction & (negmask << 16); + + /* Negate qadd for negative levels. */ + add = qadd ^ negmask; + add += WORD_VEC(0x0001) & negmask; + /* Set qadd to 0 for levels == 0. */ + add = zap(add, zeros); + + levels += add; + + stq(levels, block); + } +} + void MPV_common_init_axp(MpegEncContext *s) { - s->dct_unquantize_h263 = dct_unquantize_h263_axp; + s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_axp; + s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_axp; } diff --git a/src/libffmpeg/libavcodec/asv1.c b/src/libffmpeg/libavcodec/asv1.c index bff0fe37a..b84b02475 100644 --- a/src/libffmpeg/libavcodec/asv1.c +++ b/src/libffmpeg/libavcodec/asv1.c @@ -207,7 +207,7 @@ static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64]){ if(ccp){ if(ccp == 16) break; if(ccp < 0 || i>=10){ - printf("coded coeff pattern damaged\n"); + av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n"); return -1; } @@ -415,7 +415,7 @@ static int decode_frame(AVCodecContext *avctx, p->reference= 0; if(avctx->get_buffer(avctx, p) < 0){ - fprintf(stderr, "get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type= I_TYPE; @@ -481,8 +481,6 @@ for(i=0; i<s->avctx->extradata_size; i++){ return (get_bits_count(&a->gb)+31)/32*4; } -#ifdef CONFIG_ENCODERS - static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ ASV1Context * const a = avctx->priv_data; AVFrame *pict = data; @@ -537,8 +535,6 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, return size*4; } -#endif - static void common_init(AVCodecContext *avctx){ ASV1Context * const a = avctx->priv_data; @@ -565,7 +561,7 @@ static int decode_init(AVCodecContext *avctx){ a->inv_qscale= ((uint8_t*)avctx->extradata)[0]; if(a->inv_qscale == 0){ - printf("illegal qscale 0\n"); + av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n"); if(avctx->codec_id == CODEC_ID_ASV1) a->inv_qscale= 6; else @@ -586,8 +582,6 @@ static int decode_init(AVCodecContext *avctx){ return 0; } -#ifdef CONFIG_ENCODERS - static int encode_init(AVCodecContext *avctx){ ASV1Context * const a = avctx->priv_data; int i; @@ -612,8 +606,6 @@ static int encode_init(AVCodecContext *avctx){ return 0; } -#endif - static int decode_end(AVCodecContext *avctx){ ASV1Context * const a = avctx->priv_data; diff --git a/src/libffmpeg/libavcodec/avcodec.h b/src/libffmpeg/libavcodec/avcodec.h index 3293411be..6f26b0685 100644 --- a/src/libffmpeg/libavcodec/avcodec.h +++ b/src/libffmpeg/libavcodec/avcodec.h @@ -13,6 +13,7 @@ extern "C" { #include "common.h" #include "rational.h" +#include <sys/types.h> /* size_t */ /* FIXME: We cannot use ffmpeg's XvMC capabilities, since that would require * linking the ffmpeg plugin against XvMC libraries, which is a bad thing, @@ -23,7 +24,7 @@ extern "C" { #define FFMPEG_VERSION_INT 0x000408 #define FFMPEG_VERSION "0.4.8" -#define LIBAVCODEC_BUILD 4688 +#define LIBAVCODEC_BUILD 4699 #define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT #define LIBAVCODEC_VERSION FFMPEG_VERSION @@ -39,6 +40,7 @@ enum CodecID { 
CODEC_ID_MPEG2VIDEO_XVMC, CODEC_ID_H263, CODEC_ID_RV10, + CODEC_ID_RV20, CODEC_ID_MP2, CODEC_ID_MP3, /* prefered ID for MPEG Audio layer 1, 2 or3 decoding */ CODEC_ID_VORBIS, @@ -90,6 +92,14 @@ enum CodecID { CODEC_ID_MSRLE, CODEC_ID_MSVIDEO1, CODEC_ID_IDCIN, + CODEC_ID_8BPS, + CODEC_ID_SMC, + CODEC_ID_FLIC, + CODEC_ID_TRUEMOTION1, + CODEC_ID_VMDVIDEO, + CODEC_ID_VMDAUDIO, + CODEC_ID_MSZH, + CODEC_ID_ZLIB, /* various pcm "codecs" */ CODEC_ID_PCM_S16LE, @@ -232,7 +242,7 @@ static const __attribute__((unused)) int Motion_Est_QTab[] = */ #define CODEC_FLAG_QSCALE 0x0002 ///< use fixed qscale -#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed +#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / Advanced prediction for H263 #define CODEC_FLAG_QPEL 0x0010 ///< use qpel MC #define CODEC_FLAG_GMC 0x0020 ///< use GMC #define CODEC_FLAG_MV0 0x0040 ///< always try a MB with MV=<0,0> @@ -260,15 +270,18 @@ static const __attribute__((unused)) int Motion_Est_QTab[] = #define CODEC_FLAG_AC_PRED 0x01000000 ///< H263 Advanced intra coding / MPEG4 AC prediction #define CODEC_FLAG_H263P_UMV 0x02000000 ///< Unlimited motion vector #define CODEC_FLAG_CBP_RD 0x04000000 ///< use rate distortion optimization for cbp -/* For advanced prediction mode, we reuse the 4MV flag */ +#define CODEC_FLAG_QP_RD 0x08000000 ///< use rate distortion optimization for qp selectioon +#define CODEC_FLAG_H263P_AIV 0x00000008 ///< H263 Alternative inter vlc +#define CODEC_FLAG_OBMC 0x00000001 ///< OBMC +#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter +#define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000 +#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation +#define CODEC_FLAG_SVCD_SCAN_OFFSET 0x40000000 ///< will reserve space for SVCD scan offset user data +#define CODEC_FLAG_CLOSED_GOP 0x80000000 /* Unsupported options : * Syntax Arithmetic coding (SAC) - * Deblocking filter internal loop - * Slice structured * Reference Picture Selection - * Independant Segment Decoding - * Alternative Inter * VLC - * Modified Quantization */ + * Independant Segment Decoding */ /* /Fx */ /* codec capabilities */ @@ -283,6 +296,30 @@ static const __attribute__((unused)) int Motion_Est_QTab[] = #define CODEC_CAP_PARSE_ONLY 0x0004 #define CODEC_CAP_TRUNCATED 0x0008 +//the following defines might change, so dont expect compatibility if u use them +#define MB_TYPE_INTRA4x4 0x0001 +#define MB_TYPE_INTRA16x16 0x0002 //FIXME h264 specific +#define MB_TYPE_INTRA_PCM 0x0004 //FIXME h264 specific +#define MB_TYPE_16x16 0x0008 +#define MB_TYPE_16x8 0x0010 +#define MB_TYPE_8x16 0x0020 +#define MB_TYPE_8x8 0x0040 +#define MB_TYPE_INTERLACED 0x0080 +#define MB_TYPE_DIRECT2 0x0100 //FIXME +#define MB_TYPE_ACPRED 0x0200 +#define MB_TYPE_GMC 0x0400 +#define MB_TYPE_SKIP 0x0800 +#define MB_TYPE_P0L0 0x1000 +#define MB_TYPE_P1L0 0x2000 +#define MB_TYPE_P0L1 0x4000 +#define MB_TYPE_P1L1 0x8000 +#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0) +#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1) +#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1) +#define MB_TYPE_QUANT 0x00010000 +#define MB_TYPE_CBP 0x00020000 +//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...) + /** * Pan Scan area. * this specifies the area which should be displayed. 
Note there may be multiple such areas for one frame @@ -406,6 +443,28 @@ typedef struct AVPanScan{ uint8_t *mbskip_table;\ \ /**\ + * Motion vector table\ + * - encoding: unused\ + * - decoding: set by lavc\ + */\ + int16_t (*motion_val[2])[2];\ +\ + /**\ + * Macroblock type table\ + * mb_type_base + mb_width + 2\ + * - encoding: unused\ + * - decoding: set by lavc\ + */\ + uint32_t *mb_type;\ +\ + /**\ + * Macroblock size: (0->16x16, 1->8x8, 2-> 4x4, 3-> 2x2)\ + * - encoding: unused\ + * - decoding: set by lavc\ + */\ + uint8_t motion_subsample_log2;\ +\ + /**\ * for some private data of the user\ * - encoding: unused\ * - decoding: set by user\ @@ -467,6 +526,13 @@ typedef struct AVPanScan{ * - decoding: set by lavc (default 0)\ */\ int palette_has_changed;\ + \ + /**\ + * Codec suggestion on buffer type if != 0\ + * - encoding: unused\ + * - decoding: set by lavc (before get_buffer() call))\ + */\ + int buffer_hints;\ #define FF_QSCALE_TYPE_MPEG1 0 #define FF_QSCALE_TYPE_MPEG2 1 @@ -484,6 +550,11 @@ typedef struct AVPanScan{ #define FF_SI_TYPE 5 #define FF_SP_TYPE 6 +#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore) +#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer +#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content +#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update) + /** * Audio Video Frame. */ @@ -682,11 +753,8 @@ typedef struct AVCodecContext { void *priv_data; - /* The following data is for RTP friendly coding */ - /* By now only H.263/H.263+/MPEG4 coder honours this */ - int rtp_mode; /* 1 for activate RTP friendly-mode */ - /* highers numbers represent more error-prone */ - /* enviroments, by now just "1" exist */ + /* unused, FIXME remove*/ + int rtp_mode; int rtp_payload_size; /* The size of the RTP payload, the coder will */ /* do it's best to deliver a chunk with size */ @@ -902,6 +970,8 @@ typedef struct AVCodecContext { /** * qscale factor between p and i frames. + * if > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset) + * if < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset) * - encoding: set by user. * - decoding: unused */ @@ -909,8 +979,6 @@ typedef struct AVCodecContext { /** * qscale offset between p and i frames. - * if > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset) - * if < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset) * - encoding: set by user. * - decoding: unused */ @@ -1054,6 +1122,7 @@ typedef struct AVCodecContext { /** * sample aspect ratio (0 if unknown). + * numerator and denominator must be relative prime and smaller then 256 for some video standards * - encoding: set by user. * - decoding: set by lavc. */ @@ -1078,15 +1147,27 @@ typedef struct AVCodecContext { #define FF_DEBUG_MB_TYPE 8 #define FF_DEBUG_QP 16 #define FF_DEBUG_MV 32 -#define FF_DEBUG_VIS_MV 0x00000040 +//#define FF_DEBUG_VIS_MV 0x00000040 #define FF_DEBUG_SKIP 0x00000080 #define FF_DEBUG_STARTCODE 0x00000100 #define FF_DEBUG_PTS 0x00000200 #define FF_DEBUG_ER 0x00000400 #define FF_DEBUG_MMCO 0x00000800 #define FF_DEBUG_BUGS 0x00001000 +#define FF_DEBUG_VIS_QP 0x00002000 +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 /** + * debug. + * - encoding: set by user. + * - decoding: set by user. 
+ */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames + + /** * error. * - encoding: set by lavc if flags&CODEC_FLAG_PSNR * - decoding: unused @@ -1125,6 +1206,12 @@ typedef struct AVCodecContext { * - decoding: unused */ int mb_cmp; + /** + * interlaced dct compare function + * - encoding: set by user. + * - decoding: unused + */ + int ildct_cmp; #define FF_CMP_SAD 0 #define FF_CMP_SSE 1 #define FF_CMP_SATD 2 @@ -1133,6 +1220,8 @@ typedef struct AVCodecContext { #define FF_CMP_BIT 5 #define FF_CMP_RD 6 #define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 #define FF_CMP_CHROMA 256 /** @@ -1365,7 +1454,63 @@ typedef struct AVCodecContext { * - decoding: set by user. */ struct AVPaletteControl *palctrl; + + /** + * noise reduction strength + * - encoding: set by user. + * - decoding: unused + */ + int noise_reduction; + /** + * called at the beginning of a frame to get cr buffer for it. + * buffer type (size, hints) must be the same. lavc won't check it. + * lavc will pass previous buffer in pic, function should return + * same buffer or new buffer with old frame "painted" into it. + * if pic.data[0] == NULL must behave like get_buffer(). + * - encoding: unused + * - decoding: set by lavc, user can override + */ + int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic); + + /** + * number of bits which should be loaded into the rc buffer before decoding starts + * - encoding: set by user. + * - decoding: unused + */ + int rc_initial_buffer_occupancy; + + /** + * + * - encoding: set by user. + * - decoding: unused + */ + int inter_threshold; + + /** + * CODEC_FLAG2_*. + * - encoding: set by user. + * - decoding: set by user. + */ + int flags2; + + /** + * simulates errors in the bitstream to test error concealment. + * - encoding: set by user. + * - decoding: unused. + */ + int error_rate; + + /** + * MP3 antialias algorithm, see FF_AA_* below. + * - encoding: unused + * - decoding: set by user + */ + int antialias_algo; +#define FF_AA_AUTO 0 +#define FF_AA_FASTINT 1 //not implemented yet +#define FF_AA_INT 2 +#define FF_AA_FLOAT 3 } AVCodecContext; @@ -1446,7 +1591,8 @@ typedef struct AVPicture { * This structure defines a method for communicating palette changes * between and demuxer and a decoder. 
*/ -#define AVPALETTE_SIZE 256 +#define AVPALETTE_SIZE 1024 +#define AVPALETTE_COUNT 256 typedef struct AVPaletteControl { /* demuxer sets this to 1 to indicate the palette has changed; @@ -1457,7 +1603,7 @@ typedef struct AVPaletteControl { * the individual palette components should be on a 8-bit scale; if * the palette data comes from a IBM VGA native format, the component * data is probably 6 bits in size and needs to be scaled */ - unsigned int palette[AVPALETTE_SIZE]; + unsigned int palette[AVPALETTE_COUNT]; } AVPaletteControl; @@ -1472,6 +1618,7 @@ extern AVCodec h263_encoder; extern AVCodec h263p_encoder; extern AVCodec flv_encoder; extern AVCodec rv10_encoder; +extern AVCodec rv20_encoder; extern AVCodec mjpeg_encoder; extern AVCodec ljpeg_encoder; extern AVCodec mpeg4_encoder; @@ -1487,6 +1634,7 @@ extern AVCodec asv2_encoder; extern AVCodec vcr1_encoder; extern AVCodec ffv1_encoder; extern AVCodec mdec_encoder; +extern AVCodec zlib_encoder; extern AVCodec h263_decoder; extern AVCodec mpeg4_decoder; @@ -1497,10 +1645,12 @@ extern AVCodec wmv1_decoder; extern AVCodec wmv2_decoder; extern AVCodec mpeg1video_decoder; extern AVCodec mpeg2video_decoder; +extern AVCodec mpegvideo_decoder; extern AVCodec mpeg_xvmc_decoder; extern AVCodec h263i_decoder; extern AVCodec flv_decoder; extern AVCodec rv10_decoder; +extern AVCodec rv20_decoder; extern AVCodec svq1_decoder; extern AVCodec svq3_decoder; extern AVCodec dvvideo_decoder; @@ -1542,6 +1692,14 @@ extern AVCodec msrle_decoder; extern AVCodec msvideo1_decoder; extern AVCodec vqa_decoder; extern AVCodec idcin_decoder; +extern AVCodec eightbps_decoder; +extern AVCodec smc_decoder; +extern AVCodec flic_decoder; +extern AVCodec vmdvideo_decoder; +extern AVCodec vmdaudio_decoder; +extern AVCodec truemotion1_decoder; +extern AVCodec mszh_decoder; +extern AVCodec zlib_decoder; extern AVCodec ra_144_decoder; extern AVCodec ra_288_decoder; extern AVCodec roq_dpcm_decoder; @@ -1811,6 +1969,61 @@ typedef enum { */ int avcodec(void* handle, avc_cmd_t cmd, void* pin, void* pout); +/* frame parsing */ +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t last_frame_offset; /* offset of the last frame */ + /* video info */ + int pict_type; /* XXX: put it back in AVCodecContext */ + int repeat_pict; /* XXX: put it back in AVCodecContext */ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[3]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + struct AVCodecParser *next; +} AVCodecParser; + +extern AVCodecParser *av_first_parser; + +void av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *av_parser_init(int codec_id); +int av_parser_parse(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const 
uint8_t *buf, int buf_size, + int64_t pts, int64_t dts); +void av_parser_close(AVCodecParserContext *s); + +extern AVCodecParser mpegvideo_parser; +extern AVCodecParser mpeg4video_parser; +extern AVCodecParser h263_parser; +extern AVCodecParser h264_parser; +extern AVCodecParser mpegaudio_parser; +extern AVCodecParser ac3_parser; + /* memory */ void *av_malloc(unsigned int size); void *av_mallocz(unsigned int size); @@ -1829,6 +2042,31 @@ void *__av_mallocz_static(void** location, unsigned int size); /* add by bero : in adx.c */ int is_adx(const unsigned char *buf,size_t bufsize); +void img_copy(AVPicture *dst, const AVPicture *src, + int pix_fmt, int width, int height); + +/* av_log API */ + +#include <stdarg.h> + +#define AV_LOG_ERROR 0 +#define AV_LOG_INFO 1 +#define AV_LOG_DEBUG 2 + +extern void av_log(AVCodecContext*, int level, const char *fmt, ...) __attribute__ ((__format__ (__printf__, 3, 4))); +extern void av_vlog(AVCodecContext*, int level, const char *fmt, va_list); +extern int av_log_get_level(void); +extern void av_log_set_level(int); +extern void av_log_set_callback(void (*)(AVCodecContext*, int, const char*, va_list)); + +#undef AV_LOG_TRAP_PRINTF +#ifdef AV_LOG_TRAP_PRINTF +#define printf DO NOT USE +#define fprintf DO NOT USE +#undef stderr +#define stderr DO NOT USE +#endif + #ifdef __cplusplus } #endif diff --git a/src/libffmpeg/libavcodec/cinepak.c b/src/libffmpeg/libavcodec/cinepak.c index 56c4990be..e8d3efc3c 100644 --- a/src/libffmpeg/libavcodec/cinepak.c +++ b/src/libffmpeg/libavcodec/cinepak.c @@ -37,9 +37,6 @@ #define PALETTE_COUNT 256 -#undef BE_16 -#undef BE_32 - #define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1]) #define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \ (((uint8_t*)(x))[1] << 16) | \ @@ -421,7 +418,7 @@ static int cinepak_decode_frame(AVCodecContext *avctx, s->size = buf_size; if (avctx->get_buffer(avctx, &s->frame)) { - printf (" Cinepak: get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, " Cinepak: get_buffer() failed\n"); return -1; } diff --git a/src/libffmpeg/libavcodec/common.c b/src/libffmpeg/libavcodec/common.c index 673ce7083..79c6f52a0 100644 --- a/src/libffmpeg/libavcodec/common.c +++ b/src/libffmpeg/libavcodec/common.c @@ -1,6 +1,7 @@ /* * Common bit i/o utils * Copyright (c) 2000, 2001 Fabrice Bellard. 
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -48,7 +49,6 @@ void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size) { s->buf = buffer; s->buf_end = s->buf + buffer_size; - s->data_out_size = 0; #ifdef ALT_BITSTREAM_WRITER s->index=0; ((uint32_t*)(s->buf))[0]=0; @@ -60,15 +60,16 @@ void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size) #endif } -#if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) +//#ifdef CONFIG_ENCODERS +#if 1 /* return the number of bits output */ -int64_t get_bit_count(PutBitContext *s) +int get_bit_count(PutBitContext *s) { #ifdef ALT_BITSTREAM_WRITER - return s->data_out_size * 8 + s->index; + return s->index; #else - return (s->buf_ptr - s->buf + s->data_out_size) * 8 + 32 - (int64_t)s->bit_left; + return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left; #endif } @@ -196,7 +197,8 @@ void align_get_bits(GetBitContext *s) int check_marker(GetBitContext *s, const char *msg) { int bit= get_bits1(s); - if(!bit) printf("Marker bit missing %s\n", msg); + if(!bit) + av_log(NULL, AV_LOG_INFO, "Marker bit missing %s\n", msg); return bit; } @@ -281,11 +283,11 @@ static int build_table(VLC *vlc, int table_nb_bits, nb = 1 << (table_nb_bits - n); for(k=0;k<nb;k++) { #ifdef DEBUG_VLC - printf("%4x: code=%d n=%d\n", + av_log(NULL, AV_LOG_DEBUG, "%4x: code=%d n=%d\n", j, i, n); #endif if (table[j][1] /*bits*/ != 0) { - fprintf(stderr, "incorrect codes\n"); + av_log(NULL, AV_LOG_ERROR, "incorrect codes\n"); av_abort(); } table[j][1] = n; //bits diff --git a/src/libffmpeg/libavcodec/common.h b/src/libffmpeg/libavcodec/common.h index 870a3279c..5f51ba25d 100644 --- a/src/libffmpeg/libavcodec/common.h +++ b/src/libffmpeg/libavcodec/common.h @@ -18,6 +18,10 @@ //#define A32_BITSTREAM_READER #define LIBMPEG2_BITSTREAM_READER_HACK //add BERO +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + #ifdef HAVE_AV_CONFIG_H /* only include the following when compiling package */ # include "config.h" @@ -37,10 +41,6 @@ # define ENODATA 61 # endif -#ifndef M_PI -#define M_PI 3.14159265358979323846 -#endif - #include <stddef.h> #ifndef offsetof # define offsetof(T,F) ((unsigned int)((char *)&((T *)0)->F)) @@ -82,19 +82,49 @@ extern const struct AVOption avoptions_workaround_bug[11]; # define always_inline inline #endif +#ifndef EMULATE_INTTYPES +# include <inttypes.h> +#else + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; + +# ifdef CONFIG_WIN32 + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; +# else /* other OS */ + typedef signed long long int64_t; + typedef unsigned long long uint64_t; +# endif /* other OS */ +#endif /* HAVE_INTTYPES_H */ + +#ifndef INT64_MAX +#define INT64_MAX 9223372036854775807LL +#endif + +#ifdef EMULATE_FAST_INT +/* note that we don't emulate 64bit ints */ +typedef signed char int_fast8_t; +typedef signed int int_fast16_t; +typedef signed int int_fast32_t; +typedef unsigned char uint_fast8_t; +typedef unsigned int uint_fast16_t; +typedef unsigned int uint_fast32_t; +#endif + +#if defined(CONFIG_OS2) || defined(CONFIG_SUNOS) +static inline float floorf(float f) { + return floor(f); +} +#endif + #ifdef CONFIG_WIN32 /* windows */ -typedef unsigned short uint16_t; -typedef signed short int16_t; -typedef unsigned char 
uint8_t; -typedef unsigned int uint32_t; -typedef unsigned __int64 uint64_t; -typedef signed char int8_t; -typedef signed int int32_t; -typedef signed __int64 int64_t; - # ifndef __MINGW32__ # define int64_t_C(c) (c ## i64) # define uint64_t_C(c) (c ## i64) @@ -121,8 +151,6 @@ typedef signed __int64 int64_t; #elif defined (CONFIG_OS2) /* OS/2 EMX */ -#include <inttypes.h> - #ifndef int64_t_C #define int64_t_C(c) (c ## LL) #define uint64_t_C(c) (c ## ULL) @@ -143,8 +171,6 @@ typedef signed __int64 int64_t; /* unix */ -#include <inttypes.h> - #ifndef int64_t_C #define int64_t_C(c) (c ## LL) #define uint64_t_C(c) (c ## ULL) @@ -202,7 +228,7 @@ inline void dprintf(const char* fmt,...) {} # endif /* !CONFIG_WIN32 */ -# define av_abort() do { fprintf(stderr, "Abort at %s:%d\n", __FILE__, __LINE__); abort(); } while (0) +# define av_abort() do { av_log(NULL, AV_LOG_ERROR, "Abort at %s:%d\n", __FILE__, __LINE__); abort(); } while (0) //rounded divison & shift #define RSHIFT(a,b) ((a) > 0 ? ((a) + (1<<((b)-1)))>>(b) : ((a) + (1<<((b)-1))-1)>>(b)) @@ -268,12 +294,11 @@ typedef struct PutBitContext { int bit_left; uint8_t *buf, *buf_ptr, *buf_end; #endif - int64_t data_out_size; /* in bytes */ } PutBitContext; void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size); -int64_t get_bit_count(PutBitContext *s); /* XXX: change function name */ +int get_bit_count(PutBitContext *s); /* XXX: change function name */ void align_put_bits(PutBitContext *s); void flush_put_bits(PutBitContext *s); void put_string(PutBitContext * pbc, char *s); @@ -1001,23 +1026,31 @@ static inline int av_log2_16bit(unsigned int v) return n; } - /* median of 3 */ static inline int mid_pred(int a, int b, int c) { - int vmin, vmax; - vmax = vmin = a; - if (b < vmin) - vmin = b; - else - vmax = b; - - if (c < vmin) - vmin = c; - else if (c > vmax) - vmax = c; - - return a + b + c - vmin - vmax; +#if 0 + int t= (a-b)&((a-b)>>31); + a-=t; + b+=t; + b-= (b-c)&((b-c)>>31); + b+= (a-b)&((a-b)>>31); + + return b; +#else + if(a>b){ + if(c>b){ + if(c>a) b=a; + else b=c; + } + }else{ + if(b>c){ + if(c>a) b=c; + else b=a; + } + } + return b; +#endif } static inline int clip(int a, int amin, int amax) diff --git a/src/libffmpeg/libavcodec/cyuv.c b/src/libffmpeg/libavcodec/cyuv.c index d1a6eabf2..70b55066a 100644 --- a/src/libffmpeg/libavcodec/cyuv.c +++ b/src/libffmpeg/libavcodec/cyuv.c @@ -88,7 +88,7 @@ static int cyuv_decode_frame(AVCodecContext *avctx, * of 4 pixels. 
Thus, the total size of the buffer ought to be: * (3 * 16) + height * (width * 3 / 4) */ if (buf_size != 48 + s->height * (s->width * 3 / 4)) { - printf ("ffmpeg: cyuv: got a buffer with %d bytes when %d were expected\n", + av_log(avctx, AV_LOG_ERROR, "ffmpeg: cyuv: got a buffer with %d bytes when %d were expected\n", buf_size, 48 + s->height * (s->width * 3 / 4)); return -1; @@ -102,7 +102,7 @@ static int cyuv_decode_frame(AVCodecContext *avctx, s->frame.reference = 0; if(avctx->get_buffer(avctx, &s->frame) < 0) { - fprintf(stderr, "get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } diff --git a/src/libffmpeg/libavcodec/dpcm.c b/src/libffmpeg/libavcodec/dpcm.c index aed892c53..b59a9cd6b 100644 --- a/src/libffmpeg/libavcodec/dpcm.c +++ b/src/libffmpeg/libavcodec/dpcm.c @@ -44,10 +44,6 @@ typedef struct DPCMContext { #define SATURATE_S16(x) if (x < -32768) x = -32768; \ else if (x > 32767) x = 32767; #define SE_16BIT(x) if (x & 0x8000) x -= 0x10000; - -#undef LE_16 -#undef LE_32 - #define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0]) #define LE_32(x) ((((uint8_t*)(x))[3] << 24) | \ (((uint8_t*)(x))[2] << 16) | \ @@ -129,6 +125,9 @@ static int dpcm_decode_frame(AVCodecContext *avctx, unsigned char byte; short diff; + if (!buf_size) + return 0; + switch(avctx->codec->id) { case CODEC_ID_ROQ_DPCM: diff --git a/src/libffmpeg/libavcodec/dsputil.c b/src/libffmpeg/libavcodec/dsputil.c index 270424706..114d67b50 100644 --- a/src/libffmpeg/libavcodec/dsputil.c +++ b/src/libffmpeg/libavcodec/dsputil.c @@ -1,6 +1,7 @@ /* * DSP utils * Copyright (c) 2000, 2001 Fabrice Bellard. + * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -28,7 +29,7 @@ #include "dsputil.h" #include "mpegvideo.h" #include "simple_idct.h" - +#include "faandct.h" uint8_t cropTbl[256 + 2 * MAX_NEG_CROP]; uint32_t squareTbl[512]; @@ -44,6 +45,19 @@ const uint8_t ff_zigzag_direct[64] = { 53, 60, 61, 54, 47, 55, 62, 63 }; +/* Specific zigzag scan for 248 idct. 
NOTE that unlike the + specification, we interleave the fields */ +const uint8_t ff_zigzag248_direct[64] = { + 0, 8, 1, 9, 16, 24, 2, 10, + 17, 25, 32, 40, 48, 56, 33, 41, + 18, 26, 3, 11, 4, 12, 19, 27, + 34, 42, 49, 57, 50, 58, 35, 43, + 20, 28, 5, 13, 6, 14, 21, 29, + 36, 44, 51, 59, 52, 60, 37, 45, + 22, 30, 7, 15, 23, 31, 38, 46, + 53, 61, 54, 62, 39, 47, 55, 63, +}; + /* not permutated inverse zigzag_direct + 1 for MMX quantizer */ uint16_t __align8 inv_zigzag_direct16[64]; @@ -205,13 +219,13 @@ static void bswap_buf(uint32_t *dst, uint32_t *src, int w){ } } -static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size) +static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { int s, i; uint32_t *sq = squareTbl + 256; s = 0; - for (i = 0; i < 8; i++) { + for (i = 0; i < h; i++) { s += sq[pix1[0] - pix2[0]]; s += sq[pix1[1] - pix2[1]]; s += sq[pix1[2] - pix2[2]]; @@ -226,13 +240,13 @@ static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size) return s; } -static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size) +static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; uint32_t *sq = squareTbl + 256; s = 0; - for (i = 0; i < 16; i++) { + for (i = 0; i < h; i++) { s += sq[pix1[ 0] - pix2[ 0]]; s += sq[pix1[ 1] - pix2[ 1]]; s += sq[pix1[ 2] - pix2[ 2]]; @@ -2248,13 +2262,82 @@ static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){ wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8); } +static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){ + int x; + const int strength= ff_h263_loop_filter_strength[qscale]; + + for(x=0; x<8; x++){ + int d1, d2, ad1; + int p0= src[x-2*stride]; + int p1= src[x-1*stride]; + int p2= src[x+0*stride]; + int p3= src[x+1*stride]; + int d = (p0 - p3 + 4*(p2 - p1)) / 8; + + if (d<-2*strength) d1= 0; + else if(d<- strength) d1=-2*strength - d; + else if(d< strength) d1= d; + else if(d< 2*strength) d1= 2*strength - d; + else d1= 0; + + p1 += d1; + p2 -= d1; + if(p1&256) p1= ~(p1>>31); + if(p2&256) p2= ~(p2>>31); + + src[x-1*stride] = p1; + src[x+0*stride] = p2; + + ad1= ABS(d1)>>1; + + d2= clip((p0-p3)/4, -ad1, ad1); + + src[x-2*stride] = p0 - d2; + src[x+ stride] = p3 + d2; + } +} + +static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){ + int y; + const int strength= ff_h263_loop_filter_strength[qscale]; + + for(y=0; y<8; y++){ + int d1, d2, ad1; + int p0= src[y*stride-2]; + int p1= src[y*stride-1]; + int p2= src[y*stride+0]; + int p3= src[y*stride+1]; + int d = (p0 - p3 + 4*(p2 - p1)) / 8; + + if (d<-2*strength) d1= 0; + else if(d<- strength) d1=-2*strength - d; + else if(d< strength) d1= d; + else if(d< 2*strength) d1= 2*strength - d; + else d1= 0; + + p1 += d1; + p2 -= d1; + if(p1&256) p1= ~(p1>>31); + if(p2&256) p2= ~(p2>>31); + + src[y*stride-1] = p1; + src[y*stride+0] = p2; -static inline int pix_abs16x16_c(uint8_t *pix1, uint8_t *pix2, int line_size) + ad1= ABS(d1)>>1; + + d2= clip((p0-p3)/4, -ad1, ad1); + + src[y*stride-2] = p0 - d2; + src[y*stride+1] = p3 + d2; + } +} + +static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; s = 0; - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { s += abs(pix1[0] - pix2[0]); s += abs(pix1[1] - pix2[1]); s += abs(pix1[2] - pix2[2]); @@ -2277,12 +2360,12 @@ static inline int pix_abs16x16_c(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -static int pix_abs16x16_x2_c(uint8_t *pix1, uint8_t *pix2, int line_size) +static int 
pix_abs16_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; s = 0; - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { s += abs(pix1[0] - avg2(pix2[0], pix2[1])); s += abs(pix1[1] - avg2(pix2[1], pix2[2])); s += abs(pix1[2] - avg2(pix2[2], pix2[3])); @@ -2305,13 +2388,13 @@ static int pix_abs16x16_x2_c(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -static int pix_abs16x16_y2_c(uint8_t *pix1, uint8_t *pix2, int line_size) +static int pix_abs16_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; uint8_t *pix3 = pix2 + line_size; s = 0; - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { s += abs(pix1[0] - avg2(pix2[0], pix3[0])); s += abs(pix1[1] - avg2(pix2[1], pix3[1])); s += abs(pix1[2] - avg2(pix2[2], pix3[2])); @@ -2335,13 +2418,13 @@ static int pix_abs16x16_y2_c(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -static int pix_abs16x16_xy2_c(uint8_t *pix1, uint8_t *pix2, int line_size) +static int pix_abs16_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; uint8_t *pix3 = pix2 + line_size; s = 0; - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1])); s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2])); s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3])); @@ -2365,12 +2448,12 @@ static int pix_abs16x16_xy2_c(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -static inline int pix_abs8x8_c(uint8_t *pix1, uint8_t *pix2, int line_size) +static inline int pix_abs8_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; s = 0; - for(i=0;i<8;i++) { + for(i=0;i<h;i++) { s += abs(pix1[0] - pix2[0]); s += abs(pix1[1] - pix2[1]); s += abs(pix1[2] - pix2[2]); @@ -2385,12 +2468,12 @@ static inline int pix_abs8x8_c(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -static int pix_abs8x8_x2_c(uint8_t *pix1, uint8_t *pix2, int line_size) +static int pix_abs8_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; s = 0; - for(i=0;i<8;i++) { + for(i=0;i<h;i++) { s += abs(pix1[0] - avg2(pix2[0], pix2[1])); s += abs(pix1[1] - avg2(pix2[1], pix2[2])); s += abs(pix1[2] - avg2(pix2[2], pix2[3])); @@ -2405,13 +2488,13 @@ static int pix_abs8x8_x2_c(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -static int pix_abs8x8_y2_c(uint8_t *pix1, uint8_t *pix2, int line_size) +static int pix_abs8_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; uint8_t *pix3 = pix2 + line_size; s = 0; - for(i=0;i<8;i++) { + for(i=0;i<h;i++) { s += abs(pix1[0] - avg2(pix2[0], pix3[0])); s += abs(pix1[1] - avg2(pix2[1], pix3[1])); s += abs(pix1[2] - avg2(pix2[2], pix3[2])); @@ -2427,13 +2510,13 @@ static int pix_abs8x8_y2_c(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -static int pix_abs8x8_xy2_c(uint8_t *pix1, uint8_t *pix2, int line_size) +static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; uint8_t *pix3 = pix2 + line_size; s = 0; - for(i=0;i<8;i++) { + for(i=0;i<h;i++) { s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1])); s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2])); s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3])); @@ -2449,14 +2532,6 @@ static int pix_abs8x8_xy2_c(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -static int sad16x16_c(void *s, uint8_t *a, uint8_t *b, int stride){ - return pix_abs16x16_c(a,b,stride); -} - -static int sad8x8_c(void *s, uint8_t *a, uint8_t *b, int stride){ - 
return pix_abs8x8_c(a,b,stride); -} - /** * permutes an 8x8 block. * @param block the block which will be permuted according to the given permutation vector @@ -2486,6 +2561,53 @@ void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scant } } +static int zero_cmp(void *s, uint8_t *a, uint8_t *b, int stride, int h){ + return 0; +} + +void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){ + int i; + + memset(cmp, 0, sizeof(void*)*5); + + for(i=0; i<5; i++){ + switch(type&0xFF){ + case FF_CMP_SAD: + cmp[i]= c->sad[i]; + break; + case FF_CMP_SATD: + cmp[i]= c->hadamard8_diff[i]; + break; + case FF_CMP_SSE: + cmp[i]= c->sse[i]; + break; + case FF_CMP_DCT: + cmp[i]= c->dct_sad[i]; + break; + case FF_CMP_PSNR: + cmp[i]= c->quant_psnr[i]; + break; + case FF_CMP_BIT: + cmp[i]= c->bit[i]; + break; + case FF_CMP_RD: + cmp[i]= c->rd[i]; + break; + case FF_CMP_VSAD: + cmp[i]= c->vsad[i]; + break; + case FF_CMP_VSSE: + cmp[i]= c->vsse[i]; + break; + case FF_CMP_ZERO: + cmp[i]= zero_cmp; + break; + default: + av_log(NULL, AV_LOG_ERROR,"internal error in cmp function selection\n"); + } + } +} + /** * memset(blocks, 0, sizeof(DCTELEM)*6*64) */ @@ -2559,10 +2681,12 @@ o2= (i1)-(i2); #define BUTTERFLYA(x,y) (ABS((x)+(y)) + ABS((x)-(y))) -static int hadamard8_diff_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride){ +static int hadamard8_diff8x8_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){ int i; int temp[64]; int sum=0; + + assert(h==8); for(i=0; i<8; i++){ //FIXME try pointer walks @@ -2609,17 +2733,19 @@ if(sum>maxi){ return sum; } -static int hadamard8_abs_c(uint8_t *src, int stride, int mean){ +static int hadamard8_intra8x8_c(/*MpegEncContext*/ void *s, uint8_t *src, uint8_t *dummy, int stride, int h){ int i; int temp[64]; int sum=0; -//FIXME OOOPS ignore 0 term instead of mean mess + + assert(h==8); + for(i=0; i<8; i++){ //FIXME try pointer walks - BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0]-mean,src[stride*i+1]-mean); - BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2]-mean,src[stride*i+3]-mean); - BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4]-mean,src[stride*i+5]-mean); - BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6]-mean,src[stride*i+7]-mean); + BUTTERFLY2(temp[8*i+0], temp[8*i+1], src[stride*i+0],src[stride*i+1]); + BUTTERFLY2(temp[8*i+2], temp[8*i+3], src[stride*i+2],src[stride*i+3]); + BUTTERFLY2(temp[8*i+4], temp[8*i+5], src[stride*i+4],src[stride*i+5]); + BUTTERFLY2(temp[8*i+6], temp[8*i+7], src[stride*i+6],src[stride*i+7]); BUTTERFLY1(temp[8*i+0], temp[8*i+2]); BUTTERFLY1(temp[8*i+1], temp[8*i+3]); @@ -2650,14 +2776,18 @@ static int hadamard8_abs_c(uint8_t *src, int stride, int mean){ +BUTTERFLYA(temp[8*3+i], temp[8*7+i]); } + sum -= ABS(temp[8*0] + temp[8*4]); // -mean + return sum; } -static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){ +static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){ MpegEncContext * const s= (MpegEncContext *)c; uint64_t __align8 aligned_temp[sizeof(DCTELEM)*64/8]; DCTELEM * const temp= (DCTELEM*)aligned_temp; int sum=0, i; + + assert(h==8); s->dsp.diff_pixels(temp, src1, src2, stride); s->dsp.fdct(temp); @@ -2670,13 +2800,14 @@ static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2 void simple_idct(DCTELEM *block); //FIXME -static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){ +static int 
quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){ MpegEncContext * const s= (MpegEncContext *)c; uint64_t __align8 aligned_temp[sizeof(DCTELEM)*64*2/8]; DCTELEM * const temp= (DCTELEM*)aligned_temp; DCTELEM * const bak = ((DCTELEM*)aligned_temp)+64; int sum=0, i; + assert(h==8); s->mb_intra=0; s->dsp.diff_pixels(temp, src1, src2, stride); @@ -2684,7 +2815,7 @@ static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *s memcpy(bak, temp, 64*sizeof(DCTELEM)); s->block_last_index[0/*FIXME*/]= s->fast_dct_quantize(s, temp, 0/*FIXME*/, s->qscale, &i); - s->dct_unquantize(s, temp, 0, s->qscale); + s->dct_unquantize_inter(s, temp, 0, s->qscale); simple_idct(temp); //FIXME for(i=0; i<64; i++) @@ -2693,7 +2824,7 @@ static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *s return sum; } -static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){ +static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){ MpegEncContext * const s= (MpegEncContext *)c; const uint8_t *scantable= s->intra_scantable.permutated; uint64_t __align8 aligned_temp[sizeof(DCTELEM)*64/8]; @@ -2705,6 +2836,8 @@ static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int uint8_t * length; uint8_t * last_length; + assert(h==8); + for(i=0; i<8; i++){ ((uint32_t*)(bak + i*stride))[0]= ((uint32_t*)(src2 + i*stride))[0]; ((uint32_t*)(bak + i*stride))[1]= ((uint32_t*)(src2 + i*stride))[1]; @@ -2757,17 +2890,20 @@ static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int } if(last>=0){ - s->dct_unquantize(s, temp, 0, s->qscale); + if(s->mb_intra) + s->dct_unquantize_intra(s, temp, 0, s->qscale); + else + s->dct_unquantize_inter(s, temp, 0, s->qscale); } s->dsp.idct_add(bak, stride, temp); - distoration= s->dsp.sse[1](NULL, bak, src1, stride); + distoration= s->dsp.sse[1](NULL, bak, src1, stride, 8); return distoration + ((bits*s->qscale*s->qscale*109 + 64)>>7); } -static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){ +static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){ MpegEncContext * const s= (MpegEncContext *)c; const uint8_t *scantable= s->intra_scantable.permutated; uint64_t __align8 aligned_temp[sizeof(DCTELEM)*64/8]; @@ -2776,6 +2912,8 @@ static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, in const int esc_length= s->ac_esc_length; uint8_t * length; uint8_t * last_length; + + assert(h==8); s->dsp.diff_pixels(temp, src1, src2, stride); @@ -2825,12 +2963,73 @@ static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, in return bits; } +static int vsad_intra16_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ + int score=0; + int x,y; + + for(y=1; y<h; y++){ + for(x=0; x<16; x+=4){ + score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride]) + +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]); + } + s+= stride; + } + + return score; +} -WARPER88_1616(hadamard8_diff_c, hadamard8_diff16_c) -WARPER88_1616(dct_sad8x8_c, dct_sad16x16_c) -WARPER88_1616(quant_psnr8x8_c, quant_psnr16x16_c) -WARPER88_1616(rd8x8_c, rd16x16_c) -WARPER88_1616(bit8x8_c, bit16x16_c) +static int vsad16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){ + int score=0; + int x,y; + + for(y=1; y<h; y++){ + for(x=0; x<16; x++){ + score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x 
+stride]); + } + s1+= stride; + s2+= stride; + } + + return score; +} + +#define SQ(a) ((a)*(a)) +static int vsse_intra16_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy, int stride, int h){ + int score=0; + int x,y; + + for(y=1; y<h; y++){ + for(x=0; x<16; x+=4){ + score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride]) + +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]); + } + s+= stride; + } + + return score; +} + +static int vsse16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int stride, int h){ + int score=0; + int x,y; + + for(y=1; y<h; y++){ + for(x=0; x<16; x++){ + score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]); + } + s1+= stride; + s2+= stride; + } + + return score; +} + +WARPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c) +WARPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c) +WARPER8_16_SQ(dct_sad8x8_c, dct_sad16_c) +WARPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c) +WARPER8_16_SQ(rd8x8_c, rd16_c) +WARPER8_16_SQ(bit8x8_c, bit16_c) /* XXX: those functions should be suppressed ASAP when all IDCTs are converted */ @@ -2869,10 +3068,18 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) int i; #ifdef CONFIG_ENCODERS - if(avctx->dct_algo==FF_DCT_FASTINT) + if(avctx->dct_algo==FF_DCT_FASTINT) { c->fdct = fdct_ifast; - else + c->fdct248 = fdct_ifast248; + } + else if(avctx->dct_algo==FF_DCT_FAAN) { + c->fdct = ff_faandct; + c->fdct248 = ff_faandct248; + } + else { c->fdct = ff_jpeg_fdct_islow; //slow/accurate/default + c->fdct248 = ff_fdct248_islow; + } #endif //CONFIG_ENCODERS if(avctx->idct_algo==FF_IDCT_INT){ @@ -2896,18 +3103,16 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->clear_blocks = clear_blocks_c; c->pix_sum = pix_sum_c; c->pix_norm1 = pix_norm1_c; - c->sse[0]= sse16_c; - c->sse[1]= sse8_c; /* TODO [0] 16 [1] 8 */ - c->pix_abs16x16 = pix_abs16x16_c; - c->pix_abs16x16_x2 = pix_abs16x16_x2_c; - c->pix_abs16x16_y2 = pix_abs16x16_y2_c; - c->pix_abs16x16_xy2 = pix_abs16x16_xy2_c; - c->pix_abs8x8 = pix_abs8x8_c; - c->pix_abs8x8_x2 = pix_abs8x8_x2_c; - c->pix_abs8x8_y2 = pix_abs8x8_y2_c; - c->pix_abs8x8_xy2 = pix_abs8x8_xy2_c; + c->pix_abs[0][0] = pix_abs16_c; + c->pix_abs[0][1] = pix_abs16_x2_c; + c->pix_abs[0][2] = pix_abs16_y2_c; + c->pix_abs[0][3] = pix_abs16_xy2_c; + c->pix_abs[1][0] = pix_abs8_c; + c->pix_abs[1][1] = pix_abs8_x2_c; + c->pix_abs[1][2] = pix_abs8_y2_c; + c->pix_abs[1][3] = pix_abs8_xy2_c; #define dspfunc(PFX, IDX, NUM) \ c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## NUM ## _c; \ @@ -3004,29 +3209,32 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c; c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c; - c->hadamard8_diff[0]= hadamard8_diff16_c; - c->hadamard8_diff[1]= hadamard8_diff_c; - c->hadamard8_abs = hadamard8_abs_c; - - c->dct_sad[0]= dct_sad16x16_c; - c->dct_sad[1]= dct_sad8x8_c; +#define SET_CMP_FUNC(name) \ + c->name[0]= name ## 16_c;\ + c->name[1]= name ## 8x8_c; - c->sad[0]= sad16x16_c; - c->sad[1]= sad8x8_c; - - c->quant_psnr[0]= quant_psnr16x16_c; - c->quant_psnr[1]= quant_psnr8x8_c; - - c->rd[0]= rd16x16_c; - c->rd[1]= rd8x8_c; - - c->bit[0]= bit16x16_c; - c->bit[1]= bit8x8_c; + SET_CMP_FUNC(hadamard8_diff) + c->hadamard8_diff[4]= hadamard8_intra16_c; + SET_CMP_FUNC(dct_sad) + c->sad[0]= pix_abs16_c; + c->sad[1]= pix_abs8_c; + c->sse[0]= sse16_c; + c->sse[1]= sse8_c; + SET_CMP_FUNC(quant_psnr) + SET_CMP_FUNC(rd) + SET_CMP_FUNC(bit) + c->vsad[0]= vsad16_c; + c->vsad[4]= vsad_intra16_c; + c->vsse[0]= vsse16_c; + c->vsse[4]= 
vsse_intra16_c; c->add_bytes= add_bytes_c; c->diff_bytes= diff_bytes_c; c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c; c->bswap_buf= bswap_buf; + + c->h263_h_loop_filter= h263_h_loop_filter_c; + c->h263_v_loop_filter= h263_v_loop_filter_c; #ifdef HAVE_MMX dsputil_init_mmx(c, avctx); @@ -3068,7 +3276,7 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx) c->idct_permutation[i]= ((i&7)<<3) | (i>>3); break; default: - fprintf(stderr, "Internal error, IDCT permutation not set\n"); + av_log(avctx, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n"); } } diff --git a/src/libffmpeg/libavcodec/dsputil.h b/src/libffmpeg/libavcodec/dsputil.h index 6126c78b5..d8346d509 100644 --- a/src/libffmpeg/libavcodec/dsputil.h +++ b/src/libffmpeg/libavcodec/dsputil.h @@ -1,6 +1,7 @@ /* * DSP utils * Copyright (c) 2000, 2001, 2002 Fabrice Bellard. + * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -29,28 +30,31 @@ #include "common.h" #include "avcodec.h" -#include "xineutils.h" #if defined(ARCH_X86) -#define HAVE_MMX 1 +#define HAVE_MMX 1 #endif -#undef DEBUG - +//#define DEBUG /* dct code */ typedef short DCTELEM; void fdct_ifast (DCTELEM *data); +void fdct_ifast248 (DCTELEM *data); void ff_jpeg_fdct_islow (DCTELEM *data); +void ff_fdct248_islow (DCTELEM *data); void j_rev_dct (DCTELEM *data); void ff_fdct_mmx(DCTELEM *block); +void ff_fdct_mmx2(DCTELEM *block); +void ff_fdct_sse2(DCTELEM *block); /* encoding scans */ extern const uint8_t ff_alternate_horizontal_scan[64]; extern const uint8_t ff_alternate_vertical_scan[64]; extern const uint8_t ff_zigzag_direct[64]; +extern const uint8_t ff_zigzag248_direct[64]; /* pixel operations */ #define MAX_NEG_CROP 384 @@ -80,6 +84,7 @@ void clear_blocks_c(DCTELEM *blocks); /* add and put pixel (decoding) */ // blocksizes for op_pixels_func are 8x4,8x8 16x8 16x16 +//h for op_pixels_func is limited to {width/2, width} but never larger than 16 and never smaller then 4 typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int h); typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h); typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride); @@ -110,10 +115,9 @@ static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\ } /* motion estimation */ - -typedef int (*op_pixels_abs_func)(uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size)/* __attribute__ ((const))*/; - -typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size)/* __attribute__ ((const))*/; +// h is limited to {width/2, width, 2*width} but never larger than 16 and never smaller then 2 +// allthough currently h<4 is not used as functions with width <8 are not used and neither implemented +typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/; /** @@ -137,25 +141,28 @@ typedef struct DSPContext { void (*clear_blocks)(DCTELEM *blocks/*align 16*/); int (*pix_sum)(uint8_t * pix, int line_size); int (*pix_norm1)(uint8_t * pix, int line_size); - me_cmp_func sad[2]; /* identical to pix_absAxA except additional void * */ - 
me_cmp_func sse[2]; - me_cmp_func hadamard8_diff[2]; - me_cmp_func dct_sad[2]; - me_cmp_func quant_psnr[2]; - me_cmp_func bit[2]; - me_cmp_func rd[2]; - int (*hadamard8_abs )(uint8_t *src, int stride, int mean); - - me_cmp_func me_pre_cmp[11]; - me_cmp_func me_cmp[11]; - me_cmp_func me_sub_cmp[11]; - me_cmp_func mb_cmp[11]; - - /* maybe create an array for 16/8/4/2 functions */ +// 16x16 8x8 4x4 2x2 16x8 8x4 4x2 8x16 4x8 2x4 + + me_cmp_func sad[5]; /* identical to pix_absAxA except additional void * */ + me_cmp_func sse[5]; + me_cmp_func hadamard8_diff[5]; + me_cmp_func dct_sad[5]; + me_cmp_func quant_psnr[5]; + me_cmp_func bit[5]; + me_cmp_func rd[5]; + me_cmp_func vsad[5]; + me_cmp_func vsse[5]; + + me_cmp_func me_pre_cmp[5]; + me_cmp_func me_cmp[5]; + me_cmp_func me_sub_cmp[5]; + me_cmp_func mb_cmp[5]; + me_cmp_func ildct_cmp[5]; //only width 16 used + /** * Halfpel motion compensation with rounding (a+b+1)>>1. * this is an array[4][4] of motion compensation funcions for 4 - * horizontal blocksizes (2,4,8,16) and the 4 halfpel positions<br> + * horizontal blocksizes (8,16) and the 4 halfpel positions<br> * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] * @param block destination where the result is stored * @param pixels source @@ -167,7 +174,7 @@ typedef struct DSPContext { /** * Halfpel motion compensation with rounding (a+b+1)>>1. * This is an array[4][4] of motion compensation functions for 4 - * horizontal blocksizes (2,4,8,16) and the 4 halfpel positions<br> + * horizontal blocksizes (8,16) and the 4 halfpel positions<br> * *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] * @param block destination into which the result is averaged (a+b+1)>>1 * @param pixels source @@ -227,14 +234,7 @@ typedef struct DSPContext { qpel_mc_func put_h264_qpel_pixels_tab[3][16]; qpel_mc_func avg_h264_qpel_pixels_tab[3][16]; - op_pixels_abs_func pix_abs16x16; - op_pixels_abs_func pix_abs16x16_x2; - op_pixels_abs_func pix_abs16x16_y2; - op_pixels_abs_func pix_abs16x16_xy2; - op_pixels_abs_func pix_abs8x8; - op_pixels_abs_func pix_abs8x8_x2; - op_pixels_abs_func pix_abs8x8_y2; - op_pixels_abs_func pix_abs8x8_xy2; + me_cmp_func pix_abs[2][4]; /* huffyuv specific */ void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w); @@ -246,8 +246,12 @@ typedef struct DSPContext { void (*sub_hfyu_median_prediction)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top); void (*bswap_buf)(uint32_t *dst, uint32_t *src, int w); + void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale); + void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale); + /* (I)DCT */ void (*fdct)(DCTELEM *block/* align 16*/); + void (*fdct248)(DCTELEM *block/* align 16*/); /* IDCT really*/ void (*idct)(DCTELEM *block/* align 16*/); @@ -295,6 +299,8 @@ void dsputil_init(DSPContext* p, AVCodecContext *avctx); */ void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last); +void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type); + #define BYTE_VEC32(c) ((c)*0x01010101UL) static inline uint32_t rnd_avg32(uint32_t a, uint32_t b) @@ -322,25 +328,21 @@ int mm_support(void); #undef emms_c -#if 0 #define MM_MMX 0x0001 /* standard MMX */ #define MM_3DNOW 0x0004 /* AMD 3DNOW */ #define MM_MMXEXT 0x0002 /* SSE integer functions or AMD MMX ext */ #define MM_SSE 0x0008 /* SSE functions */ #define MM_SSE2 0x0010 /* PIV SSE2 functions */ -#endif extern int mm_flags; void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size); 
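The dsputil.h hunks above replace the per-size pix_abs16x16*/pix_abs8x8* pointers and the two-entry cmp tables with five-slot me_cmp_func arrays plus a merged pix_abs[2][4], and every comparison routine now takes a context pointer and an explicit block height. The fragment below is only a rough usage sketch against the declarations introduced by this patch (DSPContext, me_cmp_func, ff_set_cmp; FF_CMP_SATD is assumed to come from avcodec.h); my_sad16(), block_cost() and the buffer/stride arguments are invented for illustration.

#include <stdlib.h>
#include "avcodec.h"
#include "dsputil.h"

/* a comparison routine in the new shape: context pointer, two blocks,
   a stride, and an explicit height h instead of a hard-coded 16 or 8 */
static int my_sad16(void *ctx, uint8_t *blk1, uint8_t *blk2, int line_size, int h)
{
    int x, y, sum = 0;
    (void)ctx;                          /* real callers pass an MpegEncContext */
    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            sum += abs(blk1[x] - blk2[x]);
        blk1 += line_size;
        blk2 += line_size;
    }
    return sum;
}

/* metrics are now picked through the tables rather than through named fields */
int block_cost(DSPContext *dsp, void *mpeg_ctx, uint8_t *cur, uint8_t *ref, int stride)
{
    me_cmp_func cmp[5];
    int cost;

    ff_set_cmp(dsp, cmp, FF_CMP_SATD);              /* slot 0: 16-wide, slot 1: 8-wide */
    cost  = cmp[0](mpeg_ctx, cur, ref, stride, 16);
    cost += cmp[1](mpeg_ctx, cur, ref, stride, 8);

    /* plain SAD through the merged table: first index 0/1 selects 16/8 wide,
       second index 0..3 selects full, x-half, y-half, xy-half pel */
    cost += dsp->pix_abs[1][0](mpeg_ctx, cur, ref, stride, 8);

    /* my_sad16 above could itself be dropped into such a table slot */
    cost += my_sad16(mpeg_ctx, cur, ref, stride, 16);
    return cost;
}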
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size); -#if 0 static inline void emms(void) { __asm __volatile ("emms;":::"memory"); } -#endif #define emms_c() \ @@ -382,7 +384,9 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx); extern int mm_flags; #if defined(HAVE_ALTIVEC) && !defined(CONFIG_DARWIN) +#define pixel altivec_pixel #include <altivec.h> +#undef pixel #endif #define __align8 __attribute__ ((aligned (16))) @@ -483,12 +487,24 @@ void ff_mdct_calc(MDCTContext *s, FFTSample *out, const FFTSample *input, FFTSample *tmp); void ff_mdct_end(MDCTContext *s); -#define WARPER88_1616(name8, name16)\ -static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride){\ - return name8(s, dst , src , stride)\ - +name8(s, dst+8 , src+8 , stride)\ - +name8(s, dst +8*stride, src +8*stride, stride)\ - +name8(s, dst+8+8*stride, src+8+8*stride, stride);\ +#define WARPER8_16(name8, name16)\ +static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\ + return name8(s, dst , src , stride, h)\ + +name8(s, dst+8 , src+8 , stride, h);\ +} + +#define WARPER8_16_SQ(name8, name16)\ +static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\ + int score=0;\ + score +=name8(s, dst , src , stride, 8);\ + score +=name8(s, dst+8 , src+8 , stride, 8);\ + if(h==16){\ + dst += 8*stride;\ + src += 8*stride;\ + score +=name8(s, dst , src , stride, 8);\ + score +=name8(s, dst+8 , src+8 , stride, 8);\ + }\ + return score;\ } #ifndef HAVE_LRINTF @@ -506,10 +522,4 @@ static inline long int lrintf(float x) } #endif -#if defined(CONFIG_OS2) || defined(CONFIG_SUNOS) -static inline float floorf(float f) { - return floor(f); -} -#endif - #endif diff --git a/src/libffmpeg/libavcodec/dv.c b/src/libffmpeg/libavcodec/dv.c index c969e937b..8e041b503 100644 --- a/src/libffmpeg/libavcodec/dv.c +++ b/src/libffmpeg/libavcodec/dv.c @@ -35,45 +35,56 @@ typedef struct DVVideoDecodeContext { const DVprofile* sys; - GetBitContext gb; AVFrame picture; - DCTELEM block[5*6][64] __align8; - /* FIXME: the following is extracted from DSP */ uint8_t dv_zigzag[2][64]; - uint8_t idct_permutation[64]; + uint8_t dv_idct_shift[2][22][64]; + void (*get_pixels)(DCTELEM *block, const uint8_t *pixels, int line_size); - void (*fdct)(DCTELEM *block); - - /* XXX: move it to static storage ? 
*/ - uint8_t dv_shift[2][22][64]; + void (*fdct[2])(DCTELEM *block); void (*idct_put[2])(uint8_t *dest, int line_size, DCTELEM *block); + + GetBitContext gb; + DCTELEM block[5*6][64] __align8; } DVVideoDecodeContext; #define TEX_VLC_BITS 9 + +#ifdef DV_CODEC_TINY_TARGET +#define DV_VLC_MAP_RUN_SIZE 15 +#define DV_VLC_MAP_LEV_SIZE 23 +#else +#define DV_VLC_MAP_RUN_SIZE 64 +#define DV_VLC_MAP_LEV_SIZE 512 +#endif + /* XXX: also include quantization */ static RL_VLC_ELEM *dv_rl_vlc[1]; -static VLC_TYPE dv_vlc_codes[15][23]; +/* VLC encoding lookup table */ +static struct dv_vlc_pair { + uint32_t vlc; + uint8_t size; +} (*dv_vlc_map)[DV_VLC_MAP_LEV_SIZE] = NULL; -static void dv_build_unquantize_tables(DVVideoDecodeContext *s) +static void dv_build_unquantize_tables(DVVideoDecodeContext *s, uint8_t* perm) { int i, q, j; /* NOTE: max left shift is 6 */ for(q = 0; q < 22; q++) { - /* 88 unquant */ + /* 88DCT */ for(i = 1; i < 64; i++) { /* 88 table */ - j = s->idct_permutation[i]; - s->dv_shift[0][q][j] = + j = perm[i]; + s->dv_idct_shift[0][q][j] = dv_quant_shifts[q][dv_88_areas[i]] + 1; } - /* 248 unquant */ + /* 248DCT */ for(i = 1; i < 64; i++) { /* 248 table */ - s->dv_shift[1][q][i] = - dv_quant_shifts[q][dv_248_areas[i]] + 1; + s->dv_idct_shift[1][q][i] = + dv_quant_shifts[q][dv_248_areas[i]] + 1; } } } @@ -81,8 +92,9 @@ static void dv_build_unquantize_tables(DVVideoDecodeContext *s) static int dvvideo_init(AVCodecContext *avctx) { DVVideoDecodeContext *s = avctx->priv_data; - MpegEncContext s2; + DSPContext dsp; static int done=0; + int i, j; if (!done) { int i; @@ -90,12 +102,20 @@ static int dvvideo_init(AVCodecContext *avctx) done = 1; + dv_vlc_map = av_mallocz(DV_VLC_MAP_LEV_SIZE*DV_VLC_MAP_RUN_SIZE*sizeof(struct dv_vlc_pair)); + if (!dv_vlc_map) + return -ENOMEM; + /* NOTE: as a trick, we use the fact the no codes are unused to accelerate the parsing of partial codes */ init_vlc(&dv_vlc, TEX_VLC_BITS, NB_DV_VLC, dv_vlc_len, 1, 1, dv_vlc_bits, 2, 2); dv_rl_vlc[0] = av_malloc(dv_vlc.table_size * sizeof(RL_VLC_ELEM)); + if (!dv_rl_vlc[0]) { + av_free(dv_vlc_map); + return -ENOMEM; + } for(i = 0; i < dv_vlc.table_size; i++){ int code= dv_vlc.table[i][0]; int len = dv_vlc.table[i][1]; @@ -117,34 +137,62 @@ static int dvvideo_init(AVCodecContext *avctx) dv_rl_vlc[0][i].run = run; } - memset(dv_vlc_codes, 0xff, sizeof(dv_vlc_codes)); for (i = 0; i < NB_DV_VLC - 1; i++) { - if (dv_vlc_run[i] < 15 && dv_vlc_level[i] < 23 && dv_vlc_len[i] < 15) - dv_vlc_codes[dv_vlc_run[i]][dv_vlc_level[i]] = i; + if (dv_vlc_run[i] >= DV_VLC_MAP_RUN_SIZE || dv_vlc_level[i] >= DV_VLC_MAP_LEV_SIZE) + continue; + + if (dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].size != 0) + continue; + + dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].vlc = dv_vlc_bits[i] << + (!!dv_vlc_level[i]); + dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].size = dv_vlc_len[i] + + (!!dv_vlc_level[i]); + } + for (i = 0; i < DV_VLC_MAP_RUN_SIZE; i++) { +#ifdef DV_CODEC_TINY_TARGET + for (j = 1; j < DV_VLC_MAP_LEV_SIZE; j++) { + if (dv_vlc_map[i][j].size == 0) { + dv_vlc_map[i][j].vlc = dv_vlc_map[0][j].vlc | + (dv_vlc_map[i-1][0].vlc << (dv_vlc_map[0][j].size)); + dv_vlc_map[i][j].size = dv_vlc_map[i-1][0].size + + dv_vlc_map[0][j].size; + } + } +#else + for (j = 1; j < DV_VLC_MAP_LEV_SIZE/2; j++) { + if (dv_vlc_map[i][j].size == 0) { + dv_vlc_map[i][j].vlc = dv_vlc_map[0][j].vlc | + (dv_vlc_map[i-1][0].vlc << (dv_vlc_map[0][j].size)); + dv_vlc_map[i][j].size = dv_vlc_map[i-1][0].size + + dv_vlc_map[0][j].size; + } + 
dv_vlc_map[i][((uint16_t)(-j))&0x1ff].vlc = + dv_vlc_map[i][j].vlc | 1; + dv_vlc_map[i][((uint16_t)(-j))&0x1ff].size = + dv_vlc_map[i][j].size; + } +#endif } } - /* ugly way to get the idct & scantable */ - /* XXX: fix it */ - memset(&s2, 0, sizeof(MpegEncContext)); - s2.avctx = avctx; - dsputil_init(&s2.dsp, avctx); - if (DCT_common_init(&s2) < 0) - return -1; + /* Generic DSP setup */ + dsputil_init(&dsp, avctx); + s->get_pixels = dsp.get_pixels; - s->get_pixels = s2.dsp.get_pixels; - s->fdct = s2.dsp.fdct; - - s->idct_put[0] = s2.dsp.idct_put; - memcpy(s->idct_permutation, s2.dsp.idct_permutation, 64); - memcpy(s->dv_zigzag[0], s2.intra_scantable.permutated, 64); + /* 88DCT setup */ + s->fdct[0] = dsp.fdct; + s->idct_put[0] = dsp.idct_put; + for (i=0; i<64; i++) + s->dv_zigzag[0][i] = dsp.idct_permutation[ff_zigzag_direct[i]]; - /* XXX: use MMX also for idct248 */ - s->idct_put[1] = simple_idct248_put; - memcpy(s->dv_zigzag[1], dv_248_zigzag, 64); + /* 248DCT setup */ + s->fdct[1] = dsp.fdct248; + s->idct_put[1] = simple_idct248_put; // FIXME: need to add it to DSP + memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64); /* XXX: do it only for constant case */ - dv_build_unquantize_tables(s); + dv_build_unquantize_tables(s, dsp.idct_permutation); /* FIXME: I really don't think this should be here */ if (dv_codec_profile(avctx)) @@ -171,6 +219,10 @@ typedef struct BlockInfo { static const uint16_t block_sizes[6] = { 112, 112, 112, 112, 80, 80 }; +/* bit budget for AC only in 5 MBs */ +static const int vs_total_ac_bits = (100 * 4 + 68*2) * 5; +/* see dv_88_areas and dv_248_areas for details */ +static const int mb_area_start[5] = { 1, 6, 21, 43, 64 }; #ifndef ALT_BITSTREAM_READER #warning only works with ALT_BITSTREAM_READER @@ -297,7 +349,7 @@ static void dv_decode_ac(DVVideoDecodeContext *s, if (pos >= 64) { read_error: #if defined(VLC_DEBUG) || 1 - fprintf(stderr, "error pos=%d\n", pos); + av_log(NULL, AV_LOG_ERROR, "error pos=%d\n", pos); #endif /* for errors, we consider the eob is reached */ mb->eob_reached = 1; @@ -367,7 +419,7 @@ static inline void dv_decode_video_segment(DVVideoDecodeContext *s, mb->scan_table = s->dv_zigzag[dct_mode]; class1 = get_bits(&s->gb, 2); mb->shift_offset = (class1 == 3); - mb->shift_table = s->dv_shift[dct_mode] + mb->shift_table = s->dv_idct_shift[dct_mode] [quant + dv_quant_offset[class1]]; dc = dc << 2; /* convert to unsigned because 128 is not added in the @@ -517,8 +569,9 @@ static inline void dv_decode_video_segment(DVVideoDecodeContext *s, } } +#ifdef DV_CODEC_TINY_TARGET /* Converts run and level (where level != 0) pair into vlc, returning bit size */ -static inline int dv_rl2vlc(int run, int l, uint32_t* vlc) +static always_inline int dv_rl2vlc(int run, int l, uint32_t* vlc) { int sign = l >> 8; int level = (l ^ sign) - sign; @@ -526,148 +579,222 @@ static inline int dv_rl2vlc(int run, int l, uint32_t* vlc) sign = (sign & 1); - if (run < 15 && level < 23 && dv_vlc_codes[run][level] != -1) { - *vlc = (dv_vlc_bits[dv_vlc_codes[run][level]] << 1) | sign; - size = dv_vlc_len[dv_vlc_codes[run][level]] + 1; + if (run < DV_VLC_MAP_RUN_SIZE && level < DV_VLC_MAP_LEV_SIZE) { + *vlc = dv_vlc_map[run][level].vlc | sign; + size = dv_vlc_map[run][level].size; } else { - if (level < 23) { - *vlc = (dv_vlc_bits[dv_vlc_codes[0][level]] << 1) | sign; - size = dv_vlc_len[dv_vlc_codes[0][level]] + 1; + if (level < DV_VLC_MAP_LEV_SIZE) { + *vlc = dv_vlc_map[0][level].vlc | sign; + size = dv_vlc_map[0][level].size; } else { - *vlc = 0xfe00 | (level << 1) | sign; + *vlc 
= 0xfe00 | (level << 1) | sign; size = 16; } - - switch(run) { - case 0: - break; - case 1: - case 2: - *vlc |= ((0x7ce | (run - 1)) << size); - size += 11; - break; - case 3: - case 4: - case 5: - case 6: - *vlc |= ((0xfac | (run - 3)) << size); - size += 12; - break; - default: - *vlc |= ((0x1f80 | (run - 1)) << size); - size += 13; - break; + if (run) { + *vlc |= ((run < 16) ? dv_vlc_map[run-1][0].vlc : + (0x1f80 | (run - 1))) << size; + size += (run < 16) ? dv_vlc_map[run-1][0].size : 13; } } return size; } +static always_inline int dv_rl2vlc_size(int run, int l) +{ + int level = (l ^ (l >> 8)) - (l >> 8); + int size; + + if (run < DV_VLC_MAP_RUN_SIZE && level < DV_VLC_MAP_LEV_SIZE) { + size = dv_vlc_map[run][level].size; + } + else { + size = (level < DV_VLC_MAP_LEV_SIZE) ? dv_vlc_map[0][level].size : 16; + if (run) { + size += (run < 16) ? dv_vlc_map[run-1][0].size : 13; + } + } + return size; +} +#else +static always_inline int dv_rl2vlc(int run, int l, uint32_t* vlc) +{ + *vlc = dv_vlc_map[run][((uint16_t)l)&0x1ff].vlc; + return dv_vlc_map[run][((uint16_t)l)&0x1ff].size; +} + +static always_inline int dv_rl2vlc_size(int run, int l) +{ + return dv_vlc_map[run][((uint16_t)l)&0x1ff].size; +} +#endif + typedef struct EncBlockInfo { - int qno; + int area_q[4]; + int bit_size[4]; + int prev_run[4]; + int cur_ac; int cno; int dct_mode; - int block_size; DCTELEM *mb; - PutBitContext pb; + uint8_t partial_bit_count; + uint32_t partial_bit_buffer; /* we can't use uint16_t here */ } EncBlockInfo; -static inline int dv_bits_left(EncBlockInfo* bi) +static always_inline int dv_bits_left(PutBitContext* s) { - return (bi->block_size - get_bit_count(&bi->pb)); + return (s->buf_end - s->buf) * 8 - + ((s->buf_ptr - s->buf) * 8 + 32 - (int64_t)s->bit_left); } -static inline void dv_encode_ac(EncBlockInfo* bi, PutBitContext* heap) +static always_inline void dv_encode_ac(EncBlockInfo* bi, PutBitContext* pb_pool, + int pb_size) { - int i, level, size, run = 0; - uint32_t vlc; - PutBitContext* cpb = &bi->pb; + int run; + int bits_left; + PutBitContext* pb = pb_pool; + int size = bi->partial_bit_count; + uint32_t vlc = bi->partial_bit_buffer; - for (i=1; i<64; i++) { - level = bi->mb[ff_zigzag_direct[i]] / - (1<<(dv_quant_shifts[bi->qno + dv_quant_offset[bi->cno]] - [dv_88_areas[ff_zigzag_direct[i]]] + 4 + (bi->cno == 3))); - if (level != 0) { - size = dv_rl2vlc(run, level, &vlc); -put_vlc: - -#ifdef VLC_DEBUG - printf(" %3d:%3d", run, level); -#endif - if (cpb == &bi->pb && size > dv_bits_left(bi)) { - size -= dv_bits_left(bi); - put_bits(cpb, dv_bits_left(bi), vlc >> size); - vlc = vlc & ((1<<size)-1); - cpb = heap; + bi->partial_bit_count = bi->partial_bit_buffer = 0; +vlc_loop: + /* Find suitable storage space */ + for (; size > (bits_left = dv_bits_left(pb)); pb++) { + if (bits_left) { + size -= bits_left; + put_bits(pb, bits_left, vlc >> size); + vlc = vlc & ((1<<size)-1); + } + if (pb_size == 1) { + bi->partial_bit_count = size; + bi->partial_bit_buffer = vlc; + return; + } + --pb_size; + } + + /* Store VLC */ + put_bits(pb, size, vlc); + + /* Construct the next VLC */ + run = 0; + for (; bi->cur_ac < 64; bi->cur_ac++, run++) { + if (bi->mb[bi->cur_ac]) { + size = dv_rl2vlc(run, bi->mb[bi->cur_ac], &vlc); + bi->cur_ac++; + goto vlc_loop; } - put_bits(cpb, size, vlc); - run = 0; - } else - run++; - } + } - if (i == 64) { - size = 4; vlc = 6; /* End Of Block stamp */ - goto put_vlc; - } + if (bi->cur_ac == 64) { + size = 4; vlc = 6; /* End Of Block stamp */ + bi->cur_ac++; + goto vlc_loop; + } } 
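dv_encode_ac() above drops the old dv_redistr_bits() pass: each call now walks a pool of PutBitContexts and spills whatever does not fit into the next one, parking at most one unfinished code in partial_bit_count/partial_bit_buffer, and the encode path further down invokes it with pool sizes 1, 6 and 5*6 for the block, macroblock and whole-segment passes. The toy program below only illustrates that spilling idea with plain integer bit counters instead of real bit writers; the area capacities echo block_sizes[], but the fill levels and the 30-bit write are invented.

#include <stdio.h>

struct area { int capacity; int used; };      /* crude stand-in for one PutBitContext */

/* place nbits into the first areas of the pool that still have room; whatever
   cannot be placed is returned (the real code keeps such a remainder in
   partial_bit_buffer and retries with a larger pool on the next pass) */
static int spill_write(struct area *pool, int npools, int nbits)
{
    int i;
    for (i = 0; i < npools && nbits > 0; i++) {
        int room = pool[i].capacity - pool[i].used;
        int take = nbits < room ? nbits : room;
        pool[i].used += take;
        nbits        -= take;
    }
    return nbits;                             /* leftover bits, 0 if everything fit */
}

int main(void)
{
    /* six block areas of one macroblock (bit budgets as in block_sizes[]),
       some of them already nearly full from the first pass */
    struct area mb[6] = { {112,110}, {112,40}, {112,112}, {112,70}, {80,80}, {80,20} };
    int left = spill_write(mb, 6, 30);        /* 2 bits land in area 0, 28 in area 1 */

    printf("area0=%d area1=%d leftover=%d\n", mb[0].used, mb[1].used, left);
    return 0;
}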
-static inline void dv_redistr_bits(EncBlockInfo* bi, int count, uint8_t* extra_data, int extra_bits, PutBitContext* heap) +static always_inline void dv_set_class_number(DCTELEM* blk, EncBlockInfo* bi, + const uint8_t* zigzag_scan, int bias) { - int i; - GetBitContext gb; - - init_get_bits(&gb, extra_data, extra_bits); + int i, area; + int run; + int classes[] = {12, 24, 36, 0xffff}; + + run = 0; + bi->mb[0] = blk[0]; + bi->cno = 0; + for (area = 0; area < 4; area++) { + bi->prev_run[area] = run; + bi->bit_size[area] = 0; + for (i=mb_area_start[area]; i<mb_area_start[area+1]; i++) { + bi->mb[i] = (blk[zigzag_scan[i]] / 16); + while ((bi->mb[i] ^ (bi->mb[i] >> 8)) > classes[bi->cno]) + bi->cno++; + + if (bi->mb[i]) { + bi->bit_size[area] += dv_rl2vlc_size(run, bi->mb[i]); + run = 0; + } else + ++run; + } + } + bi->bit_size[3] += 4; /* EOB marker */ + bi->cno += bias; - for (i=0; i<count; i++) { - int bits_left = dv_bits_left(bi); -#ifdef VLC_DEBUG - if (bits_left) - printf("------------> inserting %d bytes in %d:%d\n", bits_left, i/6, i%6); -#endif - if (bits_left > extra_bits) { - bit_copy(&bi->pb, &gb, extra_bits); - extra_bits = 0; - break; - } else - bit_copy(&bi->pb, &gb, bits_left); - - extra_bits -= bits_left; - bi++; + if (bi->cno >= 3) { /* FIXME: we have to recreate bit_size[], prev_run[] */ + bi->cno = 3; + for (i=1; i<64; i++) + bi->mb[i] /= 2; } +} + +#define SC(x, y) ((s[x] - s[y]) ^ ((s[x] - s[y]) >> 7)) +static always_inline int dv_guess_dct_mode(DCTELEM *blk) { + DCTELEM *s; + int score88 = 0; + int score248 = 0; + int i; - if (extra_bits > 0 && heap) - bit_copy(heap, &gb, extra_bits); + /* Compute 8-8 score (small values give a better chance for 8-8 DCT) */ + s = blk; + for(i=0; i<7; i++) { + score88 += SC(0, 8) + SC(1, 9) + SC(2, 10) + SC(3, 11) + + SC(4, 12) + SC(5,13) + SC(6, 14) + SC(7, 15); + s += 8; + } + /* Compute 2-4-8 score (small values give a better chance for 2-4-8 DCT) */ + s = blk; + for(i=0; i<6; i++) { + score248 += SC(0, 16) + SC(1,17) + SC(2, 18) + SC(3, 19) + + SC(4, 20) + SC(5,21) + SC(6, 22) + SC(7, 23); + s += 8; + } + + return (score88 - score248 > -10); } -static inline void dv_set_class_number(EncBlockInfo* bi, int j) +static inline void dv_guess_qnos(EncBlockInfo* blks, int* qnos) { - int i, max_ac = 0; - - for (i=1; i<64; i++) { - int ac = abs(bi->mb[ff_zigzag_direct[i]]) / 4; - if (max_ac < ac) - max_ac = ac; - } - if (max_ac < 12) - bi->cno = j; - else if (max_ac < 24) - bi->cno = j + 1; - else if (max_ac < 36) - bi->cno = j + 2; - else - bi->cno = j + 3; + int size[5]; + int i, j, k, a, run; + EncBlockInfo* b; - if (bi->cno > 3) - bi->cno = 3; + do { + b = blks; + for (i=0; i<5; i++) { + if (!qnos[i]) + continue; + + qnos[i]--; + size[i] = 0; + for (j=0; j<6; j++, b++) { + for (a=0; a<4; a++) { + if (b->area_q[a] != dv_quant_shifts[qnos[i] + dv_quant_offset[b->cno]][a]) { + b->bit_size[a] = (a==3)?4:0; + b->area_q[a]++; + run = b->prev_run[a]; + for (k=mb_area_start[a]; k<mb_area_start[a+1]; k++) { + b->mb[k] /= 2; + if (b->mb[k]) { + b->bit_size[a] += dv_rl2vlc_size(run, b->mb[k]); + run = 0; + } else + ++run; + } + } + size[i] += b->bit_size[a]; + } + } + } + } while ((vs_total_ac_bits < size[0] + size[1] + size[2] + size[3] + size[4]) && + (qnos[0]|qnos[1]|qnos[2]|qnos[3]|qnos[4])); } /* * This is a very rough initial implementaion. The performance is - * horrible and some features are missing, mainly 2-4-8 DCT encoding. 
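The new dv_guess_qnos() above is the rate control that replaces the old decrement-QNO-and-re-encode loop: all five macroblocks of a segment start at quantiser number 15, and the loop keeps coarsening whichever ones still can be coarsened, re-estimating only the areas whose quant shift actually changed, until the summed AC bit sizes fit vs_total_ac_bits. The toy below is a deliberately simplified stand-in for that loop; the per-macroblock bit counts and the assumption that one quantiser step saves about a quarter of the bits are invented, only the budget mirrors the vs_total_ac_bits constant above.

#include <stdio.h>

#define BUDGET ((100 * 4 + 68 * 2) * 5)   /* mirrors vs_total_ac_bits above */

int main(void)
{
    int qno[5]  = { 15, 15, 15, 15, 15 };            /* 15 = no quantisation yet */
    int bits[5] = { 900, 700, 520, 480, 400 };       /* invented AC sizes per MB */
    int total, i;

    do {
        total = 0;
        for (i = 0; i < 5; i++) {
            if (qno[i]) {                            /* qno 0: nothing left to coarsen */
                qno[i]--;
                bits[i] = bits[i] * 3 / 4;           /* pretend one step saves ~25% */
            }
            total += bits[i];
        }
    } while (total > BUDGET && (qno[0] | qno[1] | qno[2] | qno[3] | qno[4]));

    printf("total=%d budget=%d qnos=%d %d %d %d %d\n",
           total, BUDGET, qno[0], qno[1], qno[2], qno[3], qno[4]);
    return 0;
}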
- * The weighting is missing as well, but it's missing from the decoding - * step also -- so at least we're on the same page with decoder ;-) + * horrible and the weighting is missing. But it's missing from the + * decoding step also -- so at least we're on the same page with decoder ;-) */ static inline void dv_encode_video_segment(DVVideoDecodeContext *s, uint8_t *dif, @@ -677,20 +804,18 @@ static inline void dv_encode_video_segment(DVVideoDecodeContext *s, int mb_x, mb_y, c_offset, linesize; uint8_t* y_ptr; uint8_t* data; + uint8_t* ptr; int do_edge_wrap; - DCTELEM *block; + DCTELEM block[64] __align8; EncBlockInfo enc_blks[5*6]; + PutBitContext pbs[5*6]; + PutBitContext* pb; EncBlockInfo* enc_blk; - int free_vs_bits; - int extra_bits; - PutBitContext extra_vs; - uint8_t extra_vs_data[5*6*128]; - uint8_t extra_mb_data[6*128]; - - int QNO = 15; + int vs_bit_size = 0; + int qnos[5]; - /* Stage 1 -- doing DCT on 5 MBs */ - block = &s->block[0][0]; + enc_blk = &enc_blks[0]; + pb = &pbs[0]; for(mb_index = 0; mb_index < 5; mb_index++) { v = *mb_pos_ptr++; mb_x = v & 0xff; @@ -700,6 +825,8 @@ static inline void dv_encode_video_segment(DVVideoDecodeContext *s, ((mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8)) : (((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8)); do_edge_wrap = 0; + qnos[mb_index] = 15; /* No quantization */ + ptr = dif + mb_index*80 + 4; for(j = 0;j < 6; j++) { if (j < 4) { /* Four Y blocks */ /* NOTE: at end of line, the macroblock is handled as 420 */ @@ -731,101 +858,58 @@ static inline void dv_encode_video_segment(DVVideoDecodeContext *s, } else { /* Simple copy: 8x8 -> 8x8 */ s->get_pixels(block, data, linesize); } - - s->fdct(block); + + enc_blk->dct_mode = dv_guess_dct_mode(block); + enc_blk->mb = &s->block[mb_index*6+j][0]; + enc_blk->area_q[0] = enc_blk->area_q[1] = enc_blk->area_q[2] = enc_blk->area_q[3] = 0; + enc_blk->partial_bit_count = 0; + enc_blk->partial_bit_buffer = 0; + enc_blk->cur_ac = 1; + + s->fdct[enc_blk->dct_mode](block); + + dv_set_class_number(block, enc_blk, + enc_blk->dct_mode ? 
ff_zigzag248_direct : ff_zigzag_direct, + j/4*(j%2)); + + init_put_bits(pb, ptr, block_sizes[j]/8); + put_bits(pb, 9, (uint16_t)(((enc_blk->mb[0] >> 3) - 1024) >> 2)); + put_bits(pb, 1, enc_blk->dct_mode); + put_bits(pb, 2, enc_blk->cno); - block += 64; + vs_bit_size += enc_blk->bit_size[0] + enc_blk->bit_size[1] + + enc_blk->bit_size[2] + enc_blk->bit_size[3]; + ++enc_blk; + ++pb; + ptr += block_sizes[j]/8; } } - /* Stage 2 -- setup for encoding phase */ - enc_blk = &enc_blks[0]; - block = &s->block[0][0]; - for (i=0; i<5; i++) { - for (j=0; j<6; j++) { - enc_blk->mb = block; - enc_blk->dct_mode = 0; - enc_blk->block_size = block_sizes[j]; - - dv_set_class_number(enc_blk, j/4*(j%2)); - - block += 64; - enc_blk++; - } - } - - /* Stage 3 -- encoding by trial-and-error */ -encode_vs: - enc_blk = &enc_blks[0]; - for (i=0; i<5; i++) { - uint8_t* p = dif + i*80 + 4; - for (j=0; j<6; j++) { - enc_blk->qno = QNO; - init_put_bits(&enc_blk->pb, p, block_sizes[j]/8); - enc_blk++; - p += block_sizes[j]/8; - } - } + if (vs_total_ac_bits < vs_bit_size) + dv_guess_qnos(&enc_blks[0], &qnos[0]); - init_put_bits(&extra_vs, extra_vs_data, sizeof(extra_vs_data)); - free_vs_bits = 0; - enc_blk = &enc_blks[0]; for (i=0; i<5; i++) { - PutBitContext extra_mb; - EncBlockInfo* enc_blk2 = enc_blk; - int free_mb_bits = 0; - - init_put_bits(&extra_mb, extra_mb_data, sizeof(extra_mb_data)); - dif[i*80 + 3] = enc_blk->qno; - - for (j=0; j<6; j++) { - uint16_t dc = ((enc_blk->mb[0] >> 3) - 1024) >> 2; + dif[i*80 + 3] = qnos[i]; + } - put_bits(&enc_blk->pb, 9, dc); - put_bits(&enc_blk->pb, 1, enc_blk->dct_mode); - put_bits(&enc_blk->pb, 2, enc_blk->cno); + /* First pass over individual cells only */ + for (j=0; j<5*6; j++) + dv_encode_ac(&enc_blks[j], &pbs[j], 1); -#ifdef VLC_DEBUG - printf("[%d, %d]: ", i, j); -#endif - dv_encode_ac(enc_blk, &extra_mb); -#ifdef VLC_DEBUG - printf("\n"); -#endif - - free_mb_bits += dv_bits_left(enc_blk); - enc_blk++; - } - - /* We can't flush extra_mb just yet -- since it'll round up bit number */ - extra_bits = get_bit_count(&extra_mb); - if (free_mb_bits > extra_bits) - free_vs_bits += free_mb_bits - extra_bits; - - if (extra_bits) { /* FIXME: speed up things when free_mb_bits == 0 */ - flush_put_bits(&extra_mb); - dv_redistr_bits(enc_blk2, 6, extra_mb_data, extra_bits, &extra_vs); - } - } - - /* We can't flush extra_mb just yet -- since it'll round up bit number */ - extra_bits = get_bit_count(&extra_vs); - if (extra_bits > free_vs_bits && QNO) { /* FIXME: very crude trial-and-error */ - QNO--; - goto encode_vs; - } - - if (extra_bits) { - flush_put_bits(&extra_vs); - dv_redistr_bits(&enc_blks[0], 5*6, extra_vs_data, extra_bits, NULL); + /* Second pass over each MB space */ + for (j=0; j<5*6; j++) { + if (enc_blks[j].cur_ac < 65 || enc_blks[j].partial_bit_count) + dv_encode_ac(&enc_blks[j], &pbs[(j/6)*6], 6); } - for (i=0; i<6*5; i++) { - flush_put_bits(&enc_blks[i].pb); -#ifdef VLC_DEBUG - printf("[%d:%d] qno=%d cno=%d\n", i/6, i%6, enc_blks[i].qno, enc_blks[i].cno); -#endif + /* Third and final pass over the whole vides segment space */ + for (j=0; j<5*6; j++) { + if (enc_blks[j].cur_ac < 65 || enc_blks[j].partial_bit_count) + dv_encode_ac(&enc_blks[j], &pbs[0], 6*5); } + + for (j=0; j<5*6; j++) + flush_put_bits(&pbs[j]); } /* NOTE: exactly one frame must be given (120000 bytes for NTSC, @@ -837,6 +921,11 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, DVVideoDecodeContext *s = avctx->priv_data; int ds, vs; const uint16_t *mb_pos_ptr; + + *data_size=0; + /* special case 
for last picture */ + if(buf_size==0) + return 0; s->sys = dv_frame_profile(buf); if (!s->sys || buf_size < s->sys->frame_size) @@ -851,7 +940,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, avctx->width = s->sys->width; avctx->height = s->sys->height; if(avctx->get_buffer(avctx, &s->picture) < 0) { - fprintf(stderr, "get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } s->picture.interlaced_frame = 1; diff --git a/src/libffmpeg/libavcodec/dvdata.h b/src/libffmpeg/libavcodec/dvdata.h index 16b5786a8..e6e0986ba 100644 --- a/src/libffmpeg/libavcodec/dvdata.h +++ b/src/libffmpeg/libavcodec/dvdata.h @@ -37,6 +37,7 @@ typedef struct DVprofile { int ltc_divisor; /* FPS from the LTS standpoint */ int height; /* picture height in pixels */ int width; /* picture width in pixels */ + AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */ const uint16_t *video_place; /* positions of all DV macro blocks */ enum PixelFormat pix_fmt; /* picture pixel format */ @@ -50,6 +51,11 @@ typedef struct DVprofile { #define NB_DV_VLC 409 +/* + * There's a catch about the following three tables: the mapping they establish + * between (run, level) and vlc is not 1-1. So you have to watch out for that + * when building misc. tables. E.g. (1, 0) can be either 0x7cf or 0x1f82. + */ static const uint16_t dv_vlc_bits[409] = { 0x0000, 0x0002, 0x0007, 0x0008, 0x0009, 0x0014, 0x0015, 0x0016, 0x0017, 0x0030, 0x0031, 0x0032, 0x0033, 0x0068, 0x0069, 0x006a, @@ -270,19 +276,6 @@ static const uint8_t dv_vlc_level[409] = { 0, }; -/* Specific zigzag scan for 248 idct. NOTE that unlike the - specification, we interleave the fields */ -static const uint8_t dv_248_zigzag[64] = { - 0, 8, 1, 9, 16, 24, 2, 10, - 17, 25, 32, 40, 48, 56, 33, 41, - 18, 26, 3, 11, 4, 12, 19, 27, - 34, 42, 49, 57, 50, 58, 35, 43, - 20, 28, 5, 13, 6, 14, 21, 29, - 36, 44, 51, 59, 52, 60, 37, 45, - 22, 30, 7, 15, 23, 31, 38, 46, - 53, 61, 54, 62, 39, 47, 55, 63, -}; - /* unquant tables (not used directly) */ static const uint8_t dv_88_areas[64] = { 0,0,0,1,1,1,2,2, @@ -1306,6 +1299,7 @@ static const DVprofile dv_profiles[] = { .frame_rate_base = 1001, .height = 480, .width = 720, + .sar = {{72, 79}, {96, 79}}, .video_place = dv_place_411, .pix_fmt = PIX_FMT_YUV411P, .audio_stride = 90, @@ -1321,6 +1315,7 @@ static const DVprofile dv_profiles[] = { .ltc_divisor = 25, .height = 576, .width = 720, + .sar = {{128, 117}, {512, 351}}, .video_place = dv_place_420, .pix_fmt = PIX_FMT_YUV420P, .audio_stride = 108, @@ -1336,6 +1331,7 @@ static const DVprofile dv_profiles[] = { .ltc_divisor = 25, .height = 576, .width = 720, + .sar = {{128, 117}, {512, 351}}, .video_place = dv_place_411P, .pix_fmt = PIX_FMT_YUV411P, .audio_stride = 108, diff --git a/src/libffmpeg/libavcodec/error_resilience.c b/src/libffmpeg/libavcodec/error_resilience.c index 5ac2190b3..5067a248f 100644 --- a/src/libffmpeg/libavcodec/error_resilience.c +++ b/src/libffmpeg/libavcodec/error_resilience.c @@ -1,7 +1,7 @@ /* * Error resilience / concealment * - * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> + * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -209,8 +209,8 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st int left_damage = left_status&(DC_ERROR|AC_ERROR|MV_ERROR); int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR); int offset= b_x*8 + 
b_y*stride*8; - int16_t *left_mv= s->motion_val[s->block_wrap[0]*((b_y<<(1-is_luma)) + 1) + ( b_x <<(1-is_luma))]; - int16_t *right_mv= s->motion_val[s->block_wrap[0]*((b_y<<(1-is_luma)) + 1) + ((b_x+1)<<(1-is_luma))]; + int16_t *left_mv= s->current_picture.motion_val[0][s->block_wrap[0]*((b_y<<(1-is_luma)) + 1) + ( b_x <<(1-is_luma))]; + int16_t *right_mv= s->current_picture.motion_val[0][s->block_wrap[0]*((b_y<<(1-is_luma)) + 1) + ((b_x+1)<<(1-is_luma))]; if(!(left_damage||right_damage)) continue; // both undamaged @@ -269,8 +269,8 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st int top_damage = top_status&(DC_ERROR|AC_ERROR|MV_ERROR); int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR); int offset= b_x*8 + b_y*stride*8; - int16_t *top_mv= s->motion_val[s->block_wrap[0]*(( b_y <<(1-is_luma)) + 1) + (b_x<<(1-is_luma))]; - int16_t *bottom_mv= s->motion_val[s->block_wrap[0]*(((b_y+1)<<(1-is_luma)) + 1) + (b_x<<(1-is_luma))]; + int16_t *top_mv= s->current_picture.motion_val[0][s->block_wrap[0]*(( b_y <<(1-is_luma)) + 1) + (b_x<<(1-is_luma))]; + int16_t *bottom_mv= s->current_picture.motion_val[0][s->block_wrap[0]*(((b_y+1)<<(1-is_luma)) + 1) + (b_x<<(1-is_luma))]; if(!(top_damage||bottom_damage)) continue; // both undamaged @@ -380,8 +380,8 @@ int score_sum=0; int best_pred=0; const int mot_stride= mb_width*2+2; const int mot_index= mb_x*2 + 1 + (mb_y*2+1)*mot_stride; - int prev_x= s->motion_val[mot_index][0]; - int prev_y= s->motion_val[mot_index][1]; + int prev_x= s->current_picture.motion_val[0][mot_index][0]; + int prev_y= s->current_picture.motion_val[0][mot_index][1]; if((mb_x^mb_y^pass)&1) continue; @@ -406,23 +406,23 @@ int score_sum=0; none_left=0; if(mb_x>0 && fixed[mb_xy-1]){ - mv_predictor[pred_count][0]= s->motion_val[mot_index - 2][0]; - mv_predictor[pred_count][1]= s->motion_val[mot_index - 2][1]; + mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - 2][0]; + mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - 2][1]; pred_count++; } if(mb_x+1<mb_width && fixed[mb_xy+1]){ - mv_predictor[pred_count][0]= s->motion_val[mot_index + 2][0]; - mv_predictor[pred_count][1]= s->motion_val[mot_index + 2][1]; + mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + 2][0]; + mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + 2][1]; pred_count++; } if(mb_y>0 && fixed[mb_xy-mb_stride]){ - mv_predictor[pred_count][0]= s->motion_val[mot_index - mot_stride*2][0]; - mv_predictor[pred_count][1]= s->motion_val[mot_index - mot_stride*2][1]; + mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_stride*2][0]; + mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_stride*2][1]; pred_count++; } if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){ - mv_predictor[pred_count][0]= s->motion_val[mot_index + mot_stride*2][0]; - mv_predictor[pred_count][1]= s->motion_val[mot_index + mot_stride*2][1]; + mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_stride*2][0]; + mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_stride*2][1]; pred_count++; } if(pred_count==0) continue; @@ -467,8 +467,8 @@ int score_sum=0; pred_count++; /* last MV */ - mv_predictor[pred_count][0]= s->motion_val[mot_index][0]; - mv_predictor[pred_count][1]= s->motion_val[mot_index][1]; + mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index][0]; + mv_predictor[pred_count][1]= 
s->current_picture.motion_val[0][mot_index][1]; pred_count++; s->mv_dir = MV_DIR_FORWARD; @@ -485,8 +485,8 @@ int score_sum=0; int score=0; uint8_t *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; - s->motion_val[mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0]; - s->motion_val[mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1]; + s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0]; + s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1]; decode_mb(s); @@ -517,9 +517,9 @@ int score_sum=0; } } score_sum+= best_score; -//FIXME no need to set s->motion_val[mot_index][0] explicit - s->motion_val[mot_index][0]= s->mv[0][0][0]= mv_predictor[best_pred][0]; - s->motion_val[mot_index][1]= s->mv[0][0][1]= mv_predictor[best_pred][1]; +//FIXME no need to set s->current_picture.motion_val[0][mot_index][0] explicit + s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[best_pred][0]; + s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[best_pred][1]; decode_mb(s); @@ -582,8 +582,8 @@ static int is_intra_more_likely(MpegEncContext *s){ uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize; uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize; - is_intra_likely += s->dsp.pix_abs16x16(last_mb_ptr, mb_ptr , s->linesize); - is_intra_likely -= s->dsp.pix_abs16x16(last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize); + is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16); + is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16); }else{ if(IS_INTRA(s->current_picture.mb_type[mb_xy])) is_intra_likely++; @@ -669,14 +669,20 @@ void ff_er_frame_end(MpegEncContext *s){ if(!s->error_resilience || s->error_count==0) return; - fprintf(stderr, "concealing errors\n"); + av_log(s->avctx, AV_LOG_INFO, "concealing errors\n"); - if(s->motion_val == NULL){ + if(s->current_picture.motion_val[0] == NULL){ int size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2); + Picture *pic= s->current_picture_ptr; - fprintf(stderr, "Warning MVs not available\n"); - - s->motion_val= av_mallocz(size * 2 * sizeof(int16_t)); + av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n"); + + for(i=0; i<2; i++){ + pic->motion_val_base[i]= av_mallocz((size+1) * 2 * sizeof(uint16_t)); //FIXME size + pic->motion_val[i]= pic->motion_val_base[i]+1; + } + pic->motion_subsample_log2= 3; + s->current_picture= *s->current_picture_ptr; } if(s->avctx->debug&FF_DEBUG_ER){ @@ -684,9 +690,9 @@ void ff_er_frame_end(MpegEncContext *s){ for(mb_x=0; mb_x<s->mb_width; mb_x++){ int status= s->error_status_table[mb_x + mb_y*s->mb_stride]; - printf("%2X ", status); + av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status); } - printf("\n"); + av_log(s->avctx, AV_LOG_DEBUG, "\n"); } } @@ -843,13 +849,13 @@ void ff_er_frame_end(MpegEncContext *s){ int j; s->mv_type = MV_TYPE_8X8; for(j=0; j<4; j++){ - s->mv[0][j][0] = s->motion_val[ mb_index + (j&1) + (j>>1)*s->block_wrap[0] ][0]; - s->mv[0][j][1] = s->motion_val[ mb_index + (j&1) + (j>>1)*s->block_wrap[0] ][1]; + s->mv[0][j][0] = s->current_picture.motion_val[0][ mb_index + (j&1) + (j>>1)*s->block_wrap[0] ][0]; + s->mv[0][j][1] = s->current_picture.motion_val[0][ mb_index + (j&1) + (j>>1)*s->block_wrap[0] ][1]; } }else{ s->mv_type = MV_TYPE_16X16; - s->mv[0][0][0] = s->motion_val[ mb_x*2+1 + (mb_y*2+1)*s->block_wrap[0] ][0]; - s->mv[0][0][1] = s->motion_val[ mb_x*2+1 + 
(mb_y*2+1)*s->block_wrap[0] ][1]; + s->mv[0][0][0] = s->current_picture.motion_val[0][ mb_x*2+1 + (mb_y*2+1)*s->block_wrap[0] ][0]; + s->mv[0][0][1] = s->current_picture.motion_val[0][ mb_x*2+1 + (mb_y*2+1)*s->block_wrap[0] ][1]; } s->dsp.clear_blocks(s->block[0]); @@ -882,10 +888,10 @@ void ff_er_frame_end(MpegEncContext *s){ int time_pp= s->pp_time; int time_pb= s->pb_time; - s->mv[0][0][0] = s->motion_val[xy][0]*time_pb/time_pp; - s->mv[0][0][1] = s->motion_val[xy][1]*time_pb/time_pp; - s->mv[1][0][0] = s->motion_val[xy][0]*(time_pb - time_pp)/time_pp; - s->mv[1][0][1] = s->motion_val[xy][1]*(time_pb - time_pp)/time_pp; + s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp; + s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp; + s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp; + s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp; }else{ s->mv[0][0][0]= 0; s->mv[0][0][1]= 0; diff --git a/src/libffmpeg/libavcodec/eval.c b/src/libffmpeg/libavcodec/eval.c index 28a492cd4..714ba046c 100644 --- a/src/libffmpeg/libavcodec/eval.c +++ b/src/libffmpeg/libavcodec/eval.c @@ -61,7 +61,7 @@ static void evalExpression(Parser *p); static void push(Parser *p, double d){ if(p->stack_index+1>= STACK_SIZE){ - fprintf(stderr, "stack overflow in the parser\n"); + av_log(NULL, AV_LOG_ERROR, "stack overflow in the parser\n"); return; } p->stack[ p->stack_index++ ]= d; @@ -70,7 +70,7 @@ static void push(Parser *p, double d){ static double pop(Parser *p){ if(p->stack_index<=0){ - fprintf(stderr, "stack underflow in the parser\n"); + av_log(NULL, AV_LOG_ERROR, "stack underflow in the parser\n"); return NAN; } //printf("pop\n"); fflush(stdout); @@ -109,7 +109,7 @@ static void evalPrimary(Parser *p){ p->s= strchr(p->s, '('); if(p->s==NULL){ - fprintf(stderr, "Parser: missing ( in \"%s\"\n", next); + av_log(NULL, AV_LOG_ERROR, "Parser: missing ( in \"%s\"\n", next); return; } p->s++; // "(" @@ -159,13 +159,13 @@ static void evalPrimary(Parser *p){ } if(error){ - fprintf(stderr, "Parser: unknown function in \"%s\"\n", next); + av_log(NULL, AV_LOG_ERROR, "Parser: unknown function in \"%s\"\n", next); return; } } if(p->s[-1]!= ')'){ - fprintf(stderr, "Parser: missing ) in \"%s\"\n", next); + av_log(NULL, AV_LOG_ERROR, "Parser: missing ) in \"%s\"\n", next); return; } push(p, d); @@ -185,7 +185,7 @@ static void evalPow(Parser *p){ evalExpression(p); if(p->s[0]!=')') - fprintf(stderr, "Parser: missing )\n"); + av_log(NULL, AV_LOG_ERROR, "Parser: missing )\n"); p->s++; }else{ evalPrimary(p); diff --git a/src/libffmpeg/libavcodec/fastmemcpy.h b/src/libffmpeg/libavcodec/fastmemcpy.h index ed34007ff..3459bf1ce 100644 --- a/src/libffmpeg/libavcodec/fastmemcpy.h +++ b/src/libffmpeg/libavcodec/fastmemcpy.h @@ -1,8 +1,4 @@ -#ifndef __FASTMEMCPY_H__ -#define __FASTMEMCPY_H__ - -#include "xineutils.h" - +#if 0 +extern void *xine_fast_memcpy(void *to, const void *from, size_t len); #define memcpy(a,b,c) xine_fast_memcpy(a,b,c) - #endif diff --git a/src/libffmpeg/libavcodec/golomb.c b/src/libffmpeg/libavcodec/golomb.c index a696b2a76..a63f82280 100644 --- a/src/libffmpeg/libavcodec/golomb.c +++ b/src/libffmpeg/libavcodec/golomb.c @@ -25,7 +25,7 @@ * @author Michael Niedermayer <michaelni@gmx.at> */ -#include <inttypes.h> +#include "common.h" const uint8_t ff_golomb_vlc_len[512]={ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, diff --git a/src/libffmpeg/libavcodec/h263.c b/src/libffmpeg/libavcodec/h263.c index 
2d53547ac..de9110a4e 100644 --- a/src/libffmpeg/libavcodec/h263.c +++ b/src/libffmpeg/libavcodec/h263.c @@ -3,6 +3,7 @@ * Copyright (c) 2000,2001 Fabrice Bellard. * H263+ support. * Copyright (c) 2001 Juan J. Sierralta P. + * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -49,6 +50,8 @@ #define SPRITE_TRAJ_VLC_BITS 6 #define MB_TYPE_B_VLC_BITS 4 #define TEX_VLC_BITS 9 +#define H263_MBTYPE_B_VLC_BITS 6 +#define CBPC_B_VLC_BITS 3 #ifdef CONFIG_ENCODERS static void h263_encode_block(MpegEncContext * s, DCTELEM * block, @@ -67,8 +70,8 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr); static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, int n, int coded, int intra, int rvlc); -#ifdef CONFIG_ENCODERS static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr); +#ifdef CONFIG_ENCODERS static void mpeg4_encode_visual_object_header(MpegEncContext * s); static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_number); #endif //CONFIG_ENCODERS @@ -185,7 +188,7 @@ void ff_flv_encode_picture_header(MpegEncContext * s, int picture_number) if(s->h263_aic){ s->y_dc_scale_table= - s->c_dc_scale_table= h263_aic_dc_scale_table; + s->c_dc_scale_table= ff_aic_dc_scale_table; }else{ s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; @@ -200,8 +203,6 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number) /* Update the pointer to last GOB */ s->ptr_lastgob = pbBufPtr(&s->pb); - s->gob_number = 0; - put_bits(&s->pb, 22, 0x20); /* PSC */ put_bits(&s->pb, 8, (((int64_t)s->picture_number * 30 * s->avctx->frame_rate_base) / s->avctx->frame_rate) & 0xff); @@ -222,7 +223,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number) calculation of the current MB to see if we're on the limits */ put_bits(&s->pb, 1, 0); /* unrestricted motion vector: off */ put_bits(&s->pb, 1, 0); /* SAC: off */ - put_bits(&s->pb, 1, 0); /* advanced prediction mode: off */ + put_bits(&s->pb, 1, s->obmc); /* advanced prediction mode */ put_bits(&s->pb, 1, 0); /* not PB frame */ put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */ @@ -237,17 +238,16 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number) put_bits(&s->pb, 3, format); put_bits(&s->pb,1,0); /* Custom PCF: off */ - s->umvplus = s->unrestricted_mv; - put_bits(&s->pb, 1, s->umvplus); /* Unrestricted Motion Vector */ + put_bits(&s->pb,1, s->umvplus); /* Unrestricted Motion Vector */ put_bits(&s->pb,1,0); /* SAC: off */ - put_bits(&s->pb,1,0); /* Advanced Prediction Mode: off */ + put_bits(&s->pb,1,s->obmc); /* Advanced Prediction Mode */ put_bits(&s->pb,1,s->h263_aic); /* Advanced Intra Coding */ - put_bits(&s->pb,1,0); /* Deblocking Filter: off */ - put_bits(&s->pb,1,0); /* Slice Structured: off */ + put_bits(&s->pb,1,s->loop_filter); /* Deblocking Filter */ + put_bits(&s->pb,1,s->h263_slice_structured); /* Slice Structured */ put_bits(&s->pb,1,0); /* Reference Picture Selection: off */ put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */ - put_bits(&s->pb,1,0); /* Alternative Inter VLC: off */ - put_bits(&s->pb,1,0); /* Modified Quantization: off */ + put_bits(&s->pb,1,s->alt_inter_vlc); /* Alternative Inter VLC */ + put_bits(&s->pb,1,s->modified_quant); /* 
Modified Quantization: */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,3,0); /* Reserved */ @@ -279,16 +279,28 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number) /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */ if (s->umvplus) // put_bits(&s->pb,1,1); /* Limited according tables of Annex D */ +//FIXME check actual requested range put_bits(&s->pb,2,1); /* unlimited */ + if(s->h263_slice_structured) + put_bits(&s->pb,2,0); /* no weird submodes */ put_bits(&s->pb, 5, s->qscale); } put_bits(&s->pb, 1, 0); /* no PEI */ + if(s->h263_slice_structured){ + put_bits(&s->pb, 1, 1); + + assert(s->mb_x == 0 && s->mb_y == 0); + ff_h263_encode_mba(s); + + put_bits(&s->pb, 1, 1); + } + if(s->h263_aic){ s->y_dc_scale_table= - s->c_dc_scale_table= h263_aic_dc_scale_table; + s->c_dc_scale_table= ff_aic_dc_scale_table; }else{ s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; @@ -298,22 +310,27 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number) /** * Encodes a group of blocks header. */ -int h263_encode_gob_header(MpegEncContext * s, int mb_line) +void h263_encode_gob_header(MpegEncContext * s, int mb_line) { - align_put_bits(&s->pb); - flush_put_bits(&s->pb); - /* Call the RTP callback to send the last GOB */ - if (s->rtp_callback) { - int pdif = pbBufPtr(&s->pb) - s->ptr_lastgob; - s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number); - } - put_bits(&s->pb, 17, 1); /* GBSC */ - s->gob_number = mb_line / s->gob_index; - put_bits(&s->pb, 5, s->gob_number); /* GN */ - put_bits(&s->pb, 2, s->pict_type == I_TYPE); /* GFID */ - put_bits(&s->pb, 5, s->qscale); /* GQUANT */ - //fprintf(stderr,"\nGOB: %2d size: %d", s->gob_number - 1, pdif); - return 0; + put_bits(&s->pb, 17, 1); /* GBSC */ + + if(s->h263_slice_structured){ + put_bits(&s->pb, 1, 1); + + ff_h263_encode_mba(s); + + if(s->mb_num > 1583) + put_bits(&s->pb, 1, 1); + put_bits(&s->pb, 5, s->qscale); /* GQUANT */ + put_bits(&s->pb, 1, 1); + put_bits(&s->pb, 2, s->pict_type == I_TYPE); /* GFID */ + }else{ + int gob_number= mb_line / s->gob_index; + + put_bits(&s->pb, 5, gob_number); /* GN */ + put_bits(&s->pb, 2, s->pict_type == I_TYPE); /* GFID */ + put_bits(&s->pb, 5, s->qscale); /* GQUANT */ + } } static inline int get_block_rate(MpegEncContext * s, DCTELEM block[64], int block_last_index, uint8_t scantable[64]){ @@ -463,9 +480,9 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){ for(i=1; i<s->mb_num; i++){ int mb_xy= s->mb_index2xy[i]; - if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&MB_TYPE_INTER4V)){ - s->mb_type[mb_xy]&= ~MB_TYPE_INTER4V; - s->mb_type[mb_xy]|= MB_TYPE_INTER; + if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTER4V)){ + s->mb_type[mb_xy]&= ~CANDIDATE_MB_TYPE_INTER4V; + s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_INTER; } } @@ -492,9 +509,9 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){ for(i=1; i<s->mb_num; i++){ int mb_xy= s->mb_index2xy[i]; - if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&MB_TYPE_DIRECT)){ - s->mb_type[mb_xy]&= ~MB_TYPE_DIRECT; - s->mb_type[mb_xy]|= MB_TYPE_BIDIR; + if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_DIRECT)){ + s->mb_type[mb_xy]&= ~CANDIDATE_MB_TYPE_DIRECT; + s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_BIDIR; } } } @@ -507,7 +524,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){ */ int ff_mpeg4_set_direct_mv(MpegEncContext *s, 
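The rewritten h263_encode_gob_header above emits a 17-bit GBSC of value 1 followed, in the plain (non slice-structured) case, by a 5-bit GN, a 2-bit GFID and a 5-bit GQUANT. A toy MSB-first packer showing that layout (the BitWriter type is illustrative and is not libavcodec's PutBitContext):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t buf[64]; int bitpos; } BitWriter;

/* Write the n least significant bits of value, most significant bit first. */
static void put_bits_n(BitWriter *w, int n, unsigned value)
{
    for (int i = n - 1; i >= 0; i--) {
        int bit = (value >> i) & 1;
        w->buf[w->bitpos >> 3] |= bit << (7 - (w->bitpos & 7));
        w->bitpos++;
    }
}

/* Plain GOB header as in the hunk above: GBSC, GN, GFID, GQUANT. */
static void pack_gob_header(BitWriter *w, int gob_number, int is_intra, int qscale)
{
    put_bits_n(w, 17, 1);            /* GBSC */
    put_bits_n(w,  5, gob_number);   /* GN */
    put_bits_n(w,  2, is_intra);     /* GFID */
    put_bits_n(w,  5, qscale);       /* GQUANT */
}

int main(void)
{
    BitWriter w = {{0}, 0};
    pack_gob_header(&w, 3, 0, 10);
    printf("wrote %d bits: %02x %02x %02x %02x\n",
           w.bitpos, w.buf[0], w.buf[1], w.buf[2], w.buf[3]);
    return 0;
}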
int mx, int my){ const int mb_index= s->mb_x + s->mb_y*s->mb_stride; - const int colocated_mb_type= s->next_picture.mb_type[mb_index]; //FIXME or next? + const int colocated_mb_type= s->next_picture.mb_type[mb_index]; int xy= s->block_index[0]; uint16_t time_pp= s->pp_time; uint16_t time_pb= s->pb_time; @@ -519,39 +536,39 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){ s->mv_type = MV_TYPE_8X8; for(i=0; i<4; i++){ xy= s->block_index[i]; - s->mv[0][i][0] = s->motion_val[xy][0]*time_pb/time_pp + mx; - s->mv[0][i][1] = s->motion_val[xy][1]*time_pb/time_pp + my; - s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->motion_val[xy][0] - : s->motion_val[xy][0]*(time_pb - time_pp)/time_pp; - s->mv[1][i][1] = my ? s->mv[0][i][1] - s->motion_val[xy][1] - : s->motion_val[xy][1]*(time_pb - time_pp)/time_pp; + s->mv[0][i][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp + mx; + s->mv[0][i][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp + my; + s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->next_picture.motion_val[0][xy][0] + : s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp; + s->mv[1][i][1] = my ? s->mv[0][i][1] - s->next_picture.motion_val[0][xy][1] + : s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp; } return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1; } else if(IS_INTERLACED(colocated_mb_type)){ s->mv_type = MV_TYPE_FIELD; for(i=0; i<2; i++){ if(s->top_field_first){ - time_pp= s->pp_field_time - s->field_select_table[mb_index][i] + i; - time_pb= s->pb_field_time - s->field_select_table[mb_index][i] + i; + time_pp= s->pp_field_time - s->p_field_select_table[i][mb_index] + i; + time_pb= s->pb_field_time - s->p_field_select_table[i][mb_index] + i; }else{ - time_pp= s->pp_field_time + s->field_select_table[mb_index][i] - i; - time_pb= s->pb_field_time + s->field_select_table[mb_index][i] - i; + time_pp= s->pp_field_time + s->p_field_select_table[i][mb_index] - i; + time_pb= s->pb_field_time + s->p_field_select_table[i][mb_index] - i; } - s->mv[0][i][0] = s->field_mv_table[mb_index][i][0]*time_pb/time_pp + mx; - s->mv[0][i][1] = s->field_mv_table[mb_index][i][1]*time_pb/time_pp + my; - s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->field_mv_table[mb_index][i][0] - : s->field_mv_table[mb_index][i][0]*(time_pb - time_pp)/time_pp; - s->mv[1][i][1] = my ? s->mv[0][i][1] - s->field_mv_table[mb_index][i][1] - : s->field_mv_table[mb_index][i][1]*(time_pb - time_pp)/time_pp; + s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0]*time_pb/time_pp + mx; + s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1]*time_pb/time_pp + my; + s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->p_field_mv_table[i][0][mb_index][0] + : s->p_field_mv_table[i][0][mb_index][0]*(time_pb - time_pp)/time_pp; + s->mv[1][i][1] = my ? s->mv[0][i][1] - s->p_field_mv_table[i][0][mb_index][1] + : s->p_field_mv_table[i][0][mb_index][1]*(time_pb - time_pp)/time_pp; } return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED; }else{ - s->mv[0][0][0] = s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->motion_val[xy][0]*time_pb/time_pp + mx; - s->mv[0][0][1] = s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->motion_val[xy][1]*time_pb/time_pp + my; - s->mv[1][0][0] = s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = mx ? s->mv[0][0][0] - s->motion_val[xy][0] - : s->motion_val[xy][0]*(time_pb - time_pp)/time_pp; - s->mv[1][0][1] = s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = my ? 
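ff_mpeg4_set_direct_mv above derives direct-mode vectors from the co-located MV of the next reference picture: the forward MV is the temporally scaled co-located MV plus the coded delta, and the backward MV is either the residual against the co-located MV or the negatively scaled MV when the delta is zero. A self-contained sketch of one component (integer MV units assumed):

#include <stdio.h>

static void direct_mv(int col, int delta, int time_pp, int time_pb,
                      int *fwd, int *bwd)
{
    *fwd = col * time_pb / time_pp + delta;
    *bwd = delta ? *fwd - col                          /* residual form */
                 : col * (time_pb - time_pp) / time_pp; /* scaled negative form */
}

int main(void)
{
    int fwd, bwd;
    direct_mv(9, 0, 3, 1, &fwd, &bwd);
    printf("no delta: fwd=%d bwd=%d\n", fwd, bwd);   /* 3, -6 */
    direct_mv(9, 2, 3, 1, &fwd, &bwd);
    printf("delta=2:  fwd=%d bwd=%d\n", fwd, bwd);   /* 5, -4 */
    return 0;
}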
s->mv[0][0][1] - s->motion_val[xy][1] - : s->motion_val[xy][1]*(time_pb - time_pp)/time_pp; + s->mv[0][0][0] = s->mv[0][1][0] = s->mv[0][2][0] = s->mv[0][3][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp + mx; + s->mv[0][0][1] = s->mv[0][1][1] = s->mv[0][2][1] = s->mv[0][3][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp + my; + s->mv[1][0][0] = s->mv[1][1][0] = s->mv[1][2][0] = s->mv[1][3][0] = mx ? s->mv[0][0][0] - s->next_picture.motion_val[0][xy][0] + : s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp; + s->mv[1][0][1] = s->mv[1][1][1] = s->mv[1][2][1] = s->mv[1][3][1] = my ? s->mv[0][0][1] - s->next_picture.motion_val[0][xy][1] + : s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp; if((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) || !s->quarter_sample) s->mv_type= MV_TYPE_16X16; else @@ -582,26 +599,28 @@ void ff_h263_update_motion_val(MpegEncContext * s){ motion_y = s->mv[0][0][1] + s->mv[0][1][1]; motion_x = (motion_x>>1) | (motion_x&1); for(i=0; i<2; i++){ - s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0]; - s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1]; - s->field_select_table[mb_xy][i]= s->field_select[0][i]; + s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0]; + s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1]; + s->p_field_select_table[i][mb_xy]= s->field_select[0][i]; } } /* no update if 8X8 because it has been done during parsing */ - s->motion_val[xy][0] = motion_x; - s->motion_val[xy][1] = motion_y; - s->motion_val[xy + 1][0] = motion_x; - s->motion_val[xy + 1][1] = motion_y; - s->motion_val[xy + wrap][0] = motion_x; - s->motion_val[xy + wrap][1] = motion_y; - s->motion_val[xy + 1 + wrap][0] = motion_x; - s->motion_val[xy + 1 + wrap][1] = motion_y; + s->current_picture.motion_val[0][xy][0] = motion_x; + s->current_picture.motion_val[0][xy][1] = motion_y; + s->current_picture.motion_val[0][xy + 1][0] = motion_x; + s->current_picture.motion_val[0][xy + 1][1] = motion_y; + s->current_picture.motion_val[0][xy + wrap][0] = motion_x; + s->current_picture.motion_val[0][xy + wrap][1] = motion_y; + s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x; + s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y; } if(s->encoding){ //FIXME encoding MUST be cleaned up if (s->mv_type == MV_TYPE_8X8) s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8; + else if(s->mb_intra) + s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA; else s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16; } @@ -617,7 +636,7 @@ static inline int get_p_cbp(MpegEncContext * s, if(s->flags & CODEC_FLAG_CBP_RD){ int best_cbpy_score= INT_MAX; int best_cbpc_score= INT_MAX; - int cbpc, cbpy; + int cbpc = (-1), cbpy= (-1); const int offset= (s->mv_type==MV_TYPE_16X16 ? 0 : 16) + (s->dquant ? 
8 : 0); const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6); @@ -721,18 +740,19 @@ void mpeg4_encode_mb(MpegEncContext * s, // printf("**mb x=%d y=%d\n", s->mb_x, s->mb_y); if (!s->mb_intra) { - /* compute cbp */ int i, cbp; if(s->pict_type==B_TYPE){ static const int mb_type_table[8]= {-1, 2, 3, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */ int mb_type= mb_type_table[s->mv_dir]; - + if(s->mb_x==0){ - s->last_mv[0][0][0]= - s->last_mv[0][0][1]= - s->last_mv[1][0][0]= - s->last_mv[1][0][1]= 0; + for(i=0; i<2; i++){ + s->last_mv[i][0][0]= + s->last_mv[i][0][1]= + s->last_mv[i][1][0]= + s->last_mv[i][1][1]= 0; + } } assert(s->dquant>=-2 && s->dquant<=2); @@ -786,50 +806,64 @@ void mpeg4_encode_mb(MpegEncContext * s, if(cbp) put_bits(&s->pb, 1, s->interlaced_dct); if(mb_type) // not diect mode - put_bits(&s->pb, 1, 0); // no interlaced ME yet + put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD); } if(interleaved_stats){ s->misc_bits+= get_bits_diff(s); } - switch(mb_type) - { - case 0: /* direct */ + if(mb_type == 0){ + assert(s->mv_dir & MV_DIRECT); h263_encode_motion(s, motion_x, 1); h263_encode_motion(s, motion_y, 1); s->b_count++; s->f_count++; - break; - case 1: /* bidir */ - h263_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); - h263_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); - h263_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); - h263_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); - s->last_mv[0][0][0]= s->mv[0][0][0]; - s->last_mv[0][0][1]= s->mv[0][0][1]; - s->last_mv[1][0][0]= s->mv[1][0][0]; - s->last_mv[1][0][1]= s->mv[1][0][1]; - s->b_count++; - s->f_count++; - break; - case 2: /* backward */ - h263_encode_motion(s, motion_x - s->last_mv[1][0][0], s->b_code); - h263_encode_motion(s, motion_y - s->last_mv[1][0][1], s->b_code); - s->last_mv[1][0][0]= motion_x; - s->last_mv[1][0][1]= motion_y; - s->b_count++; - break; - case 3: /* forward */ - h263_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); - h263_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); - s->last_mv[0][0][0]= motion_x; - s->last_mv[0][0][1]= motion_y; - s->f_count++; - break; - default: - printf("unknown mb type\n"); - return; + }else{ + assert(mb_type > 0 && mb_type < 4); + if(s->mv_type != MV_TYPE_FIELD){ + if(s->mv_dir & MV_DIR_FORWARD){ + h263_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); + h263_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); + s->last_mv[0][0][0]= s->last_mv[0][1][0]= s->mv[0][0][0]; + s->last_mv[0][0][1]= s->last_mv[0][1][1]= s->mv[0][0][1]; + s->f_count++; + } + if(s->mv_dir & MV_DIR_BACKWARD){ + h263_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); + h263_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); + s->last_mv[1][0][0]= s->last_mv[1][1][0]= s->mv[1][0][0]; + s->last_mv[1][0][1]= s->last_mv[1][1][1]= s->mv[1][0][1]; + s->b_count++; + } + }else{ + if(s->mv_dir & MV_DIR_FORWARD){ + put_bits(&s->pb, 1, s->field_select[0][0]); + put_bits(&s->pb, 1, s->field_select[0][1]); + } + if(s->mv_dir & MV_DIR_BACKWARD){ + put_bits(&s->pb, 1, s->field_select[1][0]); + put_bits(&s->pb, 1, s->field_select[1][1]); + } + if(s->mv_dir & MV_DIR_FORWARD){ + for(i=0; i<2; i++){ + h263_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); + h263_encode_motion(s, s->mv[0][i][1] - s->last_mv[0][i][1]/2, s->f_code); + s->last_mv[0][i][0]= s->mv[0][i][0]; + s->last_mv[0][i][1]= s->mv[0][i][1]*2; + } + s->f_count++; + } + 
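The B-frame branch of mpeg4_encode_mb above codes motion vectors differentially against last_mv, which is now reset for both directions and both fields at the start of each macroblock row. A stripped-down sketch of that predictor bookkeeping for one direction (RowPredictor is my name, not the codec's):

#include <stdio.h>

typedef struct { int last_mv[2]; } RowPredictor;

static void encode_mv(RowPredictor *p, int mb_x, int mvx, int mvy)
{
    if (mb_x == 0)                     /* new macroblock row: reset the predictor */
        p->last_mv[0] = p->last_mv[1] = 0;

    int dx = mvx - p->last_mv[0];      /* these differences are what gets VLC-coded */
    int dy = mvy - p->last_mv[1];
    printf("mb_x=%d code (%+d,%+d)\n", mb_x, dx, dy);

    p->last_mv[0] = mvx;               /* predictor for the next macroblock */
    p->last_mv[1] = mvy;
}

int main(void)
{
    RowPredictor p;
    encode_mv(&p, 0, 4, -2);
    encode_mv(&p, 1, 6, -2);
    encode_mv(&p, 2, 6,  0);
    return 0;
}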
if(s->mv_dir & MV_DIR_BACKWARD){ + for(i=0; i<2; i++){ + h263_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code); + h263_encode_motion(s, s->mv[1][i][1] - s->last_mv[1][i][1]/2, s->b_code); + s->last_mv[1][i][0]= s->mv[1][i][0]; + s->last_mv[1][i][1]= s->mv[1][i][1]*2; + } + s->b_count++; + } + } } if(interleaved_stats){ @@ -844,6 +878,7 @@ void mpeg4_encode_mb(MpegEncContext * s, if(interleaved_stats){ s->p_tex_bits+= get_bits_diff(s); } + }else{ /* s->pict_type==B_TYPE */ cbp= get_p_cbp(s, block, motion_x, motion_y); @@ -872,7 +907,7 @@ void mpeg4_encode_mb(MpegEncContext * s, if(pic==NULL || pic->pict_type!=B_TYPE) break; b_pic= pic->data[0] + offset + 16; //FIXME +16 - diff= s->dsp.pix_abs16x16(p_pic, b_pic, s->linesize); + diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16); if(diff>s->qscale*70){ //FIXME check that 70 is optimal s->mb_skiped=0; break; @@ -896,15 +931,15 @@ void mpeg4_encode_mb(MpegEncContext * s, } put_bits(&s->pb, 1, 0); /* mb coded */ + cbpc = cbp & 3; + cbpy = cbp >> 2; + cbpy ^= 0xf; if(s->mv_type==MV_TYPE_16X16){ - cbpc = cbp & 3; if(s->dquant) cbpc+= 8; put_bits(&s->pb, inter_MCBPC_bits[cbpc], inter_MCBPC_code[cbpc]); - cbpy = cbp >> 2; - cbpy ^= 0xf; put_bits(pb2, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]); if(s->dquant) put_bits(pb2, 2, dquant_code[s->dquant+2]); @@ -912,7 +947,7 @@ void mpeg4_encode_mb(MpegEncContext * s, if(!s->progressive_sequence){ if(cbp) put_bits(pb2, 1, s->interlaced_dct); - put_bits(pb2, 1, 0); // no interlaced ME yet + put_bits(pb2, 1, 0); } if(interleaved_stats){ @@ -924,13 +959,41 @@ void mpeg4_encode_mb(MpegEncContext * s, h263_encode_motion(s, motion_x - pred_x, s->f_code); h263_encode_motion(s, motion_y - pred_y, s->f_code); - }else{ - cbpc = (cbp & 3)+16; + }else if(s->mv_type==MV_TYPE_FIELD){ + if(s->dquant) cbpc+= 8; put_bits(&s->pb, inter_MCBPC_bits[cbpc], inter_MCBPC_code[cbpc]); - cbpy = cbp >> 2; - cbpy ^= 0xf; + + put_bits(pb2, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]); + if(s->dquant) + put_bits(pb2, 2, dquant_code[s->dquant+2]); + + assert(!s->progressive_sequence); + if(cbp) + put_bits(pb2, 1, s->interlaced_dct); + put_bits(pb2, 1, 1); + + if(interleaved_stats){ + s->misc_bits+= get_bits_diff(s); + } + + /* motion vectors: 16x8 interlaced mode */ + h263_pred_motion(s, 0, &pred_x, &pred_y); + pred_y /=2; + + put_bits(&s->pb, 1, s->field_select[0][0]); + put_bits(&s->pb, 1, s->field_select[0][1]); + + h263_encode_motion(s, s->mv[0][0][0] - pred_x, s->f_code); + h263_encode_motion(s, s->mv[0][0][1] - pred_y, s->f_code); + h263_encode_motion(s, s->mv[0][1][0] - pred_x, s->f_code); + h263_encode_motion(s, s->mv[0][1][1] - pred_y, s->f_code); + }else{ + assert(s->mv_type==MV_TYPE_8X8); + put_bits(&s->pb, + inter_MCBPC_bits[cbpc+16], + inter_MCBPC_code[cbpc+16]); put_bits(pb2, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]); if(!s->progressive_sequence){ @@ -946,8 +1009,8 @@ void mpeg4_encode_mb(MpegEncContext * s, /* motion vectors: 8x8 mode*/ h263_pred_motion(s, i, &pred_x, &pred_y); - h263_encode_motion(s, s->motion_val[ s->block_index[i] ][0] - pred_x, s->f_code); - h263_encode_motion(s, s->motion_val[ s->block_index[i] ][1] - pred_y, s->f_code); + h263_encode_motion(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x, s->f_code); + h263_encode_motion(s, s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code); } } @@ -1067,70 +1130,114 @@ void h263_encode_mb(MpegEncContext * s, s->misc_bits++; s->last_bits++; } + s->skip_count++; + return; } put_bits(&s->pb, 1, 0); /* mb coded */ 
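The coded-block-pattern handling above splits the 6-bit cbp the same way in every branch: the two chroma bits become CBPC (folded into the MCBPC code) and the four luma bits become CBPY, which is inverted before VLC coding. In plain C:

#include <stdio.h>

int main(void)
{
    unsigned cbp  = 0x2D;              /* bit 5..2 = luma blocks 0..3, bit 1..0 = chroma */
    unsigned cbpc = cbp & 3;           /* chroma part, joins the MCBPC code */
    unsigned cbpy = (cbp >> 2) ^ 0xF;  /* luma part, inverted before the CBPY VLC */

    printf("cbp=%02x -> cbpc=%u cbpy=%u\n", cbp, cbpc, cbpy);
    return 0;
}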
+ cbpc = cbp & 3; - if(s->dquant) cbpc+= 8; - put_bits(&s->pb, - inter_MCBPC_bits[cbpc], - inter_MCBPC_code[cbpc]); cbpy = cbp >> 2; - cbpy ^= 0xf; - put_bits(&s->pb, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]); - if(s->dquant) - put_bits(&s->pb, 2, dquant_code[s->dquant+2]); + if(s->alt_inter_vlc==0 || cbpc!=3) + cbpy ^= 0xF; + if(s->dquant) cbpc+= 8; + if(s->mv_type==MV_TYPE_16X16){ + put_bits(&s->pb, + inter_MCBPC_bits[cbpc], + inter_MCBPC_code[cbpc]); - if(interleaved_stats){ - s->misc_bits+= get_bits_diff(s); - } + put_bits(&s->pb, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]); + if(s->dquant) + put_bits(&s->pb, 2, dquant_code[s->dquant+2]); + + if(interleaved_stats){ + s->misc_bits+= get_bits_diff(s); + } - /* motion vectors: 16x16 mode only now */ - h263_pred_motion(s, 0, &pred_x, &pred_y); - - if (!s->umvplus) { - h263_encode_motion(s, motion_x - pred_x, s->f_code); - h263_encode_motion(s, motion_y - pred_y, s->f_code); - } - else { - h263p_encode_umotion(s, motion_x - pred_x); - h263p_encode_umotion(s, motion_y - pred_y); - if (((motion_x - pred_x) == 1) && ((motion_y - pred_y) == 1)) - /* To prevent Start Code emulation */ - put_bits(&s->pb,1,1); + /* motion vectors: 16x16 mode */ + h263_pred_motion(s, 0, &pred_x, &pred_y); + + if (!s->umvplus) { + h263_encode_motion(s, motion_x - pred_x, 1); + h263_encode_motion(s, motion_y - pred_y, 1); + } + else { + h263p_encode_umotion(s, motion_x - pred_x); + h263p_encode_umotion(s, motion_y - pred_y); + if (((motion_x - pred_x) == 1) && ((motion_y - pred_y) == 1)) + /* To prevent Start Code emulation */ + put_bits(&s->pb,1,1); + } + }else{ + put_bits(&s->pb, + inter_MCBPC_bits[cbpc+16], + inter_MCBPC_code[cbpc+16]); + put_bits(&s->pb, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]); + if(s->dquant) + put_bits(&s->pb, 2, dquant_code[s->dquant+2]); + + if(interleaved_stats){ + s->misc_bits+= get_bits_diff(s); + } + + for(i=0; i<4; i++){ + /* motion vectors: 8x8 mode*/ + h263_pred_motion(s, i, &pred_x, &pred_y); + + motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0]; + motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1]; + if (!s->umvplus) { + h263_encode_motion(s, motion_x - pred_x, 1); + h263_encode_motion(s, motion_y - pred_y, 1); + } + else { + h263p_encode_umotion(s, motion_x - pred_x); + h263p_encode_umotion(s, motion_y - pred_y); + if (((motion_x - pred_x) == 1) && ((motion_y - pred_y) == 1)) + /* To prevent Start Code emulation */ + put_bits(&s->pb,1,1); + } + } } if(interleaved_stats){ s->mv_bits+= get_bits_diff(s); } } else { - int li = s->h263_aic ? 
0 : 1; + assert(s->mb_intra); cbp = 0; - for(i=0; i<6; i++) { + if (s->h263_aic) { /* Predict DC */ - if (s->h263_aic && s->mb_intra) { + for(i=0; i<6; i++) { int16_t level = block[i][0]; - + int scale; + + if(i<4) scale= s->y_dc_scale; + else scale= s->c_dc_scale; + pred_dc = h263_pred_dc(s, i, &dc_ptr[i]); level -= pred_dc; /* Quant */ - if (level < 0) - level = (level + (s->qscale >> 1))/(s->y_dc_scale); + if (level >= 0) + level = (level + (scale>>1))/scale; else - level = (level - (s->qscale >> 1))/(s->y_dc_scale); + level = (level - (scale>>1))/scale; /* AIC can change CBP */ if (level == 0 && s->block_last_index[i] == 0) s->block_last_index[i] = -1; - else if (level < -127) - level = -127; - else if (level > 127) - level = 127; - + + if(!s->modified_quant){ + if (level < -127) + level = -127; + else if (level > 127) + level = 127; + } + block[i][0] = level; /* Reconstruction */ - rec_intradc[i] = (s->y_dc_scale*level) + pred_dc; + rec_intradc[i] = scale*level + pred_dc; /* Oddify */ rec_intradc[i] |= 1; //if ((rec_intradc[i] % 2) == 0) @@ -1143,10 +1250,15 @@ void h263_encode_mb(MpegEncContext * s, /* Update AC/DC tables */ *dc_ptr[i] = rec_intradc[i]; + if (s->block_last_index[i] >= 0) + cbp |= 1 << (5 - i); + } + }else{ + for(i=0; i<6; i++) { + /* compute cbp */ + if (s->block_last_index[i] >= 1) + cbp |= 1 << (5 - i); } - /* compute cbp */ - if (s->block_last_index[i] >= li) - cbp |= 1 << (5 - i); } cbpc = cbp & 3; @@ -1197,6 +1309,94 @@ void h263_encode_mb(MpegEncContext * s, } } } +#endif + +void ff_h263_loop_filter(MpegEncContext * s){ + int qp_c; + const int linesize = s->linesize; + const int uvlinesize= s->uvlinesize; + const int xy = s->mb_y * s->mb_stride + s->mb_x; + uint8_t *dest_y = s->dest[0]; + uint8_t *dest_cb= s->dest[1]; + uint8_t *dest_cr= s->dest[2]; + +// if(s->pict_type==B_TYPE && !s->readable) return; + + /* + Diag Top + Left Center + */ + if(!IS_SKIP(s->current_picture.mb_type[xy])){ + qp_c= s->qscale; + s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c); + s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c); + }else + qp_c= 0; + + if(s->mb_y){ + int qp_dt, qp_t, qp_tc; + + if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride])) + qp_t=0; + else + qp_t= s->current_picture.qscale_table[xy-s->mb_stride]; + + if(qp_c) + qp_tc= qp_c; + else + qp_tc= qp_t; + + if(qp_tc){ + const int chroma_qp= s->chroma_qscale_table[qp_tc]; + s->dsp.h263_v_loop_filter(dest_y , linesize, qp_tc); + s->dsp.h263_v_loop_filter(dest_y+8, linesize, qp_tc); + + s->dsp.h263_v_loop_filter(dest_cb , uvlinesize, chroma_qp); + s->dsp.h263_v_loop_filter(dest_cr , uvlinesize, chroma_qp); + } + + if(qp_t) + s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_t); + + if(s->mb_x){ + if(qp_t || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride])) + qp_dt= qp_t; + else + qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride]; + + if(qp_dt){ + const int chroma_qp= s->chroma_qscale_table[qp_dt]; + s->dsp.h263_h_loop_filter(dest_y -8*linesize , linesize, qp_dt); + s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp); + s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp); + } + } + } + + if(qp_c){ + s->dsp.h263_h_loop_filter(dest_y +8, linesize, qp_c); + if(s->mb_y + 1 == s->mb_height) + s->dsp.h263_h_loop_filter(dest_y+8*linesize+8, linesize, qp_c); + } + + if(s->mb_x){ + int qp_lc; + if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1])) + qp_lc= qp_c; + else + qp_lc= s->current_picture.qscale_table[xy-1]; + + if(qp_lc){ + 
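The Advanced-Intra-Coding DC path above predicts the DC, quantizes the difference with symmetric rounding by the per-plane scale, reconstructs it the way the decoder will, and then forces the reconstructed DC odd so both predictors stay in lock-step. A compact sketch of that arithmetic (aic_dc is an illustrative name):

#include <stdio.h>

static int aic_dc(int dc, int pred_dc, int scale, int *rec)
{
    int level = dc - pred_dc;
    if (level >= 0)
        level = (level + (scale >> 1)) / scale;   /* symmetric rounding */
    else
        level = (level - (scale >> 1)) / scale;

    *rec  = scale * level + pred_dc;              /* decoder-side reconstruction */
    *rec |= 1;                                    /* "oddify" the stored predictor */
    return level;                                 /* value that gets entropy coded */
}

int main(void)
{
    int rec;
    int level = aic_dc(530, 512, 8, &rec);
    printf("level=%d reconstructed=%d\n", level, rec);   /* level=2 reconstructed=529 */
    return 0;
}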
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc); + if(s->mb_y + 1 == s->mb_height){ + const int chroma_qp= s->chroma_qscale_table[qp_lc]; + s->dsp.h263_h_loop_filter(dest_y +8* linesize, linesize, qp_lc); + s->dsp.h263_h_loop_filter(dest_cb , uvlinesize, chroma_qp); + s->dsp.h263_h_loop_filter(dest_cr , uvlinesize, chroma_qp); + } + } + } +} static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr) { @@ -1242,7 +1442,6 @@ static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr) *dc_val_ptr = &dc_val[x + y * wrap]; return pred_dc; } -#endif static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n) { @@ -1252,7 +1451,7 @@ static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n) /* find prediction */ if (n < 4) { x = 2 * s->mb_x + 1 + (n & 1); - y = 2 * s->mb_y + 1 + ((n & 2) >> 1); + y = 2 * s->mb_y + 1 + (n>> 1); wrap = s->mb_width * 2 + 2; dc_val = s->dc_val[0]; ac_val = s->ac_val[0][0]; @@ -1276,10 +1475,13 @@ static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n) c = dc_val[(x) + (y - 1) * wrap]; /* No prediction outside GOB boundary */ - if (s->first_slice_line && ((n < 2) || (n > 3))) - c = 1024; - pred_dc = 1024; + if(s->first_slice_line && n!=3){ + if(n!=2) c= 1024; + if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024; + } + if (s->ac_pred) { + pred_dc = 1024; if (s->h263_aic_dir) { /* left prediction */ if (a != 1024) { @@ -1314,8 +1516,8 @@ static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n) if (block[0] < 0) block[0] = 0; - else if (!(block[0] & 1)) - block[0]++; + else + block[0] |= 1; /* Update AC/DC tables */ dc_val[(x) + (y) * wrap] = block[0]; @@ -1338,9 +1540,9 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, wrap = s->block_wrap[0]; xy = s->block_index[block]; - mot_val = s->motion_val[xy]; + mot_val = s->current_picture.motion_val[0][xy]; - A = s->motion_val[xy - 1]; + A = s->current_picture.motion_val[0][xy - 1]; /* special case for first (slice) line */ if (s->first_slice_line && block<3) { // we cant just change some MVs to simulate that as we need them for the B frames (and ME) @@ -1348,8 +1550,8 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, if(block==0){ //most common case if(s->mb_x == s->resync_mb_x){ //rare *px= *py = 0; - }else if(s->mb_x + 1 == s->resync_mb_x){ //rare - C = s->motion_val[xy + off[block] - wrap]; + }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare + C = s->current_picture.motion_val[0][xy + off[block] - wrap]; if(s->mb_x==0){ *px = C[0]; *py = C[1]; @@ -1362,8 +1564,8 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, *py = A[1]; } }else if(block==1){ - if(s->mb_x + 1 == s->resync_mb_x){ //rare - C = s->motion_val[xy + off[block] - wrap]; + if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare + C = s->current_picture.motion_val[0][xy + off[block] - wrap]; *px = mid_pred(A[0], 0, C[0]); *py = mid_pred(A[1], 0, C[1]); }else{ @@ -1371,8 +1573,8 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, *py = A[1]; } }else{ /* block==2*/ - B = s->motion_val[xy - wrap]; - C = s->motion_val[xy + off[block] - wrap]; + B = s->current_picture.motion_val[0][xy - wrap]; + C = s->current_picture.motion_val[0][xy + off[block] - wrap]; if(s->mb_x == s->resync_mb_x) //rare A[0]=A[1]=0; @@ -1380,14 +1582,75 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, *py = mid_pred(A[1], B[1], C[1]); } } else { - B = s->motion_val[xy - wrap]; - C = s->motion_val[xy + off[block] - wrap]; + B = 
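ff_h263_loop_filter above filters each shared macroblock edge with a quantizer derived from its two neighbours: a skipped macroblock contributes qp 0, the edge falls back to the other side's qp when one side is skipped, and a qp of 0 means the edge is not filtered at all. A sketch of that selection (edge_qp is an illustrative name):

#include <stdio.h>

static int edge_qp(int cur_skipped, int cur_qp, int top_skipped, int top_qp)
{
    int qp_c = cur_skipped ? 0 : cur_qp;
    int qp_t = top_skipped ? 0 : top_qp;
    return qp_c ? qp_c : qp_t;        /* qp_tc in the patch above */
}

int main(void)
{
    printf("both coded      -> %d\n", edge_qp(0, 6, 0, 9));  /* 6 */
    printf("current skipped -> %d\n", edge_qp(1, 6, 0, 9));  /* 9 */
    printf("both skipped    -> %d\n", edge_qp(1, 6, 1, 9));  /* 0: no filtering */
    return 0;
}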
s->current_picture.motion_val[0][xy - wrap]; + C = s->current_picture.motion_val[0][xy + off[block] - wrap]; *px = mid_pred(A[0], B[0], C[0]); *py = mid_pred(A[1], B[1], C[1]); } return mot_val; } +// identical to above but with s->current_picture->motion_val, the above one will be removed, and this renamed to it +int16_t *h263_pred_motion2(MpegEncContext * s, int block, int dir, + int *px, int *py) +{ + int xy, wrap; + int16_t *A, *B, *C, (*mot_val)[2]; + static const int off[4]= {2, 1, 1, -1}; + + wrap = s->b8_stride; + xy = 2*(s->mb_x + s->mb_y * wrap); + + mot_val = s->current_picture.motion_val[dir] + xy; + + A = mot_val[ - 1]; + /* special case for first (slice) line */ + if (s->first_slice_line && block<3) { + // we cant just change some MVs to simulate that as we need them for the B frames (and ME) + // and if we ever support non rectangular objects than we need to do a few ifs here anyway :( + if(block==0){ //most common case + if(s->mb_x == s->resync_mb_x){ //rare + *px= *py = 0; + }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare + C = mot_val[off[block] - wrap]; + if(s->mb_x==0){ + *px = C[0]; + *py = C[1]; + }else{ + *px = mid_pred(A[0], 0, C[0]); + *py = mid_pred(A[1], 0, C[1]); + } + }else{ + *px = A[0]; + *py = A[1]; + } + }else if(block==1){ + if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare + C = mot_val[off[block] - wrap]; + *px = mid_pred(A[0], 0, C[0]); + *py = mid_pred(A[1], 0, C[1]); + }else{ + *px = A[0]; + *py = A[1]; + } + }else{ /* block==2*/ + B = mot_val[ - wrap]; + C = mot_val[off[block] - wrap]; + if(s->mb_x == s->resync_mb_x) //rare + A[0]=A[1]=0; + + *px = mid_pred(A[0], B[0], C[0]); + *py = mid_pred(A[1], B[1], C[1]); + } + } else { + B = mot_val[ - wrap]; + C = mot_val[off[block] - wrap]; + *px = mid_pred(A[0], B[0], C[0]); + *py = mid_pred(A[1], B[1], C[1]); + } + return *mot_val; +} + #ifdef CONFIG_ENCODERS static void h263_encode_motion(MpegEncContext * s, int val, int f_code) { @@ -1714,9 +1977,15 @@ void h263_encode_init(MpegEncContext *s) break; case CODEC_ID_H263P: - s->fcode_tab= umv_fcode_tab; - s->min_qcoeff= -127; - s->max_qcoeff= 127; + if(s->umvplus) + s->fcode_tab= umv_fcode_tab; + if(s->modified_quant){ + s->min_qcoeff= -2047; + s->max_qcoeff= 2047; + }else{ + s->min_qcoeff= -127; + s->max_qcoeff= 127; + } break; //Note for mpeg4 & h263 the dc-scale table will be set per frame as needed later case CODEC_ID_FLV1: @@ -1765,12 +2034,50 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n) if (level == 128) //FIXME check rv10 put_bits(&s->pb, 8, 0xff); else - put_bits(&s->pb, 8, level & 0xff); + put_bits(&s->pb, 8, level); i = 1; } else { i = 0; if (s->h263_aic && s->mb_intra) rl = &rl_intra_aic; + + if(s->alt_inter_vlc && !s->mb_intra){ + int aic_vlc_bits=0; + int inter_vlc_bits=0; + int wrong_pos=-1; + int aic_code; + + last_index = s->block_last_index[n]; + last_non_zero = i - 1; + for (; i <= last_index; i++) { + j = s->intra_scantable.permutated[i]; + level = block[j]; + if (level) { + run = i - last_non_zero - 1; + last = (i == last_index); + + if(level<0) level= -level; + + code = get_rl_index(rl, last, run, level); + aic_code = get_rl_index(&rl_intra_aic, last, run, level); + inter_vlc_bits += rl->table_vlc[code][1]+1; + aic_vlc_bits += rl_intra_aic.table_vlc[aic_code][1]+1; + + if (code == rl->n) { + inter_vlc_bits += 1+6+8-1; + } + if (aic_code == rl_intra_aic.n) { + aic_vlc_bits += 1+6+8-1; + wrong_pos += run + 1; + }else + wrong_pos += wrong_run[aic_code]; + last_non_zero = i; + } + } + 
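The motion-vector predictor in h263_pred_motion/h263_pred_motion2 above is the component-wise median of the left, top and top-right candidates (mid_pred). A standalone median-of-three, assuming the three candidates have already been fetched:

#include <stdio.h>

static int mid_pred3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* now a <= b */
    if (b > c)  b = c;                       /* b = min(max of the pair, c) */
    return a > b ? a : b;                    /* median of the original three */
}

int main(void)
{
    int A[2] = { 4, -2 };   /* left candidate */
    int B[2] = { 8,  0 };   /* top candidate */
    int C[2] = { 2, -6 };   /* top-right candidate */
    printf("pred = (%d,%d)\n",
           mid_pred3(A[0], B[0], C[0]),
           mid_pred3(A[1], B[1], C[1]));     /* (4,-2) */
    return 0;
}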
i = 0; + if(aic_vlc_bits < inter_vlc_bits && wrong_pos > 63) + rl = &rl_intra_aic; + } } /* AC coefs */ @@ -1797,7 +2104,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n) assert(slevel != 0); - if(slevel < 128 && slevel > -128) + if(level < 128) put_bits(&s->pb, 8, slevel & 0xff); else{ put_bits(&s->pb, 8, 128); @@ -1805,8 +2112,7 @@ static void h263_encode_block(MpegEncContext * s, DCTELEM * block, int n) put_bits(&s->pb, 6, (slevel>>5)&0x3f); } }else{ - if(slevel < 64 && slevel > -64) { - /* 7-bit level */ + if(level < 64) { // 7-bit level put_bits(&s->pb, 1, 0); put_bits(&s->pb, 1, last); put_bits(&s->pb, 6, run); @@ -1848,8 +2154,8 @@ void ff_mpeg4_stuffing(PutBitContext * pbc) void ff_set_mpeg4_time(MpegEncContext * s, int picture_number){ int time_div, time_mod; - if(s->current_picture.pts) - s->time= (s->current_picture.pts*s->time_increment_resolution + 500*1000)/(1000*1000); + if(s->current_picture_ptr->pts) + s->time= (s->current_picture_ptr->pts*s->time_increment_resolution + 500*1000)/(1000*1000); else s->time= av_rescale(picture_number*(int64_t)s->avctx->frame_rate_base, s->time_increment_resolution, s->avctx->frame_rate); time_div= s->time/s->time_increment_resolution; @@ -1867,11 +2173,18 @@ void ff_set_mpeg4_time(MpegEncContext * s, int picture_number){ static void mpeg4_encode_gop_header(MpegEncContext * s){ int hours, minutes, seconds; + int64_t time; put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, GOP_STARTCODE); - seconds= s->time/s->time_increment_resolution; + if(s->current_picture_ptr->pts && s->reordered_input_picture[1]){ + time= FFMIN(s->reordered_input_picture[1]->pts, s->current_picture_ptr->pts); + time= (time*s->time_increment_resolution + 500*1000)/(1000*1000); + }else + time= av_rescale(s->current_picture_ptr->coded_picture_number*(int64_t)s->avctx->frame_rate_base, s->time_increment_resolution, s->avctx->frame_rate); + + seconds= time/s->time_increment_resolution; minutes= seconds/60; seconds %= 60; hours= minutes/60; minutes %= 60; hours%=24; @@ -1881,8 +2194,10 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){ put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, seconds); - put_bits(&s->pb, 1, 0); //closed gov == NO + put_bits(&s->pb, 1, !!(s->flags&CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); //broken link == NO + + s->last_time_base= time / s->time_increment_resolution; ff_mpeg4_stuffing(&s->pb); } @@ -2058,7 +2373,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number) } put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */ if(!s->progressive_sequence){ - put_bits(&s->pb, 1, s->top_field_first); + put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first); put_bits(&s->pb, 1, s->alternate_scan); } //FIXME sprite stuff @@ -2078,19 +2393,20 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number) #endif //CONFIG_ENCODERS /** - * change qscale by given dquant and update qscale dependant variables. + * set qscale and update qscale dependant variables. 
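mpeg4_encode_gop_header above converts a tick count in units of 1/time_increment_resolution into the hours:minutes:seconds GOP time code, wrapping hours at 24. A direct sketch of that split:

#include <stdio.h>

static void gop_time_code(long long ticks, int resolution,
                          int *h, int *m, int *s)
{
    long long seconds = ticks / resolution;
    long long minutes = seconds / 60; seconds %= 60;
    long long hours   = minutes / 60; minutes %= 60;
    *h = (int)(hours % 24);              /* the time code wraps at 24 hours */
    *m = (int)minutes;
    *s = (int)seconds;
}

int main(void)
{
    int h, m, s;
    gop_time_code(90335LL * 25, 25, &h, &m, &s);  /* 90335 seconds at 25 ticks/s */
    printf("%02d:%02d:%02d\n", h, m, s);          /* 01:05:35 */
    return 0;
}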
*/ -static void change_qscale(MpegEncContext * s, int dquant) +void ff_set_qscale(MpegEncContext * s, int qscale) { - s->qscale += dquant; - - if (s->qscale < 1) - s->qscale = 1; - else if (s->qscale > 31) - s->qscale = 31; + if (qscale < 1) + qscale = 1; + else if (qscale > 31) + qscale = 31; + + s->qscale = qscale; + s->chroma_qscale= s->chroma_qscale_table[qscale]; - s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; - s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; + s->y_dc_scale= s->y_dc_scale_table[ qscale ]; + s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ]; } /** @@ -2377,7 +2693,6 @@ static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n #endif } -#if 0 static inline int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc, uint8_t *scan_table) { @@ -2431,7 +2746,6 @@ static inline int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, in return len; } -#endif #endif @@ -2446,6 +2760,8 @@ static VLC mv_vlc; static VLC dc_lum, dc_chrom; static VLC sprite_trajectory; static VLC mb_type_b_vlc; +static VLC h263_mbtype_b_vlc; +static VLC cbpc_b_vlc; void init_vlc_rl(RLTable *rl) { @@ -2538,6 +2854,12 @@ void h263_decode_init_vlc(MpegEncContext *s) init_vlc(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4, &mb_type_b_tab[0][1], 2, 1, &mb_type_b_tab[0][0], 2, 1); + init_vlc(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15, + &h263_mbtype_b_tab[0][1], 2, 1, + &h263_mbtype_b_tab[0][0], 2, 1); + init_vlc(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4, + &cbpc_b_tab[0][1], 2, 1, + &cbpc_b_tab[0][0], 2, 1); } } @@ -2553,13 +2875,38 @@ int ff_h263_get_gob_height(MpegEncContext *s){ return 4; } +int ff_h263_decode_mba(MpegEncContext *s) +{ + int i, mb_pos; + + for(i=0; i<6; i++){ + if(s->mb_num-1 <= ff_mba_max[i]) break; + } + mb_pos= get_bits(&s->gb, ff_mba_length[i]); + s->mb_x= mb_pos % s->mb_width; + s->mb_y= mb_pos / s->mb_width; + + return mb_pos; +} + +void ff_h263_encode_mba(MpegEncContext *s) +{ + int i, mb_pos; + + for(i=0; i<6; i++){ + if(s->mb_num-1 <= ff_mba_max[i]) break; + } + mb_pos= s->mb_x + s->mb_width*s->mb_y; + put_bits(&s->pb, ff_mba_length[i], mb_pos); +} + /** - * decodes the group of blocks header. + * decodes the group of blocks header or slice header. 
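ff_h263_encode_mba/ff_h263_decode_mba above size the slice-structured macroblock-address field from the picture's macroblock count via the ff_mba_max/ff_mba_length tables. The tier values below are quoted from memory of H.263 Annex K and should be treated as illustrative, not as the patch's own tables:

#include <stdio.h>

static const int mba_max[]    = {  47,  98, 395, 1583, 6335, 9215 };
static const int mba_length[] = {   6,   7,   9,   11,   13,   14 };

/* Pick the MBA field width for a picture with mb_num macroblocks. */
static int mba_bits(int mb_num)
{
    int i;
    for (i = 0; i < 5; i++)
        if (mb_num - 1 <= mba_max[i])
            break;
    return mba_length[i];
}

int main(void)
{
    printf("QCIF (99 MBs)   -> %d-bit MBA\n", mba_bits(99));    /* 7 */
    printf("CIF  (396 MBs)  -> %d-bit MBA\n", mba_bits(396));   /* 9 */
    printf("4CIF (1584 MBs) -> %d-bit MBA\n", mba_bits(1584));  /* 11 */
    return 0;
}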
* @return <0 if an error occured */ static int h263_decode_gob_header(MpegEncContext *s) { - unsigned int val, gfid; + unsigned int val, gfid, gob_number; int left; /* Check for GOB Start Code */ @@ -2577,32 +2924,43 @@ static int h263_decode_gob_header(MpegEncContext *s) if(left<=13) return -1; -#ifdef DEBUG - fprintf(stderr,"\nGOB Start Code at MB %d\n", (s->mb_y * s->mb_width) + s->mb_x); -#endif - s->gob_number = get_bits(&s->gb, 5); /* GN */ - gfid = get_bits(&s->gb, 2); /* GFID */ - s->qscale = get_bits(&s->gb, 5); /* GQUANT */ - if(s->qscale==0) - return -1; - s->mb_x= 0; - s->mb_y= s->gob_index* s->gob_number; + if(s->h263_slice_structured){ + if(get_bits1(&s->gb)==0) + return -1; + + ff_h263_decode_mba(s); + + if(s->mb_num > 1583) + if(get_bits1(&s->gb)==0) + return -1; + + s->qscale = get_bits(&s->gb, 5); /* SQUANT */ + if(get_bits1(&s->gb)==0) + return -1; + gfid = get_bits(&s->gb, 2); /* GFID */ + }else{ + gob_number = get_bits(&s->gb, 5); /* GN */ + s->mb_x= 0; + s->mb_y= s->gob_index* gob_number; + gfid = get_bits(&s->gb, 2); /* GFID */ + s->qscale = get_bits(&s->gb, 5); /* GQUANT */ + } + if(s->mb_y >= s->mb_height) return -1; -#ifdef DEBUG - fprintf(stderr, "\nGN: %u GFID: %u Quant: %u\n", s->gob_number, gfid, s->qscale); -#endif + + if(s->qscale==0) + return -1; + return 0; } -#if 0 static inline void memsetw(short *tab, int val, int n) { int i; for(i=0;i<n;i++) tab[i] = val; } -#endif #ifdef CONFIG_ENCODERS @@ -2659,7 +3017,6 @@ void ff_mpeg4_encode_video_packet_header(MpegEncContext *s) { int mb_num_bits= av_log2(s->mb_num - 1) + 1; - ff_mpeg4_stuffing(&s->pb); put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0); put_bits(&s->pb, 1, 1); @@ -2725,7 +3082,7 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s) } if(len!=ff_mpeg4_get_video_packet_prefix_length(s)){ - printf("marker does not match f_code\n"); + av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n"); return -1; } @@ -2736,7 +3093,7 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s) mb_num= get_bits(&s->gb, mb_num_bits); if(mb_num>=s->mb_num){ - fprintf(stderr, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num); + av_log(s->avctx, AV_LOG_ERROR, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num); return -1; } if(s->pict_type == B_TYPE){ @@ -2750,7 +3107,7 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s) if(s->shape != BIN_ONLY_SHAPE){ int qscale= get_bits(&s->gb, s->quant_precision); if(qscale) - s->qscale= qscale; + s->chroma_qscale=s->qscale= qscale; } if(s->shape == RECT_SHAPE){ @@ -2775,7 +3132,7 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s) //FIXME dont just ignore everything if(s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ mpeg4_decode_sprite_trajectory(s); - fprintf(stderr, "untested\n"); + av_log(s->avctx, AV_LOG_ERROR, "untested\n"); } //FIXME reduced res stuff here @@ -2783,13 +3140,13 @@ static int mpeg4_decode_video_packet_header(MpegEncContext *s) if (s->pict_type != I_TYPE) { int f_code = get_bits(&s->gb, 3); /* fcode_for */ if(f_code==0){ - printf("Error, video packet header damaged (f_code=0)\n"); + av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (f_code=0)\n"); } } if (s->pict_type == B_TYPE) { int b_code = get_bits(&s->gb, 3); if(b_code==0){ - printf("Error, video packet header damaged (b_code=0)\n"); + av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (b_code=0)\n"); } } } @@ -2952,7 +3309,7 @@ static int 
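The video packet header code above writes the resync macroblock number with just enough bits to address every macroblock: av_log2(mb_num - 1) + 1. A portable stand-in for av_log2 and the sizing rule:

#include <stdio.h>

/* floor(log2(v)); returns 0 for v == 0, which matches how it is used here. */
static int ilog2(unsigned v)
{
    int n = 0;
    while (v >>= 1)
        n++;
    return n;
}

int main(void)
{
    int mb_num = 396;                         /* CIF: 22x18 macroblocks */
    int mb_num_bits = ilog2(mb_num - 1) + 1;  /* bits for the resync MB number */
    printf("CIF needs %d bits per macroblock number\n", mb_num_bits);  /* 9 */
    return 0;
}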
mpeg4_decode_partition_a(MpegEncContext *s){ do{ cbpc = get_vlc2(&s->gb, intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); if (cbpc < 0){ - fprintf(stderr, "cbpc corrupted at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "cbpc corrupted at %d %d\n", s->mb_x, s->mb_y); return -1; } }while(cbpc == 8); @@ -2962,7 +3319,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){ s->mb_intra = 1; if(cbpc & 4) { - change_qscale(s, quant_tab[get_bits(&s->gb, 2)]); + ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); } s->current_picture.qscale_table[xy]= s->qscale; @@ -2971,7 +3328,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){ int dc_pred_dir; int dc= mpeg4_decode_dc(s, i, &dc_pred_dir); if(dc < 0){ - fprintf(stderr, "DC corrupted at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "DC corrupted at %d %d\n", s->mb_x, s->mb_y); return -1; } dir<<=1; @@ -2980,7 +3337,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){ s->pred_dir_table[xy]= dir; }else{ /* P/S_TYPE */ int mx, my, pred_x, pred_y, bits; - int16_t * const mot_val= s->motion_val[s->block_index[0]]; + int16_t * const mot_val= s->current_picture.motion_val[0][s->block_index[0]]; const int stride= s->block_wrap[0]*2; // do{ //FIXME @@ -3011,7 +3368,7 @@ static int mpeg4_decode_partition_a(MpegEncContext *s){ cbpc = get_vlc2(&s->gb, inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); if (cbpc < 0){ - fprintf(stderr, "cbpc corrupted at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "cbpc corrupted at %d %d\n", s->mb_x, s->mb_y); return -1; } // }while(cbpc == 20); @@ -3107,7 +3464,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ int ac_pred= get_bits1(&s->gb); int cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); if(cbpy<0){ - fprintf(stderr, "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -3120,12 +3477,12 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ int cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); if(cbpy<0){ - fprintf(stderr, "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return -1; } if(s->cbp_table[xy] & 8) { - change_qscale(s, quant_tab[get_bits(&s->gb, 2)]); + ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); } s->current_picture.qscale_table[xy]= s->qscale; @@ -3133,7 +3490,7 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ int dc_pred_dir; int dc= mpeg4_decode_dc(s, i, &dc_pred_dir); if(dc < 0){ - fprintf(stderr, "DC corrupted at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "DC corrupted at %d %d\n", s->mb_x, s->mb_y); return -1; } dir<<=1; @@ -3150,12 +3507,12 @@ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count){ int cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); if(cbpy<0){ - fprintf(stderr, "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return -1; } if(s->cbp_table[xy] & 8) { - change_qscale(s, quant_tab[get_bits(&s->gb, 2)]); + ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); } s->current_picture.qscale_table[xy]= s->qscale; @@ -3187,7 +3544,7 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s) } if(s->resync_mb_x + s->resync_mb_y*s->mb_width + mb_num > s->mb_num){ - fprintf(stderr, "slice below monitor ...\n"); + 
av_log(s->avctx, AV_LOG_ERROR, "slice below monitor ...\n"); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error); return -1; } @@ -3196,12 +3553,12 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s) if(s->pict_type==I_TYPE){ if(get_bits_long(&s->gb, 19)!=DC_MARKER){ - fprintf(stderr, "marker missing after first I partition at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "marker missing after first I partition at %d %d\n", s->mb_x, s->mb_y); return -1; } }else{ if(get_bits(&s->gb, 17)!=MOTION_MARKER){ - fprintf(stderr, "marker missing after first P partition at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "marker missing after first P partition at %d %d\n", s->mb_x, s->mb_y); return -1; } } @@ -3232,16 +3589,14 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) cbp = s->cbp_table[xy]; if(s->current_picture.qscale_table[xy] != s->qscale){ - s->qscale= s->current_picture.qscale_table[xy]; - s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; - s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; + ff_set_qscale(s, s->current_picture.qscale_table[xy] ); } if (s->pict_type == P_TYPE || s->pict_type==S_TYPE) { int i; for(i=0; i<4; i++){ - s->mv[0][i][0] = s->motion_val[ s->block_index[i] ][0]; - s->mv[0][i][1] = s->motion_val[ s->block_index[i] ][1]; + s->mv[0][i][0] = s->current_picture.motion_val[0][ s->block_index[i] ][0]; + s->mv[0][i][1] = s->current_picture.motion_val[0][ s->block_index[i] ][1]; } s->mb_intra = IS_INTRA(mb_type); @@ -3280,7 +3635,7 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) /* decode each block */ for (i = 0; i < 6; i++) { if(mpeg4_decode_block(s, block[i], i, cbp&32, s->mb_intra, s->rvlc) < 0){ - fprintf(stderr, "texture corrupted at %d %d %d\n", s->mb_x, s->mb_y, s->mb_intra); + av_log(s->avctx, AV_LOG_ERROR, "texture corrupted at %d %d %d\n", s->mb_x, s->mb_y, s->mb_intra); return -1; } cbp+=cbp; @@ -3305,46 +3660,403 @@ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) } } +/** + * read the next MVs for OBMC. 
yes this is a ugly hack, feel free to send a patch :) + */ +static void preview_obmc(MpegEncContext *s){ + GetBitContext gb= s->gb; + + int cbpc, i, pred_x, pred_y, mx, my; + int16_t *mot_val; + const int xy= s->mb_x + 1 + s->mb_y * s->mb_stride; + const int stride= s->block_wrap[0]*2; + + for(i=0; i<4; i++) + s->block_index[i]+= 2; + for(i=4; i<6; i++) + s->block_index[i]+= 1; + s->mb_x++; + + assert(s->pict_type == P_TYPE); + + do{ + if (get_bits1(&s->gb)) { + /* skip mb */ + mot_val = s->current_picture.motion_val[0][ s->block_index[0] ]; + mot_val[0 ]= mot_val[2 ]= + mot_val[0+stride]= mot_val[2+stride]= 0; + mot_val[1 ]= mot_val[3 ]= + mot_val[1+stride]= mot_val[3+stride]= 0; + + s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; + goto end; + } + cbpc = get_vlc2(&s->gb, inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); + }while(cbpc == 20); + + if(cbpc & 4){ + s->current_picture.mb_type[xy]= MB_TYPE_INTRA; + }else{ + get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); + if (cbpc & 8) { + if(s->modified_quant){ + if(get_bits1(&s->gb)) skip_bits(&s->gb, 1); + else skip_bits(&s->gb, 5); + }else + skip_bits(&s->gb, 2); + } + + if ((cbpc & 16) == 0) { + s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0; + /* 16x16 motion prediction */ + mot_val= h263_pred_motion(s, 0, &pred_x, &pred_y); + if (s->umvplus) + mx = h263p_decode_umotion(s, pred_x); + else + mx = h263_decode_motion(s, pred_x, 1); + + if (s->umvplus) + my = h263p_decode_umotion(s, pred_y); + else + my = h263_decode_motion(s, pred_y, 1); + + mot_val[0 ]= mot_val[2 ]= + mot_val[0+stride]= mot_val[2+stride]= mx; + mot_val[1 ]= mot_val[3 ]= + mot_val[1+stride]= mot_val[3+stride]= my; + } else { + s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0; + for(i=0;i<4;i++) { + mot_val = h263_pred_motion(s, i, &pred_x, &pred_y); + if (s->umvplus) + mx = h263p_decode_umotion(s, pred_x); + else + mx = h263_decode_motion(s, pred_x, 1); + + if (s->umvplus) + my = h263p_decode_umotion(s, pred_y); + else + my = h263_decode_motion(s, pred_y, 1); + if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) + skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ + mot_val[0] = mx; + mot_val[1] = my; + } + } + } +end: + + for(i=0; i<4; i++) + s->block_index[i]-= 2; + for(i=4; i<6; i++) + s->block_index[i]-= 1; + s->mb_x--; + + s->gb= gb; +} + +static void h263_decode_dquant(MpegEncContext *s){ + static const int8_t quant_tab[4] = { -1, -2, 1, 2 }; + + if(s->modified_quant){ + if(get_bits1(&s->gb)) + s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ]; + else + s->qscale= get_bits(&s->gb, 5); + }else + s->qscale += quant_tab[get_bits(&s->gb, 2)]; + ff_set_qscale(s, s->qscale); +} + int ff_h263_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) { int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant; int16_t *mot_val; - static int8_t quant_tab[4] = { -1, -2, 1, 2 }; const int xy= s->mb_x + s->mb_y * s->mb_stride; - if (s->pict_type == P_TYPE || s->pict_type==S_TYPE) { - do{ - if (get_bits1(&s->gb)) { - /* skip mb */ - s->mb_intra = 0; - for(i=0;i<6;i++) - s->block_last_index[i] = -1; - s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_16X16; - if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ - s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; - s->mcsel=1; - s->mv[0][0][0]= get_amv(s, 0); - s->mv[0][0][1]= get_amv(s, 1); - - s->mb_skiped = 0; - }else{ + assert(!s->h263_pred); + + if (s->pict_type == P_TYPE) { + do{ + if (get_bits1(&s->gb)) { + /* skip 
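preview_obmc above can peek at the next macroblock's motion vectors because GetBitContext is a plain value: it copies the reader, parses ahead, and assigns the copy back. A minimal bit reader showing the same save/restore pattern (BitReader is illustrative, not libavcodec's API):

#include <stdint.h>
#include <stdio.h>

typedef struct { const uint8_t *buf; int bitpos; } BitReader;

static unsigned read_bits(BitReader *r, int n)
{
    unsigned v = 0;
    while (n--) {
        v = (v << 1) | ((r->buf[r->bitpos >> 3] >> (7 - (r->bitpos & 7))) & 1);
        r->bitpos++;
    }
    return v;
}

int main(void)
{
    static const uint8_t data[] = { 0xA5, 0x3C };
    BitReader r = { data, 0 };

    BitReader saved = r;                 /* snapshot, like "GetBitContext gb = s->gb" */
    unsigned peek = read_bits(&r, 4);    /* parse ahead ... */
    r = saved;                           /* ... then rewind as if nothing happened */

    printf("peeked %u, reader back at bit %d\n", peek, r.bitpos);
    printf("real read: %u\n", read_bits(&r, 4));
    return 0;
}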
mb */ + s->mb_intra = 0; + for(i=0;i<6;i++) + s->block_last_index[i] = -1; + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_16X16; s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; - s->mcsel=0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - s->mb_skiped = 1; + s->mb_skiped = !(s->obmc | s->loop_filter); + goto end; } - goto end; + cbpc = get_vlc2(&s->gb, inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); + //fprintf(stderr, "\tCBPC: %d", cbpc); + if (cbpc < 0){ + av_log(s->avctx, AV_LOG_ERROR, "cbpc damaged at %d %d\n", s->mb_x, s->mb_y); + return -1; + } + }while(cbpc == 20); + + dquant = cbpc & 8; + s->mb_intra = ((cbpc & 4) != 0); + if (s->mb_intra) goto intra; + + cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); + + if(s->alt_inter_vlc==0 || (cbpc & 3)!=3) + cbpy ^= 0xF; + + cbp = (cbpc & 3) | (cbpy << 2); + if (dquant) { + h263_decode_dquant(s); } - cbpc = get_vlc2(&s->gb, inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); - //fprintf(stderr, "\tCBPC: %d", cbpc); - if (cbpc < 0){ - fprintf(stderr, "cbpc damaged at %d %d\n", s->mb_x, s->mb_y); + + s->mv_dir = MV_DIR_FORWARD; + if ((cbpc & 16) == 0) { + s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0; + /* 16x16 motion prediction */ + s->mv_type = MV_TYPE_16X16; + h263_pred_motion(s, 0, &pred_x, &pred_y); + if (s->umvplus) + mx = h263p_decode_umotion(s, pred_x); + else + mx = h263_decode_motion(s, pred_x, 1); + + if (mx >= 0xffff) + return -1; + + if (s->umvplus) + my = h263p_decode_umotion(s, pred_y); + else + my = h263_decode_motion(s, pred_y, 1); + + if (my >= 0xffff) + return -1; + s->mv[0][0][0] = mx; + s->mv[0][0][1] = my; + + if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) + skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ + } else { + s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0; + s->mv_type = MV_TYPE_8X8; + for(i=0;i<4;i++) { + mot_val = h263_pred_motion(s, i, &pred_x, &pred_y); + if (s->umvplus) + mx = h263p_decode_umotion(s, pred_x); + else + mx = h263_decode_motion(s, pred_x, 1); + if (mx >= 0xffff) + return -1; + + if (s->umvplus) + my = h263p_decode_umotion(s, pred_y); + else + my = h263_decode_motion(s, pred_y, 1); + if (my >= 0xffff) + return -1; + s->mv[0][i][0] = mx; + s->mv[0][i][1] = my; + if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) + skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ + mot_val[0] = mx; + mot_val[1] = my; + } + } + + if(s->obmc){ + if(s->pict_type == P_TYPE && s->mb_x+1<s->mb_width) + preview_obmc(s); + } + } else if(s->pict_type==B_TYPE) { + int mb_type; + const int stride= s->b8_stride; + int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ]; + int16_t *mot_val1 = s->current_picture.motion_val[1][ 2*(s->mb_x + s->mb_y*stride) ]; +// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride; + + //FIXME ugly + mot_val0[0 ]= mot_val0[2 ]= mot_val0[0+2*stride]= mot_val0[2+2*stride]= + mot_val0[1 ]= mot_val0[3 ]= mot_val0[1+2*stride]= mot_val0[3+2*stride]= + mot_val1[0 ]= mot_val1[2 ]= mot_val1[0+2*stride]= mot_val1[2+2*stride]= + mot_val1[1 ]= mot_val1[3 ]= mot_val1[1+2*stride]= mot_val1[3+2*stride]= 0; + + do{ + mb_type= get_vlc2(&s->gb, h263_mbtype_b_vlc.table, H263_MBTYPE_B_VLC_BITS, 2); + if (mb_type < 0){ + av_log(s->avctx, AV_LOG_ERROR, "b mb_type damaged at %d %d\n", s->mb_x, s->mb_y); + return -1; + } + + mb_type= h263_mb_type_b_map[ mb_type ]; + }while(!mb_type); + + s->mb_intra = IS_INTRA(mb_type); + if(HAS_CBP(mb_type)){ + cbpc = get_vlc2(&s->gb, cbpc_b_vlc.table, 
CBPC_B_VLC_BITS, 1); + if(s->mb_intra){ + dquant = IS_QUANT(mb_type); + goto intra; + } + + cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); + + if (cbpy < 0){ + av_log(s->avctx, AV_LOG_ERROR, "b cbpy damaged at %d %d\n", s->mb_x, s->mb_y); + return -1; + } + + if(s->alt_inter_vlc==0 || (cbpc & 3)!=3) + cbpy ^= 0xF; + + cbp = (cbpc & 3) | (cbpy << 2); + }else + cbp=0; + + assert(!s->mb_intra); + + if(IS_QUANT(mb_type)){ + h263_decode_dquant(s); + } + + if(IS_DIRECT(mb_type)){ + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; + mb_type |= ff_mpeg4_set_direct_mv(s, 0, 0); + }else{ + s->mv_dir = 0; + s->mv_type= MV_TYPE_16X16; +//FIXME UMV + + if(USES_LIST(mb_type, 0)){ + int16_t *mot_val= h263_pred_motion2(s, 0, 0, &mx, &my); + s->mv_dir = MV_DIR_FORWARD; + + mx = h263_decode_motion(s, mx, 1); + my = h263_decode_motion(s, my, 1); + + s->mv[0][0][0] = mx; + s->mv[0][0][1] = my; + mot_val[0 ]= mot_val[2 ]= mot_val[0+2*stride]= mot_val[2+2*stride]= mx; + mot_val[1 ]= mot_val[3 ]= mot_val[1+2*stride]= mot_val[3+2*stride]= my; + } + + if(USES_LIST(mb_type, 1)){ + int16_t *mot_val= h263_pred_motion2(s, 0, 1, &mx, &my); + s->mv_dir |= MV_DIR_BACKWARD; + + mx = h263_decode_motion(s, mx, 1); + my = h263_decode_motion(s, my, 1); + + s->mv[1][0][0] = mx; + s->mv[1][0][1] = my; + mot_val[0 ]= mot_val[2 ]= mot_val[0+2*stride]= mot_val[2+2*stride]= mx; + mot_val[1 ]= mot_val[3 ]= mot_val[1+2*stride]= mot_val[3+2*stride]= my; + } + } + + s->current_picture.mb_type[xy]= mb_type; + } else { /* I-Frame */ + do{ + cbpc = get_vlc2(&s->gb, intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); + if (cbpc < 0){ + av_log(s->avctx, AV_LOG_ERROR, "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y); + return -1; + } + }while(cbpc == 8); + + dquant = cbpc & 4; + s->mb_intra = 1; +intra: + s->current_picture.mb_type[xy]= MB_TYPE_INTRA; + if (s->h263_aic) { + s->ac_pred = get_bits1(&s->gb); + if(s->ac_pred){ + s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED; + + s->h263_aic_dir = get_bits1(&s->gb); + } + }else + s->ac_pred = 0; + + cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); + if(cbpy<0){ + av_log(s->avctx, AV_LOG_ERROR, "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y); + return -1; + } + cbp = (cbpc & 3) | (cbpy << 2); + if (dquant) { + h263_decode_dquant(s); + } + } + + /* decode each block */ + for (i = 0; i < 6; i++) { + if (h263_decode_block(s, block[i], i, cbp&32) < 0) return -1; + cbp+=cbp; + } +end: + + /* per-MB end of slice check */ + { + int v= show_bits(&s->gb, 16); + + if(get_bits_count(&s->gb) + 16 > s->gb.size_in_bits){ + v>>= get_bits_count(&s->gb) + 16 - s->gb.size_in_bits; } - }while(cbpc == 20); + + if(v==0) + return SLICE_END; + } + + return SLICE_OK; +} + +int ff_mpeg4_decode_mb(MpegEncContext *s, + DCTELEM block[6][64]) +{ + int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant; + int16_t *mot_val; + static int8_t quant_tab[4] = { -1, -2, 1, 2 }; + const int xy= s->mb_x + s->mb_y * s->mb_stride; + + assert(s->h263_pred); + + if (s->pict_type == P_TYPE || s->pict_type==S_TYPE) { + do{ + if (get_bits1(&s->gb)) { + /* skip mb */ + s->mb_intra = 0; + for(i=0;i<6;i++) + s->block_last_index[i] = -1; + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_16X16; + if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ + s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; + s->mcsel=1; + s->mv[0][0][0]= get_amv(s, 0); + s->mv[0][0][1]= get_amv(s, 1); + + s->mb_skiped = 0; + }else{ + s->current_picture.mb_type[xy]= MB_TYPE_SKIP | 
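The per-MB end-of-slice test above peeks 16 bits (shrinking the window when fewer than 16 bits remain) and treats an all-zero window as SLICE_END. Sketch of that predicate, with the peeked value passed in rather than read from a real bitstream:

#include <stdio.h>

static int slice_ended(unsigned next16, int bits_consumed, int bits_total)
{
    unsigned v = next16;
    if (bits_consumed + 16 > bits_total)        /* fewer than 16 bits left: shrink the window */
        v >>= bits_consumed + 16 - bits_total;
    return v == 0;                              /* all-zero window -> end of slice */
}

int main(void)
{
    printf("%d\n", slice_ended(0x0000, 100, 200)); /* 1: full zero window */
    printf("%d\n", slice_ended(0x0040, 190, 200)); /* 0: a set bit survives the 6-bit shift */
    return 0;
}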
MB_TYPE_16x16 | MB_TYPE_L0; + s->mcsel=0; + s->mv[0][0][0] = 0; + s->mv[0][0][1] = 0; + s->mb_skiped = 1; + } + goto end; + } + cbpc = get_vlc2(&s->gb, inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); + //fprintf(stderr, "\tCBPC: %d", cbpc); + if (cbpc < 0){ + av_log(s->avctx, AV_LOG_ERROR, "cbpc damaged at %d %d\n", s->mb_x, s->mb_y); + return -1; + } + }while(cbpc == 20); dquant = cbpc & 8; s->mb_intra = ((cbpc & 4) != 0); @@ -3353,10 +4065,11 @@ int ff_h263_decode_mb(MpegEncContext *s, if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE && (cbpc & 16) == 0) s->mcsel= get_bits1(&s->gb); else s->mcsel= 0; - cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); - cbp = (cbpc & 3) | ((cbpy ^ 0xf) << 2); + cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F; + + cbp = (cbpc & 3) | (cbpy << 2); if (dquant) { - change_qscale(s, quant_tab[get_bits(&s->gb, 2)]); + ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); } if((!s->progressive_sequence) && (cbp || (s->workaround_bugs&FF_BUG_XVID_ILACE))) s->interlaced_dct= get_bits1(&s->gb); @@ -3398,49 +4111,32 @@ int ff_h263_decode_mb(MpegEncContext *s, /* 16x16 motion prediction */ s->mv_type = MV_TYPE_16X16; h263_pred_motion(s, 0, &pred_x, &pred_y); - if (s->umvplus) - mx = h263p_decode_umotion(s, pred_x); - else - mx = h263_decode_motion(s, pred_x, s->f_code); + mx = h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return -1; - if (s->umvplus) - my = h263p_decode_umotion(s, pred_y); - else - my = h263_decode_motion(s, pred_y, s->f_code); + my = h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return -1; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; - - if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) - skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ } } else { s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0; s->mv_type = MV_TYPE_8X8; for(i=0;i<4;i++) { mot_val = h263_pred_motion(s, i, &pred_x, &pred_y); - if (s->umvplus) - mx = h263p_decode_umotion(s, pred_x); - else - mx = h263_decode_motion(s, pred_x, s->f_code); + mx = h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return -1; - if (s->umvplus) - my = h263p_decode_umotion(s, pred_y); - else - my = h263_decode_motion(s, pred_y, s->f_code); + my = h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return -1; s->mv[0][i][0] = mx; s->mv[0][i][1] = my; - if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1) - skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */ mot_val[0] = mx; mot_val[1] = my; } @@ -3488,7 +4184,7 @@ int ff_h263_decode_mb(MpegEncContext *s, modb2= get_bits1(&s->gb); mb_type= get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1); if(mb_type<0){ - printf("illegal MB_type\n"); + av_log(s->avctx, AV_LOG_ERROR, "illegal MB_type\n"); return -1; } mb_type= mb_type_b_map[ mb_type ]; @@ -3497,7 +4193,7 @@ int ff_h263_decode_mb(MpegEncContext *s, if ((!IS_DIRECT(mb_type)) && cbp) { if(get_bits1(&s->gb)){ - change_qscale(s, get_bits1(&s->gb)*4 - 2); + ff_set_qscale(s, s->qscale + get_bits1(&s->gb)*4 - 2); } } @@ -3584,7 +4280,7 @@ int ff_h263_decode_mb(MpegEncContext *s, do{ cbpc = get_vlc2(&s->gb, intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); if (cbpc < 0){ - fprintf(stderr, "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } }while(cbpc == 8); @@ -3592,61 +4288,39 @@ int ff_h263_decode_mb(MpegEncContext *s, dquant = cbpc & 4; s->mb_intra = 1; intra: - s->current_picture.mb_type[xy]= 
MB_TYPE_INTRA; - if (s->h263_pred || s->h263_aic) { - s->ac_pred = get_bits1(&s->gb); - if(s->ac_pred){ - s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED; - - if (s->h263_aic) - s->h263_aic_dir = get_bits1(&s->gb); - } - }else - s->ac_pred = 0; + s->ac_pred = get_bits1(&s->gb); + if(s->ac_pred) + s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED; + else + s->current_picture.mb_type[xy]= MB_TYPE_INTRA; cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); if(cbpy<0){ - fprintf(stderr, "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } cbp = (cbpc & 3) | (cbpy << 2); if (dquant) { - change_qscale(s, quant_tab[get_bits(&s->gb, 2)]); + ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); } if(!s->progressive_sequence) s->interlaced_dct= get_bits1(&s->gb); /* decode each block */ - if (s->h263_pred) { - for (i = 0; i < 6; i++) { - if (mpeg4_decode_block(s, block[i], i, cbp&32, 1, 0) < 0) - return -1; - cbp+=cbp; - } - } else { - for (i = 0; i < 6; i++) { - if (h263_decode_block(s, block[i], i, cbp&32) < 0) - return -1; - cbp+=cbp; - } + for (i = 0; i < 6; i++) { + if (mpeg4_decode_block(s, block[i], i, cbp&32, 1, 0) < 0) + return -1; + cbp+=cbp; } goto end; } /* decode each block */ - if (s->h263_pred) { - for (i = 0; i < 6; i++) { - if (mpeg4_decode_block(s, block[i], i, cbp&32, 0, 0) < 0) - return -1; - cbp+=cbp; - } - } else { - for (i = 0; i < 6; i++) { - if (h263_decode_block(s, block[i], i, cbp&32) < 0) - return -1; - cbp+=cbp; - } + for (i = 0; i < 6; i++) { + if (mpeg4_decode_block(s, block[i], i, cbp&32, 0, 0) < 0) + return -1; + cbp+=cbp; } end: @@ -3658,15 +4332,6 @@ end: return SLICE_OK; return SLICE_END; } - }else{ - int v= show_bits(&s->gb, 16); - - if(get_bits_count(&s->gb) + 16 > s->gb.size_in_bits){ - v>>= get_bits_count(&s->gb) + 16 - s->gb.size_in_bits; - } - - if(v==0) - return SLICE_END; } return SLICE_OK; @@ -3741,6 +4406,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, int code, level, i, j, last, run; RLTable *rl = &rl_inter; const uint8_t *scan_table; + GetBitContext gb= s->gb; scan_table = s->intra_scantable.permutated; if (s->h263_aic && s->mb_intra) { @@ -3754,7 +4420,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, } } else if (s->mb_intra) { /* DC coef */ - if(s->h263_rv10){ + if(s->codec_id == CODEC_ID_RV10){ if (s->rv10_version == 3 && s->pict_type == I_TYPE) { int component, diff; component = (n <= 3 ? 
0 : n - 4 + 1); @@ -3775,8 +4441,9 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, }else{ level = get_bits(&s->gb, 8); if((level&0x7F) == 0){ - fprintf(stderr, "illegal dc %d at %d %d\n", level, s->mb_x, s->mb_y); - return -1; + av_log(s->avctx, AV_LOG_ERROR, "illegal dc %d at %d %d\n", level, s->mb_x, s->mb_y); + if(s->error_resilience >= FF_ER_COMPLIANT) + return -1; } if (level == 255) level = 128; @@ -3792,11 +4459,11 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, s->block_last_index[n] = i - 1; return 0; } - +retry: for(;;) { code = get_vlc2(&s->gb, rl->vlc.table, TEX_VLC_BITS, 2); if (code < 0){ - fprintf(stderr, "illegal ac vlc code at %dx%d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "illegal ac vlc code at %dx%d\n", s->mb_x, s->mb_y); return -1; } if (code == rl->n) { @@ -3815,7 +4482,7 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, run = get_bits(&s->gb, 6); level = (int8_t)get_bits(&s->gb, 8); if(level == -128){ - if (s->h263_rv10) { + if (s->codec_id == CODEC_ID_RV10) { /* XXX: should patch encoder too */ level = get_sbits(&s->gb, 12); }else{ @@ -3833,7 +4500,15 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block, } i += run; if (i >= 64){ - fprintf(stderr, "run overflow at %dx%d\n", s->mb_x, s->mb_y); + if(s->alt_inter_vlc && rl == &rl_inter && !s->mb_intra){ + //looks like a hack but no, its the way its supposed to work ... + rl = &rl_intra_aic; + i = 0; + s->gb= gb; + memset(block, 0, sizeof(DCTELEM)*64); + goto retry; + } + av_log(s->avctx, AV_LOG_ERROR, "run overflow at %dx%d\n", s->mb_x, s->mb_y); return -1; } j = scan_table[i]; @@ -3867,7 +4542,7 @@ static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr) else code = get_vlc2(&s->gb, dc_chrom.table, DC_VLC_BITS, 1); if (code < 0 || code > 9 /* && s->nbit<9 */){ - fprintf(stderr, "illegal dc vlc\n"); + av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n"); return -1; } if (code == 0) { @@ -3889,7 +4564,7 @@ static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr) if (code > 8){ if(get_bits1(&s->gb)==0){ /* marker */ if(s->error_resilience>=2){ - fprintf(stderr, "dc marker bit missing\n"); + av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n"); return -1; } } @@ -3899,7 +4574,7 @@ static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr) level += pred; if (level < 0){ if(s->error_resilience>=3){ - fprintf(stderr, "dc<0 at %dx%d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "dc<0 at %dx%d\n", s->mb_x, s->mb_y); return -1; } level = 0; @@ -3914,7 +4589,7 @@ static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr) if(s->error_resilience>=3){ if(*dc_val > 2048 + s->y_dc_scale + s->c_dc_scale){ - fprintf(stderr, "dc overflow at %dx%d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "dc overflow at %dx%d\n", s->mb_x, s->mb_y); return -1; } } @@ -4013,7 +4688,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, /* escape */ if(rvlc){ if(SHOW_UBITS(re, &s->gb, 1)==0){ - fprintf(stderr, "1. marker bit missing in rvlc esc\n"); + av_log(s->avctx, AV_LOG_ERROR, "1. marker bit missing in rvlc esc\n"); return -1; }; SKIP_CACHE(re, &s->gb, 1); @@ -4023,14 +4698,14 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, UPDATE_CACHE(re, &s->gb); if(SHOW_UBITS(re, &s->gb, 1)==0){ - fprintf(stderr, "2. marker bit missing in rvlc esc\n"); + av_log(s->avctx, AV_LOG_ERROR, "2. 
marker bit missing in rvlc esc\n"); return -1; }; SKIP_CACHE(re, &s->gb, 1); level= SHOW_UBITS(re, &s->gb, 11); SKIP_CACHE(re, &s->gb, 11); if(SHOW_UBITS(re, &s->gb, 5)!=0x10){ - fprintf(stderr, "reverse esc missing\n"); + av_log(s->avctx, AV_LOG_ERROR, "reverse esc missing\n"); return -1; }; SKIP_CACHE(re, &s->gb, 5); @@ -4060,14 +4735,14 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, level= SHOW_SBITS(re, &s->gb, 12); LAST_SKIP_BITS(re, &s->gb, 12); }else{ if(SHOW_UBITS(re, &s->gb, 1)==0){ - fprintf(stderr, "1. marker bit missing in 3. esc\n"); + av_log(s->avctx, AV_LOG_ERROR, "1. marker bit missing in 3. esc\n"); return -1; }; SKIP_CACHE(re, &s->gb, 1); level= SHOW_SBITS(re, &s->gb, 12); SKIP_CACHE(re, &s->gb, 12); if(SHOW_UBITS(re, &s->gb, 1)==0){ - fprintf(stderr, "2. marker bit missing in 3. esc\n"); + av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in 3. esc\n"); return -1; }; LAST_SKIP_CACHE(re, &s->gb, 1); @@ -4075,7 +4750,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, } if(level*s->qscale>1024 || level*s->qscale<-1024){ - fprintf(stderr, "|level| overflow in 3. esc, qp=%d\n", s->qscale); + av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc, qp=%d\n", s->qscale); return -1; } #if 0 @@ -4084,7 +4759,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, if(abs_level<=MAX_LEVEL && run<=MAX_RUN){ const int run1= run - rl->max_run[last][abs_level] - 1; if(abs_level <= rl->max_level[last][run]){ - fprintf(stderr, "illegal 3. esc, vlc encoding possible\n"); + av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n"); return -1; } if(s->error_resilience > FF_ER_COMPLIANT){ @@ -4141,7 +4816,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, if (i > 62){ i-= 192; if(i&(~63)){ - fprintf(stderr, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -4194,7 +4869,7 @@ int h263_decode_picture_header(MpegEncContext *s) } if (startcode != 0x20) { - fprintf(stderr, "Bad picture start code\n"); + av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n"); return -1; } /* temporal reference */ @@ -4203,20 +4878,17 @@ int h263_decode_picture_header(MpegEncContext *s) /* PTYPE starts here */ if (get_bits1(&s->gb) != 1) { /* marker */ - fprintf(stderr, "Bad marker\n"); + av_log(s->avctx, AV_LOG_ERROR, "Bad marker\n"); return -1; } if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Bad H263 id\n"); + av_log(s->avctx, AV_LOG_ERROR, "Bad H263 id\n"); return -1; /* h263 id */ } skip_bits1(&s->gb); /* split screen off */ skip_bits1(&s->gb); /* camera off */ skip_bits1(&s->gb); /* freeze picture release off */ - /* Reset GOB number */ - s->gob_number = 0; - format = get_bits(&s->gb, 3); /* 0 forbidden @@ -4235,22 +4907,20 @@ int h263_decode_picture_header(MpegEncContext *s) s->pict_type = I_TYPE + get_bits1(&s->gb); - s->unrestricted_mv = get_bits1(&s->gb); - s->h263_long_vectors = s->unrestricted_mv; + s->h263_long_vectors = get_bits1(&s->gb); if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "H263 SAC not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "H263 SAC not supported\n"); return -1; /* SAC: off */ } - if (get_bits1(&s->gb) != 0) { - s->mv_type = MV_TYPE_8X8; /* Advanced prediction mode */ - } + s->obmc= get_bits1(&s->gb); /* Advanced prediction mode */ + s->unrestricted_mv = s->h263_long_vectors || s->obmc; if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "H263 PB frame 
not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "H263 PB frame not supported\n"); return -1; /* not PB frame */ } - s->qscale = get_bits(&s->gb, 5); + s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); skip_bits1(&s->gb); /* Continuous Presence Multipoint mode: off */ s->width = width; @@ -4269,51 +4939,46 @@ int h263_decode_picture_header(MpegEncContext *s) dprintf("ufep=1, format: %d\n", format); skip_bits(&s->gb,1); /* Custom PCF */ s->umvplus = get_bits(&s->gb, 1); /* Unrestricted Motion Vector */ - s->unrestricted_mv = s->umvplus; - skip_bits1(&s->gb); /* Syntax-based Arithmetic Coding (SAC) */ - if (get_bits1(&s->gb) != 0) { - s->mv_type = MV_TYPE_8X8; /* Advanced prediction mode */ - } - if (get_bits1(&s->gb) != 0) { /* Advanced Intra Coding (AIC) */ - s->h263_aic = 1; - } - - if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Deblocking Filter not supported\n"); - } - if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Slice Structured not supported\n"); - } - if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Reference Picture Selection not supported\n"); - } if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Independent Segment Decoding not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "Syntax-based Arithmetic Coding (SAC) not supported\n"); } + s->obmc= get_bits1(&s->gb); /* Advanced prediction mode */ + s->h263_aic = get_bits1(&s->gb); /* Advanced Intra Coding (AIC) */ + s->loop_filter= get_bits1(&s->gb); + s->unrestricted_mv = s->umvplus || s->obmc || s->loop_filter; + + s->h263_slice_structured= get_bits1(&s->gb); if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Alternative Inter VLC not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "Reference Picture Selection not supported\n"); } if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Modified Quantization not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "Independent Segment Decoding not supported\n"); } + s->alt_inter_vlc= get_bits1(&s->gb); + s->modified_quant= get_bits1(&s->gb); + if(s->modified_quant) + s->chroma_qscale_table= ff_h263_chroma_qscale_table; skip_bits(&s->gb, 1); /* Prevent start code emulation */ skip_bits(&s->gb, 3); /* Reserved */ } else if (ufep != 0) { - fprintf(stderr, "Bad UFEP type (%d)\n", ufep); + av_log(s->avctx, AV_LOG_ERROR, "Bad UFEP type (%d)\n", ufep); return -1; } /* MPPTYPE */ - s->pict_type = get_bits(&s->gb, 3) + I_TYPE; - dprintf("pict_type: %d\n", s->pict_type); - if (s->pict_type != I_TYPE && - s->pict_type != P_TYPE) + s->pict_type = get_bits(&s->gb, 3); + switch(s->pict_type){ + case 0: s->pict_type= I_TYPE;break; + case 1: s->pict_type= P_TYPE;break; + case 3: s->pict_type= B_TYPE;break; + case 7: s->pict_type= I_TYPE;break; //ZYGO + default: return -1; + } skip_bits(&s->gb, 2); s->no_rounding = get_bits1(&s->gb); - dprintf("RTYPE: %d\n", s->no_rounding); skip_bits(&s->gb, 4); /* Get the picture dimensions */ @@ -4354,36 +5019,79 @@ int h263_decode_picture_header(MpegEncContext *s) if(get_bits1(&s->gb)==0) /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */ skip_bits1(&s->gb); } + if(s->h263_slice_structured){ + if (get_bits1(&s->gb) != 0) { + av_log(s->avctx, AV_LOG_ERROR, "rectangular slices not supported\n"); + } + if (get_bits1(&s->gb) != 0) { + av_log(s->avctx, AV_LOG_ERROR, "unordered slices not supported\n"); + } + } } s->qscale = get_bits(&s->gb, 5); } + /* PEI */ while (get_bits1(&s->gb) != 0) { skip_bits(&s->gb, 8); } + + if(s->h263_slice_structured){ + if (get_bits1(&s->gb) != 1) { + av_log(s->avctx, AV_LOG_ERROR, "SEPB1 marker missing\n"); + return -1; + } + + 
ff_h263_decode_mba(s); + + if (get_bits1(&s->gb) != 1) { + av_log(s->avctx, AV_LOG_ERROR, "SEPB2 marker missing\n"); + return -1; + } + } s->f_code = 1; if(s->h263_aic){ s->y_dc_scale_table= - s->c_dc_scale_table= h263_aic_dc_scale_table; + s->c_dc_scale_table= ff_aic_dc_scale_table; }else{ s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; } if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("qp:%d %c size:%d rnd:%d %s %s %s %s\n", + av_log(s->avctx, AV_LOG_DEBUG, "qp:%d %c size:%d rnd:%d%s%s%s%s%s%s%s%s%s\n", s->qscale, av_get_pict_type_char(s->pict_type), s->gb.size_in_bits, 1-s->no_rounding, - s->mv_type == MV_TYPE_8X8 ? "ADV" : "", - s->umvplus ? "UMV" : "", - s->h263_long_vectors ? "LONG" : "", - s->h263_plus ? "+" : "" + s->obmc ? " AP" : "", + s->umvplus ? " UMV" : "", + s->h263_long_vectors ? " LONG" : "", + s->h263_plus ? " +" : "", + s->h263_aic ? " AIC" : "", + s->alt_inter_vlc ? " AIV" : "", + s->modified_quant ? " MQ" : "", + s->loop_filter ? " LOOP" : "", + s->h263_slice_structured ? " SS" : "" ); } +#if 1 + if (s->pict_type == I_TYPE && s->avctx->codec_tag == ff_get_fourcc("ZYGO")){ + int i,j; + for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb)); + av_log(s->avctx, AV_LOG_DEBUG, "\n"); + for(i=0; i<13; i++){ + for(j=0; j<3; j++){ + int v= get_bits(&s->gb, 8); + v |= get_sbits(&s->gb, 8)<<8; + av_log(s->avctx, AV_LOG_DEBUG, " %5d", v); + } + av_log(s->avctx, AV_LOG_DEBUG, "\n"); + } + for(i=0; i<50; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb)); + } +#endif - return 0; } @@ -4636,7 +5344,7 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ if ((s->vol_control_parameters=get_bits1(gb))) { /* vol control parameter */ int chroma_format= get_bits(gb, 2); if(chroma_format!=1){ - printf("illegal chroma format\n"); + av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n"); } s->low_delay= get_bits1(gb); if(get_bits1(gb)){ /* vbv parameters */ @@ -4659,9 +5367,9 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ } s->shape = get_bits(gb, 2); /* vol shape */ - if(s->shape != RECT_SHAPE) printf("only rectangular vol supported\n"); + if(s->shape != RECT_SHAPE) av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n"); if(s->shape == GRAY_SHAPE && vo_ver_id != 1){ - printf("Gray shape not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n"); skip_bits(gb, 4); //video_object_layer_shape_extension } @@ -4693,15 +5401,16 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ } } - s->progressive_sequence= get_bits1(gb)^1; + s->progressive_sequence= + s->progressive_frame= get_bits1(gb)^1; if(!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO)) - printf("OBMC not supported (very likely buggy encoder)\n"); /* OBMC Disable */ + av_log(s->avctx, AV_LOG_INFO, "MPEG4 OBMC not supported (very likely buggy encoder)\n"); /* OBMC Disable */ if (vo_ver_id == 1) { s->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */ } else { s->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */ } - if(s->vol_sprite_usage==STATIC_SPRITE) printf("Static Sprites not supported\n"); + if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n"); if(s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE){ if(s->vol_sprite_usage==STATIC_SPRITE){ s->sprite_width = get_bits(gb, 13); @@ -4723,8 +5432,8 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ if (get_bits1(gb) == 1) { /* 
not_8_bit */ s->quant_precision = get_bits(gb, 4); /* quant_precision */ - if(get_bits(gb, 4)!=8) printf("N-bit not supported\n"); /* bits_per_pixel */ - if(s->quant_precision!=5) printf("quant precission %d\n", s->quant_precision); + if(get_bits(gb, 4)!=8) av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n"); /* bits_per_pixel */ + if(s->quant_precision!=5) av_log(s->avctx, AV_LOG_ERROR, "quant precission %d\n", s->quant_precision); } else { s->quant_precision = 5; } @@ -4797,7 +5506,7 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ s->quarter_sample= get_bits1(gb); else s->quarter_sample=0; - if(!get_bits1(gb)) printf("Complexity estimation not supported\n"); + if(!get_bits1(gb)) av_log(s->avctx, AV_LOG_ERROR, "Complexity estimation not supported\n"); s->resync_marker= !get_bits1(gb); /* resync_marker_disabled */ @@ -4809,12 +5518,12 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ if(vo_ver_id != 1) { s->new_pred= get_bits1(gb); if(s->new_pred){ - printf("new pred not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n"); skip_bits(gb, 2); /* requested upstream message type */ skip_bits1(gb); /* newpred segment type */ } s->reduced_res_vop= get_bits1(gb); - if(s->reduced_res_vop) printf("reduced resolution VOP not supported\n"); + if(s->reduced_res_vop) av_log(s->avctx, AV_LOG_ERROR, "reduced resolution VOP not supported\n"); } else{ s->new_pred=0; @@ -4849,7 +5558,7 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){ *gb= bak; }else - printf("scalability not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n"); // bin shape stuff FIXME } @@ -4916,7 +5625,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ s->pict_type = get_bits(gb, 2) + I_TYPE; /* pict type: I = 0 , P = 1 */ if(s->pict_type==B_TYPE && s->low_delay && s->vol_control_parameters==0 && !(s->flags & CODEC_FLAG_LOW_DELAY)){ - printf("low_delay flag set, but shouldnt, clearing it\n"); + av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set, but shouldnt, clearing it\n"); s->low_delay=0; } @@ -4924,7 +5633,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ if(s->partitioned_frame) s->decode_mb= mpeg4_decode_partitioned_mb; else - s->decode_mb= ff_h263_decode_mb; + s->decode_mb= ff_mpeg4_decode_mb; if(s->time_increment_resolution==0){ s->time_increment_resolution=1; @@ -4937,13 +5646,13 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ check_marker(gb, "before time_increment"); if(s->time_increment_bits==0){ - printf("hmm, seems the headers arnt complete, trying to guess time_increment_bits\n"); + av_log(s->avctx, AV_LOG_ERROR, "hmm, seems the headers arnt complete, trying to guess time_increment_bits\n"); for(s->time_increment_bits=1 ;s->time_increment_bits<16; s->time_increment_bits++){ if(show_bits(gb, s->time_increment_bits+1)&1) break; } - printf("my guess is %d bits ;)\n",s->time_increment_bits); + av_log(s->avctx, AV_LOG_ERROR, "my guess is %d bits ;)\n",s->time_increment_bits); } if(IS_3IV1) time_increment= get_bits1(gb); //FIXME investigate further @@ -4981,15 +5690,15 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2; } - s->current_picture.pts= s->time*1000LL*1000LL / s->time_increment_resolution; + s->current_picture_ptr->pts= s->time*1000LL*1000LL / s->time_increment_resolution; if(s->avctx->debug&FF_DEBUG_PTS) - printf("MPEG4 PTS: %f\n", 
s->current_picture.pts/(1000.0*1000.0)); + av_log(s->avctx, AV_LOG_DEBUG, "MPEG4 PTS: %f\n", s->current_picture_ptr->pts/(1000.0*1000.0)); check_marker(gb, "before vop_coded"); /* vop coded */ if (get_bits1(gb) != 1){ - printf("vop not coded\n"); + av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n"); return FRAME_SKIPED; } //printf("time %d %d %d || %Ld %Ld %Ld\n", s->time_increment_bits, s->time_increment_resolution, s->time_base, @@ -5046,21 +5755,21 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ if(s->pict_type == S_TYPE && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){ mpeg4_decode_sprite_trajectory(s); - if(s->sprite_brightness_change) printf("sprite_brightness_change not supported\n"); - if(s->vol_sprite_usage==STATIC_SPRITE) printf("static sprite not supported\n"); + if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n"); + if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n"); } if (s->shape != BIN_ONLY_SHAPE) { - s->qscale = get_bits(gb, s->quant_precision); + s->chroma_qscale= s->qscale = get_bits(gb, s->quant_precision); if(s->qscale==0){ - printf("Error, header damaged or not MPEG4 header (qscale=0)\n"); + av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (qscale=0)\n"); return -1; // makes no sense to continue, as there is nothing left from the image then } if (s->pict_type != I_TYPE) { s->f_code = get_bits(gb, 3); /* fcode_for */ if(s->f_code==0){ - printf("Error, header damaged or not MPEG4 header (f_code=0)\n"); + av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (f_code=0)\n"); return -1; // makes no sense to continue, as the MV decoding will break very quickly } }else @@ -5072,7 +5781,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ s->b_code=1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d\n", + av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d\n", s->qscale, s->f_code, s->b_code, s->pict_type == I_TYPE ? "I" : (s->pict_type == P_TYPE ? "P" : (s->pict_type == B_TYPE ? 
"B" : "S")), gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first, @@ -5088,7 +5797,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ if(s->enhancement_type){ int load_backward_shape= get_bits1(gb); if(load_backward_shape){ - printf("load backward shape isnt supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "load backward shape isnt supported\n"); } } skip_bits(gb, 2); //ref_select_code @@ -5097,7 +5806,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){ /* detect buggy encoders which dont set the low_delay flag (divx4/xvid/opendivx)*/ // note we cannot detect divx5 without b-frames easyly (allthough its buggy too) if(s->vo_type==0 && s->vol_control_parameters==0 && s->divx_version==0 && s->picture_number==0){ - //printf("looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n"); + av_log(s->avctx, AV_LOG_ERROR, "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n"); s->low_delay=1; } @@ -5132,7 +5841,7 @@ int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb) if(get_bits_count(gb) >= gb->size_in_bits){ if(gb->size_in_bits==8 && s->divx_version){ - printf("frame skip %d\n", gb->size_in_bits); + av_log(s->avctx, AV_LOG_ERROR, "frame skip %d\n", gb->size_in_bits); return FRAME_SKIPED; //divx bug }else return -1; //end of stream @@ -5142,35 +5851,35 @@ int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb) continue; //no startcode if(s->avctx->debug&FF_DEBUG_STARTCODE){ - printf("startcode: %3X ", startcode); - if (startcode<=0x11F) printf("Video Object Start"); - else if(startcode<=0x12F) printf("Video Object Layer Start"); - else if(startcode<=0x13F) printf("Reserved"); - else if(startcode<=0x15F) printf("FGS bp start"); - else if(startcode<=0x1AF) printf("Reserved"); - else if(startcode==0x1B0) printf("Visual Object Seq Start"); - else if(startcode==0x1B1) printf("Visual Object Seq End"); - else if(startcode==0x1B2) printf("User Data"); - else if(startcode==0x1B3) printf("Group of VOP start"); - else if(startcode==0x1B4) printf("Video Session Error"); - else if(startcode==0x1B5) printf("Visual Object Start"); - else if(startcode==0x1B6) printf("Video Object Plane start"); - else if(startcode==0x1B7) printf("slice start"); - else if(startcode==0x1B8) printf("extension start"); - else if(startcode==0x1B9) printf("fgs start"); - else if(startcode==0x1BA) printf("FBA Object start"); - else if(startcode==0x1BB) printf("FBA Object Plane start"); - else if(startcode==0x1BC) printf("Mesh Object start"); - else if(startcode==0x1BD) printf("Mesh Object Plane start"); - else if(startcode==0x1BE) printf("Still Textutre Object start"); - else if(startcode==0x1BF) printf("Textutre Spatial Layer start"); - else if(startcode==0x1C0) printf("Textutre SNR Layer start"); - else if(startcode==0x1C1) printf("Textutre Tile start"); - else if(startcode==0x1C2) printf("Textutre Shape Layer start"); - else if(startcode==0x1C3) printf("stuffing start"); - else if(startcode<=0x1C5) printf("reserved"); - else if(startcode<=0x1FF) printf("System start"); - printf(" at %d\n", get_bits_count(gb)); + av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode); + if (startcode<=0x11F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start"); + else if(startcode<=0x12F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start"); + else if(startcode<=0x13F) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); + else if(startcode<=0x15F) av_log(s->avctx, 
AV_LOG_DEBUG, "FGS bp start"); + else if(startcode<=0x1AF) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); + else if(startcode==0x1B0) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start"); + else if(startcode==0x1B1) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End"); + else if(startcode==0x1B2) av_log(s->avctx, AV_LOG_DEBUG, "User Data"); + else if(startcode==0x1B3) av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start"); + else if(startcode==0x1B4) av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error"); + else if(startcode==0x1B5) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start"); + else if(startcode==0x1B6) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start"); + else if(startcode==0x1B7) av_log(s->avctx, AV_LOG_DEBUG, "slice start"); + else if(startcode==0x1B8) av_log(s->avctx, AV_LOG_DEBUG, "extension start"); + else if(startcode==0x1B9) av_log(s->avctx, AV_LOG_DEBUG, "fgs start"); + else if(startcode==0x1BA) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start"); + else if(startcode==0x1BB) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start"); + else if(startcode==0x1BC) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start"); + else if(startcode==0x1BD) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start"); + else if(startcode==0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Textutre Object start"); + else if(startcode==0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Textutre Spatial Layer start"); + else if(startcode==0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Textutre SNR Layer start"); + else if(startcode==0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Textutre Tile start"); + else if(startcode==0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Textutre Shape Layer start"); + else if(startcode==0x1C3) av_log(s->avctx, AV_LOG_DEBUG, "stuffing start"); + else if(startcode<=0x1C5) av_log(s->avctx, AV_LOG_DEBUG, "reserved"); + else if(startcode<=0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start"); + av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb)); } if(startcode >= 0x120 && startcode <= 0x12F){ @@ -5199,17 +5908,17 @@ int intel_h263_decode_picture_header(MpegEncContext *s) /* picture header */ if (get_bits_long(&s->gb, 22) != 0x20) { - fprintf(stderr, "Bad picture start code\n"); + av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n"); return -1; } s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */ if (get_bits1(&s->gb) != 1) { - fprintf(stderr, "Bad marker\n"); + av_log(s->avctx, AV_LOG_ERROR, "Bad marker\n"); return -1; /* marker */ } if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Bad H263 id\n"); + av_log(s->avctx, AV_LOG_ERROR, "Bad H263 id\n"); return -1; /* h263 id */ } skip_bits1(&s->gb); /* split screen off */ @@ -5218,7 +5927,7 @@ int intel_h263_decode_picture_header(MpegEncContext *s) format = get_bits(&s->gb, 3); if (format != 7) { - fprintf(stderr, "Intel H263 free format not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "Intel H263 free format not supported\n"); return -1; } s->h263_plus = 0; @@ -5229,22 +5938,23 @@ int intel_h263_decode_picture_header(MpegEncContext *s) s->h263_long_vectors = s->unrestricted_mv; if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "SAC not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "SAC not supported\n"); return -1; /* SAC: off */ } if (get_bits1(&s->gb) != 0) { - fprintf(stderr, "Advanced Prediction Mode not supported\n"); - return -1; /* advanced prediction mode: off */ + s->obmc= 1; + av_log(s->avctx, AV_LOG_ERROR, "Advanced Prediction Mode not supported\n"); +// return -1; /* advanced prediction mode: off */ } if 
(get_bits1(&s->gb) != 0) { - fprintf(stderr, "PB frame mode no supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "PB frame mode no supported\n"); return -1; /* PB frame mode */ } /* skip unknown header garbage */ skip_bits(&s->gb, 41); - s->qscale = get_bits(&s->gb, 5); + s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); skip_bits1(&s->gb); /* Continuous Presence Multipoint mode: off */ /* PEI */ @@ -5265,12 +5975,12 @@ int flv_h263_decode_picture_header(MpegEncContext *s) /* picture header */ if (get_bits_long(&s->gb, 17) != 1) { - fprintf(stderr, "Bad picture start code\n"); + av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n"); return -1; } format = get_bits(&s->gb, 5); if (format != 0 && format != 1) { - fprintf(stderr, "Bad picture format\n"); + av_log(s->avctx, AV_LOG_ERROR, "Bad picture format\n"); return -1; } s->h263_flv = format+1; @@ -5318,7 +6028,7 @@ int flv_h263_decode_picture_header(MpegEncContext *s) if (s->pict_type > P_TYPE) s->pict_type = P_TYPE; skip_bits1(&s->gb); /* deblocking flag */ - s->qscale = get_bits(&s->gb, 5); + s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); s->h263_plus = 0; @@ -5332,7 +6042,7 @@ int flv_h263_decode_picture_header(MpegEncContext *s) s->f_code = 1; if(s->avctx->debug & FF_DEBUG_PICT_INFO){ - printf("%c esc_type:%d, qp:%d num:%d\n", + av_log(s->avctx, AV_LOG_DEBUG, "%c esc_type:%d, qp:%d num:%d\n", av_get_pict_type_char(s->pict_type), s->h263_flv-1, s->qscale, s->picture_number); } diff --git a/src/libffmpeg/libavcodec/h263data.h b/src/libffmpeg/libavcodec/h263data.h index 809b10da2..4da105ffc 100644 --- a/src/libffmpeg/libavcodec/h263data.h +++ b/src/libffmpeg/libavcodec/h263data.h @@ -29,6 +29,49 @@ const uint8_t inter_MCBPC_bits[28] = { 11, 13, 13, 13,/* inter4Q*/ }; +static const uint8_t h263_mbtype_b_tab[15][2] = { + {1, 1}, + {3, 3}, + {1, 5}, + {4, 4}, + {5, 4}, + {6, 6}, + {2, 4}, + {3, 4}, + {7, 6}, + {4, 6}, + {5, 6}, + {1, 6}, + {1,10}, + {1, 7}, + {1, 8}, +}; + +static const int h263_mb_type_b_map[15]= { + MB_TYPE_DIRECT2 | MB_TYPE_L0L1, + MB_TYPE_DIRECT2 | MB_TYPE_L0L1 | MB_TYPE_CBP, + MB_TYPE_DIRECT2 | MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT, + MB_TYPE_L0 | MB_TYPE_16x16, + MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_16x16, + MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16, + MB_TYPE_L1 | MB_TYPE_16x16, + MB_TYPE_L1 | MB_TYPE_CBP | MB_TYPE_16x16, + MB_TYPE_L1 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16, + MB_TYPE_L0L1 | MB_TYPE_16x16, + MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_16x16, + MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16, + 0, //stuffing + MB_TYPE_INTRA4x4 | MB_TYPE_CBP, + MB_TYPE_INTRA4x4 | MB_TYPE_CBP | MB_TYPE_QUANT, +}; + +const uint8_t cbpc_b_tab[4][2] = { +{0, 1}, +{2, 2}, +{7, 3}, +{6, 3}, +}; + const uint8_t cbpy_tab[16][2] = { {3,4}, {5,5}, {4,5}, {9,4}, {3,5}, {7,4}, {2,6}, {11,4}, @@ -183,6 +226,22 @@ static RLTable rl_intra_aic = { intra_level_aic, }; +static const uint8_t wrong_run[102] = { + 1, 2, 3, 5, 4, 10, 9, 8, +11, 15, 17, 16, 23, 22, 21, 20, +19, 18, 25, 24, 27, 26, 11, 7, + 6, 1, 2, 13, 2, 2, 2, 2, + 6, 12, 3, 9, 1, 3, 4, 3, + 7, 4, 1, 1, 5, 5, 14, 6, + 1, 7, 1, 8, 1, 1, 1, 1, +10, 1, 1, 5, 9, 17, 25, 24, +29, 33, 32, 41, 2, 23, 28, 31, + 3, 22, 30, 4, 27, 40, 8, 26, + 6, 39, 7, 38, 16, 37, 15, 10, +11, 12, 13, 14, 1, 21, 20, 18, +19, 2, 1, 34, 35, 36 +}; + static const uint16_t h263_format[8][2] = { { 0, 0 }, { 128, 96 }, @@ -192,8 +251,35 @@ static const uint16_t h263_format[8][2] = { { 1408, 1152 }, }; -static uint8_t h263_aic_dc_scale_table[32]={ +uint8_t 
ff_aic_dc_scale_table[32]={ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 0, 2, 4, 6, 8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62 }; +static const uint8_t modified_quant_tab[2][32]={ +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 +{ + 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,10,11,12,13,14,15,16,17,18,18,19,20,21,22,23,24,25,26,27,28 +},{ + 0, 2, 3, 4, 5, 6, 7, 8, 9,10,11,13,14,15,16,17,18,19,20,21,22,24,25,26,27,28,29,30,31,31,31,26 +} +}; + +const uint8_t ff_h263_chroma_qscale_table[32]={ +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9,10,10,11,11,12,12,12,13,13,13,14,14,14,14,14,15,15,15,15,15 +}; + +const uint16_t ff_mba_max[6]={ + 47, 98, 395,1583,6335,9215 +}; + +const uint8_t ff_mba_length[6]={ + 6, 7, 9, 11, 13, 14 +}; + +const uint8_t ff_h263_loop_filter_strength[32]={ +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9,10,10,10,11,11,11,12,12,12 +}; + diff --git a/src/libffmpeg/libavcodec/h263dec.c b/src/libffmpeg/libavcodec/h263dec.c index f83ff5442..88db359fe 100644 --- a/src/libffmpeg/libavcodec/h263dec.c +++ b/src/libffmpeg/libavcodec/h263dec.c @@ -1,6 +1,7 @@ /* * H.263 decoder * Copyright (c) 2001 Fabrice Bellard. + * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -28,17 +29,6 @@ //#define DEBUG //#define PRINT_FRAME_TIME -#ifdef PRINT_FRAME_TIME -static inline long long rdtsc() -{ - long long l; - asm volatile( "rdtsc\n\t" - : "=A" (l) - ); -// printf("%d\n", int(l/1000)); - return l; -} -#endif int ff_h263_decode_init(AVCodecContext *avctx) { @@ -62,9 +52,10 @@ int ff_h263_decode_init(AVCodecContext *avctx) /* select sub codec */ switch(avctx->codec->id) { case CODEC_ID_H263: - s->gob_number = 0; + s->unrestricted_mv= 0; break; case CODEC_ID_MPEG4: + s->decode_mb= ff_mpeg4_decode_mb; s->time_increment_bits = 4; /* default value for broken headers */ s->h263_pred = 1; s->low_delay = 0; //default, might be overriden in the vol header during header parsing @@ -95,7 +86,6 @@ int ff_h263_decode_init(AVCodecContext *avctx) s->msmpeg4_version=5; break; case CODEC_ID_H263I: - s->h263_intel = 1; break; case CODEC_ID_FLV1: s->h263_flv = 1; @@ -155,8 +145,7 @@ static int decode_slice(MpegEncContext *s){ s->resync_mb_x= s->mb_x; s->resync_mb_y= s->mb_y; - s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; - s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; + ff_set_qscale(s, s->qscale); if(s->partitioned_frame){ const int qscale= s->qscale; @@ -170,9 +159,7 @@ static int decode_slice(MpegEncContext *s){ s->first_slice_line=1; s->mb_x= s->resync_mb_x; s->mb_y= s->resync_mb_y; - s->qscale= qscale; - s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; - s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; + ff_set_qscale(s, qscale); } for(; s->mb_y < s->mb_height; s->mb_y++) { @@ -217,6 +204,8 @@ static int decode_slice(MpegEncContext *s){ const int xy= s->mb_x + s->mb_y*s->mb_stride; if(ret==SLICE_END){ MPV_decode_mb(s, s->block); + if(s->loop_filter) + ff_h263_loop_filter(s); //printf("%d %d %d %06X\n", s->mb_x, s->mb_y, s->gb.size*8 - get_bits_count(&s->gb), show_bits(&s->gb, 24)); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, 
(AC_END|DC_END|MV_END)&part_mask); @@ -230,17 +219,19 @@ static int decode_slice(MpegEncContext *s){ } return 0; }else if(ret==SLICE_NOEND){ - fprintf(stderr,"Slice mismatch at MB: %d\n", xy); + av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x+1, s->mb_y, (AC_END|DC_END|MV_END)&part_mask); return -1; } - fprintf(stderr,"Error at MB: %d\n", xy); + av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); return -1; } MPV_decode_mb(s, s->block); + if(s->loop_filter) + ff_h263_loop_filter(s); } ff_draw_horiz_band(s, s->mb_y*16, 16); @@ -292,17 +283,17 @@ static int decode_slice(MpegEncContext *s){ max_extra+= 256*256*256*64; if(left>max_extra){ - fprintf(stderr, "discarding %d junk bits at end, next would be %X\n", left, show_bits(&s->gb, 24)); + av_log(s->avctx, AV_LOG_ERROR, "discarding %d junk bits at end, next would be %X\n", left, show_bits(&s->gb, 24)); } else if(left<0){ - fprintf(stderr, "overreading %d bits\n", -left); + av_log(s->avctx, AV_LOG_ERROR, "overreading %d bits\n", -left); }else ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END); return 0; } - fprintf(stderr, "slice end not reached but screenspace end (%d left %06X, score= %d)\n", + av_log(s->avctx, AV_LOG_ERROR, "slice end not reached but screenspace end (%d left %06X, score= %d)\n", s->gb.size_in_bits - get_bits_count(&s->gb), show_bits(&s->gb, 24), s->padding_bug_score); @@ -402,6 +393,7 @@ uint64_t time= rdtsc(); printf("bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]); #endif s->flags= avctx->flags; + s->flags2= avctx->flags2; *data_size = 0; @@ -426,7 +418,7 @@ uint64_t time= rdtsc(); }else if(s->codec_id==CODEC_ID_H263){ next= h263_find_frame_end(s, buf, buf_size); }else{ - fprintf(stderr, "this codec doesnt support truncated bitstreams\n"); + av_log(s->avctx, AV_LOG_ERROR, "this codec doesnt support truncated bitstreams\n"); return -1; } @@ -434,9 +426,10 @@ uint64_t time= rdtsc(); return buf_size; } + retry: - if(s->bitstream_buffer_size && buf_size<20){ //divx 5.01+ frame reorder + if(s->bitstream_buffer_size && (s->divx_packed || buf_size<20)){ //divx 5.01+/xvid frame reorder init_get_bits(&s->gb, s->bitstream_buffer, s->bitstream_buffer_size*8); }else init_get_bits(&s->gb, buf, buf_size*8); @@ -446,6 +439,12 @@ retry: if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix return -1; } + + //we need to set current_picture_ptr before reading the header, otherwise we cant store anyting im there + if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ + int i= ff_find_unused_picture(s, 0); + s->current_picture_ptr= &s->picture[i]; + } /* let's go :-) */ if (s->msmpeg4_version==5) { @@ -463,7 +462,7 @@ retry: if(s->flags& CODEC_FLAG_LOW_DELAY) s->low_delay=1; - } else if (s->h263_intel) { + } else if (s->codec_id == CODEC_ID_H263I) { ret = intel_h263_decode_picture_header(s); } else if (s->h263_flv) { ret = flv_h263_decode_picture_header(s); @@ -475,7 +474,7 @@ retry: /* skip if the header was thrashed */ if (ret < 0){ - fprintf(stderr, "header damaged\n"); + av_log(s->avctx, AV_LOG_ERROR, "header damaged\n"); return -1; } @@ -585,7 +584,7 @@ retry: } if(avctx->debug & FF_DEBUG_BUGS) - printf("bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n", + av_log(s->avctx, AV_LOG_DEBUG, "bugs: %X lavc_build:%d xvid_build:%d 
divx_version:%d divx_build:%d %s\n", s->workaround_bugs, s->lavc_build, s->xvid_build, s->divx_version, s->divx_build, s->divx_packed ? "p" : ""); @@ -681,21 +680,26 @@ retry: /* divx 5.01+ bistream reorder stuff */ if(s->codec_id==CODEC_ID_MPEG4 && s->bitstream_buffer_size==0 && s->divx_packed){ int current_pos= get_bits_count(&s->gb)>>3; + int startcode_found=0; if( buf_size - current_pos > 5 && buf_size - current_pos < BITSTREAM_BUFFER_SIZE){ int i; - int startcode_found=0; for(i=current_pos; i<buf_size-3; i++){ if(buf[i]==0 && buf[i+1]==0 && buf[i+2]==1 && buf[i+3]==0xB6){ startcode_found=1; break; } } - if(startcode_found){ - memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); - s->bitstream_buffer_size= buf_size - current_pos; - } + } + if(s->gb.buffer == s->bitstream_buffer && buf_size>20){ //xvid style + startcode_found=1; + current_pos=0; + } + + if(startcode_found){ + memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); + s->bitstream_buffer_size= buf_size - current_pos; } } @@ -707,10 +711,10 @@ assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); assert(s->current_picture.pict_type == s->pict_type); if(s->pict_type==B_TYPE || s->low_delay){ *pict= *(AVFrame*)&s->current_picture; - ff_print_debug_info(s, s->current_picture_ptr); + ff_print_debug_info(s, pict); } else { *pict= *(AVFrame*)&s->last_picture; - ff_print_debug_info(s, s->last_picture_ptr); + ff_print_debug_info(s, pict); } /* Return the Picture timestamp as the frame number */ diff --git a/src/libffmpeg/libavcodec/h264.c b/src/libffmpeg/libavcodec/h264.c index 96f0925d7..43ed13e99 100644 --- a/src/libffmpeg/libavcodec/h264.c +++ b/src/libffmpeg/libavcodec/h264.c @@ -643,7 +643,7 @@ static inline int check_intra4x4_pred_mode(H264Context *h){ for(i=0; i<4; i++){ int status= top[ h->intra4x4_pred_mode_cache[scan8[0] + i] ]; if(status<0){ - fprintf(stderr, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y); return -1; } else if(status){ h->intra4x4_pred_mode_cache[scan8[0] + i]= status; @@ -655,7 +655,7 @@ static inline int check_intra4x4_pred_mode(H264Context *h){ for(i=0; i<4; i++){ int status= left[ h->intra4x4_pred_mode_cache[scan8[0] + 8*i] ]; if(status<0){ - fprintf(stderr, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, s->mb_x, s->mb_y); return -1; } else if(status){ h->intra4x4_pred_mode_cache[scan8[0] + 8*i]= status; @@ -677,7 +677,7 @@ static inline int check_intra_pred_mode(H264Context *h, int mode){ if(!(h->top_samples_available&0x8000)){ mode= top[ mode ]; if(mode<0){ - fprintf(stderr, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y); return -1; } } @@ -685,7 +685,7 @@ static inline int check_intra_pred_mode(H264Context *h, int mode){ if(!(h->left_samples_available&0x8000)){ mode= left[ mode ]; if(mode<0){ - fprintf(stderr, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y); return -1; } } @@ -1001,8 +1001,6 @@ static uint8_t 
*decode_nal(H264Context *h, uint8_t *src, int *dst_length, int *c return dst; } -#ifdef CONFIG_ENCODERS - /** * @param src the data which should be escaped * @param dst the target buffer, dst+1 == src is allowed as a special case @@ -1076,8 +1074,6 @@ static void encode_rbsp_trailing(PutBitContext *pb){ if(length) put_bits(pb, length, 0); } -#endif - /** * identifies the exact end of the bitstream * @return the length of the trailing, or 0 if damaged @@ -1136,7 +1132,6 @@ static void h264_luma_dc_dequant_idct_c(DCTELEM *block, int qp){ } } -#if 0 /** * dct tranforms the 16 dc values. * @param qp quantization parameter ??? FIXME @@ -1174,8 +1169,6 @@ static void h264_luma_dc_dct_c(DCTELEM *block/*, int qp*/){ block[stride*10+offset]= (z0 - z3)>>1; } } -#endif - #undef xStride #undef stride @@ -1201,7 +1194,6 @@ static void chroma_dc_dequant_idct_c(DCTELEM *block, int qp){ block[stride*1 + xStride*1]= ((e-b)*qmul + 0)>>1; } -#if 0 static void chroma_dc_dct_c(DCTELEM *block){ const int stride= 16*2; const int xStride= 16; @@ -1222,7 +1214,6 @@ static void chroma_dc_dct_c(DCTELEM *block){ block[stride*1 + xStride*0]= (a-c); block[stride*1 + xStride*1]= (e-b); } -#endif /** * gets the chroma qp. @@ -1292,7 +1283,6 @@ static void h264_add_idct_c(uint8_t *dst, DCTELEM *block, int stride){ #endif } -#if 0 static void h264_diff_dct_c(DCTELEM *block, uint8_t *src1, uint8_t *src2, int stride){ int i; //FIXME try int temp instead of block @@ -1325,7 +1315,6 @@ static void h264_diff_dct_c(DCTELEM *block, uint8_t *src1, uint8_t *src2, int st block[3*4 + i]= z3 - 2*z2; } } -#endif //FIXME need to check that this doesnt overflow signed 32 bit for low qp, iam not sure, its very close //FIXME check that gcc inlines this (and optimizes intra & seperate_dc stuff away) @@ -2169,6 +2158,7 @@ static void common_init(H264Context *h){ init_pred_ptrs(h); + s->unrestricted_mv=1; s->decode=1; //FIXME } @@ -2343,11 +2333,9 @@ static void hl_decode_mb(H264Context *h){ } } -#if 0 static void decode_mb_cabac(H264Context *h){ // MpegEncContext * const s = &h->s; } -#endif /** * fills the default_ref_list. 
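decode_nal(), visible as context in the hunk above, is the routine that strips H.264 start-code emulation-prevention bytes before a NAL unit is parsed. As a rough, self-contained sketch of that unescaping step only (not the libavcodec implementation, which also reports the consumed length and keeps a fast path; the function name below is illustrative), the logic amounts to dropping every 0x03 byte that follows two consecutive zero bytes:

    /* Illustrative sketch: remove H.264 emulation-prevention bytes.
     * The encoder inserts 0x03 after a 0x00 0x00 pair; the decoder
     * simply skips any 0x03 that directly follows two zero bytes. */
    #include <stddef.h>
    #include <stdint.h>

    static size_t nal_unescape(uint8_t *dst, const uint8_t *src, size_t len)
    {
        size_t si = 0, di = 0;
        int zeros = 0;                 /* consecutive 0x00 bytes seen so far */

        while (si < len) {
            if (zeros >= 2 && src[si] == 0x03) {
                zeros = 0;             /* skip the emulation-prevention byte */
                si++;
                continue;
            }
            zeros = (src[si] == 0) ? zeros + 1 : 0;
            dst[di++] = src[si++];     /* copy the raw RBSP byte */
        }
        return di;                     /* length of the unescaped payload */
    }

For example, the escaped sequence 00 00 03 00 00 03 01 comes back out as 00 00 00 00 01.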
@@ -2460,7 +2448,7 @@ static int decode_ref_pic_list_reordering(H264Context *h){ if(index >= h->ref_count[list]){ - fprintf(stderr, "reference count overflow\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n"); return -1; } @@ -2469,7 +2457,7 @@ static int decode_ref_pic_list_reordering(H264Context *h){ const int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1; if(abs_diff_pic_num >= h->max_pic_num){ - fprintf(stderr, "abs_diff_pic_num overflow\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n"); return -1; } @@ -2491,7 +2479,7 @@ static int decode_ref_pic_list_reordering(H264Context *h){ } if(i < index){ - fprintf(stderr, "reference picture missing during reorder\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n"); memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME }else if(i > index){ Picture tmp= h->ref_list[list][i]; @@ -2503,7 +2491,7 @@ static int decode_ref_pic_list_reordering(H264Context *h){ }else if(reordering_of_pic_nums_idc==3) break; else{ - fprintf(stderr, "illegal reordering_of_pic_nums_idc\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n"); return -1; } } @@ -2573,12 +2561,12 @@ static Picture * remove_short(H264Context *h, int frame_num){ int i; if(s->avctx->debug&FF_DEBUG_MMCO) - printf("remove short %d count %d\n", frame_num, h->short_ref_count); + av_log(h->s.avctx, AV_LOG_DEBUG, "remove short %d count %d\n", frame_num, h->short_ref_count); for(i=0; i<h->short_ref_count; i++){ Picture *pic= h->short_ref[i]; if(s->avctx->debug&FF_DEBUG_MMCO) - printf("%d %d %p\n", i, pic->frame_num, pic); + av_log(h->s.avctx, AV_LOG_DEBUG, "%d %d %p\n", i, pic->frame_num, pic); if(pic->frame_num == frame_num){ h->short_ref[i]= NULL; memmove(&h->short_ref[i], &h->short_ref[i+1], (h->short_ref_count - i - 1)*sizeof(Picture*)); @@ -2617,11 +2605,11 @@ static int execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ Picture *pic; if((s->avctx->debug&FF_DEBUG_MMCO) && mmco_count==0) - printf("no mmco here\n"); + av_log(h->s.avctx, AV_LOG_DEBUG, "no mmco here\n"); for(i=0; i<mmco_count; i++){ if(s->avctx->debug&FF_DEBUG_MMCO) - printf("mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_frame_num, h->mmco[i].long_index); + av_log(h->s.avctx, AV_LOG_DEBUG, "mmco:%d %d %d\n", h->mmco[i].opcode, h->mmco[i].short_frame_num, h->mmco[i].long_index); switch(mmco[i].opcode){ case MMCO_SHORT2UNUSED: @@ -2679,7 +2667,7 @@ static int execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){ pic= remove_short(h, s->current_picture_ptr->frame_num); if(pic){ pic->reference=0; - fprintf(stderr, "illegal short term buffer state detected\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term buffer state detected\n"); } if(h->short_ref_count) @@ -2722,13 +2710,13 @@ static int decode_ref_pic_marking(H264Context *h){ if(opcode==MMCO_SHORT2LONG || opcode==MMCO_LONG2UNUSED || opcode==MMCO_LONG || opcode==MMCO_SET_MAX_LONG){ h->mmco[i].long_index= get_ue_golomb(&s->gb); if(/*h->mmco[i].long_index >= h->long_ref_count || h->long_ref[ h->mmco[i].long_index ] == NULL*/ h->mmco[i].long_index >= 16){ - fprintf(stderr, "illegal long ref in memory management control operation %d\n", opcode); + av_log(h->s.avctx, AV_LOG_ERROR, "illegal long ref in memory management control operation %d\n", opcode); return -1; } } if(opcode > MMCO_LONG){ - fprintf(stderr, "illegal memory management control operation %d\n", opcode); + av_log(h->s.avctx, AV_LOG_ERROR, "illegal memory management control 
operation %d\n", opcode); return -1; } } @@ -2848,7 +2836,7 @@ static int decode_slice_header(H264Context *h){ h->slice_type= get_ue_golomb(&s->gb); if(h->slice_type > 9){ - fprintf(stderr, "slice type too large (%d) at %d %d\n", h->slice_type, s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", h->slice_type, s->mb_x, s->mb_y); } if(h->slice_type > 4){ h->slice_type -= 5; @@ -2862,18 +2850,18 @@ static int decode_slice_header(H264Context *h){ pps_id= get_ue_golomb(&s->gb); if(pps_id>255){ - fprintf(stderr, "pps_id out of range\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n"); return -1; } h->pps= h->pps_buffer[pps_id]; if(h->pps.slice_group_count == 0){ - fprintf(stderr, "non existing PPS referenced\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "non existing PPS referenced\n"); return -1; } h->sps= h->sps_buffer[ h->pps.sps_id ]; if(h->sps.log2_max_frame_num == 0){ - fprintf(stderr, "non existing SPS referenced\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "non existing SPS referenced\n"); return -1; } @@ -2932,11 +2920,9 @@ static int decode_slice_header(H264Context *h){ h->max_pic_num= 1<<(h->sps.log2_max_frame_num + 1); } -#if 0 if(h->nal_unit_type == NAL_IDR_SLICE){ get_ue_golomb(&s->gb); /* idr_pic_id */ } -#endif if(h->sps.poc_type==0){ h->poc_lsb= get_bits(&s->gb, h->sps.log2_max_poc_lsb); @@ -2975,7 +2961,7 @@ static int decode_slice_header(H264Context *h){ h->ref_count[1]= get_ue_golomb(&s->gb) + 1; if(h->ref_count[0] > 32 || h->ref_count[1] > 32){ - fprintf(stderr, "reference overflow\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n"); return -1; } } @@ -2997,14 +2983,12 @@ static int decode_slice_header(H264Context *h){ s->qscale = h->pps.init_qp + get_se_golomb(&s->gb); //slice_qp_delta //FIXME qscale / qp ... 
stuff -#if 0 if(h->slice_type == SP_TYPE){ get_bits1(&s->gb); /* sp_for_switch_flag */ } if(h->slice_type==SP_TYPE || h->slice_type == SI_TYPE){ get_se_golomb(&s->gb); /* slice_qs_delta */ } -#endif if( h->pps.deblocking_filter_parameters_present ) { h->disable_deblocking_filter_idc= get_ue_golomb(&s->gb); @@ -3021,7 +3005,7 @@ static int decode_slice_header(H264Context *h){ #endif if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("mb:%d %c pps:%d frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d\n", + av_log(h->s.avctx, AV_LOG_DEBUG, "mb:%d %c pps:%d frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d\n", first_mb_in_slice, av_get_pict_type_char(h->slice_type), pps_id, h->frame_num, @@ -3123,7 +3107,7 @@ static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, in level_code= (prefix<<suffix_length) + get_bits(gb, 12); //part if(suffix_length==0) level_code+=15; //FIXME doesnt make (much)sense }else{ - fprintf(stderr, "prefix too large at %d %d\n", s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "prefix too large at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -3164,7 +3148,7 @@ static int decode_residual(H264Context *h, GetBitContext *gb, DCTELEM *block, in } if(zeros_left<0){ - fprintf(stderr, "negative number of zero coeffs at %d %d\n", s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "negative number of zero coeffs at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -3274,7 +3258,7 @@ static int decode_mb(H264Context *h){ assert(h->slice_type == I_TYPE); decode_intra_mb: if(mb_type > 25){ - fprintf(stderr, "mb_type %d in %c slice to large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice to large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y); return -1; } partition_count=0; @@ -3370,7 +3354,7 @@ decode_intra_mb: for(i=0; i<4; i++){ h->sub_mb_type[i]= get_ue_golomb(&s->gb); if(h->sub_mb_type[i] >=13){ - fprintf(stderr, "B sub_mb_type %d out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "B sub_mb_type %d out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y); return -1; } sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; @@ -3381,7 +3365,7 @@ decode_intra_mb: for(i=0; i<4; i++){ h->sub_mb_type[i]= get_ue_golomb(&s->gb); if(h->sub_mb_type[i] >=4){ - fprintf(stderr, "P sub_mb_type %d out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "P sub_mb_type %d out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y); return -1; } sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; @@ -3525,7 +3509,7 @@ decode_intra_mb: if(!IS_INTRA16x16(mb_type)){ cbp= get_ue_golomb(&s->gb); if(cbp > 47){ - fprintf(stderr, "cbp too large (%d) at %d %d\n", cbp, s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%d) at %d %d\n", cbp, s->mb_x, s->mb_y); return -1; } @@ -3554,7 +3538,7 @@ decode_intra_mb: dquant= get_se_golomb(&s->gb); if( dquant > 25 || dquant < -26 ){ - fprintf(stderr, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y); return -1; } @@ -3582,7 +3566,7 @@ decode_intra_mb: } } }else{ - memset(&h->non_zero_count_cache[8], 0, 8*4); //FIXME stupid & slow + fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1); } }else{ for(i8x8=0; i8x8<4; i8x8++){ @@ -3651,7 +3635,7 
@@ static int decode_slice(H264Context *h){ } if(ret<0){ - fprintf(stderr, "error while decoding MB %d %d\n", s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y); ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask); return -1; @@ -3750,7 +3734,7 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){ }else if(aspect_ratio_idc < 16){ sps->sar= pixel_aspect[aspect_ratio_idc]; }else{ - fprintf(stderr, "illegal aspect ratio\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); return -1; } }else{ @@ -3842,7 +3826,7 @@ static inline int decode_seq_parameter_set(H264Context *h){ sps->offset_for_ref_frame[i]= get_se_golomb(&s->gb); } if(sps->poc_type > 2){ - fprintf(stderr, "illegal POC type %d\n", sps->poc_type); + av_log(h->s.avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); return -1; } @@ -3865,7 +3849,7 @@ static inline int decode_seq_parameter_set(H264Context *h){ sps->crop_top = get_ue_golomb(&s->gb); sps->crop_bottom= get_ue_golomb(&s->gb); if(sps->crop_left || sps->crop_top){ - fprintf(stderr, "insane croping not completly supported, this could look slightly wrong ...\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completly supported, this could look slightly wrong ...\n"); } }else{ sps->crop_left = @@ -3879,7 +3863,7 @@ static inline int decode_seq_parameter_set(H264Context *h){ decode_vui_parameters(h, sps); if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("sps:%d profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s\n", + av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%d profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s\n", sps_id, sps->profile_idc, sps->level_idc, sps->poc_type, sps->ref_frame_count, @@ -3905,7 +3889,7 @@ static inline int decode_picture_parameter_set(H264Context *h){ pps->slice_group_count= get_ue_golomb(&s->gb) + 1; if(pps->slice_group_count > 1 ){ pps->mb_slice_group_map_type= get_ue_golomb(&s->gb); -fprintf(stderr, "FMO not supported\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "FMO not supported\n"); switch(pps->mb_slice_group_map_type){ case 0: #if 0 @@ -3943,7 +3927,7 @@ fprintf(stderr, "FMO not supported\n"); pps->ref_count[0]= get_ue_golomb(&s->gb) + 1; pps->ref_count[1]= get_ue_golomb(&s->gb) + 1; if(pps->ref_count[0] > 32 || pps->ref_count[1] > 32){ - fprintf(stderr, "reference overflow (pps)\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow (pps)\n"); return -1; } @@ -3957,7 +3941,7 @@ fprintf(stderr, "FMO not supported\n"); pps->redundant_pic_cnt_present = get_bits1(&s->gb); if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("pps:%d sps:%d %s slice_groups:%d ref:%d/%d %s qp:%d/%d/%d %s %s %s\n", + av_log(h->s.avctx, AV_LOG_DEBUG, "pps:%d sps:%d %s slice_groups:%d ref:%d/%d %s qp:%d/%d/%d %s %s %s\n", pps_id, pps->sps_id, pps->cabac ? 
"CABAC" : "CAVLC", pps->slice_group_count, @@ -4033,7 +4017,7 @@ static int decode_nal_units(H264Context *h, uint8_t *buf, int buf_size){ bit_length= 8*dst_length - decode_rbsp_trailing(ptr + dst_length - 1); if(s->avctx->debug&FF_DEBUG_STARTCODE){ - printf("NAL %d at %d length %d\n", h->nal_unit_type, buf_index, dst_length); + av_log(h->s.avctx, AV_LOG_DEBUG, "NAL %d at %d length %d\n", h->nal_unit_type, buf_index, dst_length); } buf_index += consumed; @@ -4147,6 +4131,7 @@ static int decode_frame(AVCodecContext *avctx, int buf_index; s->flags= avctx->flags; + s->flags2= avctx->flags2; *data_size = 0; @@ -4183,12 +4168,12 @@ static int decode_frame(AVCodecContext *avctx, } #endif if(!s->current_picture_ptr){ - fprintf(stderr, "error, NO frame\n"); + av_log(h->s.avctx, AV_LOG_DEBUG, "error, NO frame\n"); return -1; } *pict= *(AVFrame*)&s->current_picture; //FIXME - ff_print_debug_info(s, s->current_picture_ptr); + ff_print_debug_info(s, pict); assert(pict->data[0]); //printf("out %d\n", (int)pict->data[0]); #if 0 //? diff --git a/src/libffmpeg/libavcodec/huffyuv.c b/src/libffmpeg/libavcodec/huffyuv.c index 7db748936..d180d6aee 100644 --- a/src/libffmpeg/libavcodec/huffyuv.c +++ b/src/libffmpeg/libavcodec/huffyuv.c @@ -30,10 +30,6 @@ #include "avcodec.h" #include "dsputil.h" -#ifndef INT64_MAX -#define INT64_MAX 9223372036854775807LL -#endif - #define VLC_BITS 11 typedef enum Predictor{ @@ -219,7 +215,7 @@ static int generate_bits_table(uint32_t *dst, uint8_t *len_table){ dst[index]= bits++; } if(bits & 1){ - fprintf(stderr, "Error generating huffman table\n"); + av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n"); return -1; } bits >>= 1; @@ -227,7 +223,6 @@ static int generate_bits_table(uint32_t *dst, uint8_t *len_table){ return 0; } -#ifdef CONFIG_ENCODERS static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){ uint64_t counts[2*size]; int up[2*size]; @@ -283,7 +278,6 @@ static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){ if(i==size) break; } } -#endif static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){ GetBitContext gb; @@ -435,8 +429,6 @@ s->bgr32=1; return 0; } -#ifdef CONFIG_ENCODERS - static void store_table(HYuvContext *s, uint8_t *len){ int i; int index= s->avctx->extradata_size; @@ -484,7 +476,7 @@ static int encode_init(AVCodecContext *avctx) switch(avctx->pix_fmt){ case PIX_FMT_YUV420P: if(avctx->strict_std_compliance>=0){ - fprintf(stderr, "YV12-huffyuv is experimental, there WILL be no compatbility! (use (v)strict=-1)\n"); + av_log(avctx, AV_LOG_ERROR, "YV12-huffyuv is experimental, there WILL be no compatbility! 
(use (v)strict=-1)\n"); return -1; } s->bitstream_bpp= 12; @@ -493,7 +485,7 @@ static int encode_init(AVCodecContext *avctx) s->bitstream_bpp= 16; break; default: - fprintf(stderr, "format not supported\n"); + av_log(avctx, AV_LOG_ERROR, "format not supported\n"); return -1; } avctx->bits_per_sample= s->bitstream_bpp; @@ -557,8 +549,6 @@ static int encode_init(AVCodecContext *avctx) return 0; } -#endif - static void decode_422_bitstream(HYuvContext *s, int count){ int i; @@ -583,8 +573,6 @@ static void decode_gray_bitstream(HYuvContext *s, int count){ } } -#ifdef CONFIG_ENCODERS - static void encode_422_bitstream(HYuvContext *s, int count){ int i; @@ -623,8 +611,6 @@ static void encode_gray_bitstream(HYuvContext *s, int count){ } } -#endif - static void decode_bgr_bitstream(HYuvContext *s, int count){ int i; @@ -713,7 +699,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8 p->reference= 0; if(avctx->get_buffer(avctx, p) < 0){ - fprintf(stderr, "get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } @@ -734,7 +720,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8 p->data[0][1]= get_bits(&s->gb, 8); p->data[0][0]= get_bits(&s->gb, 8); - fprintf(stderr, "YUY2 output isnt implemenetd yet\n"); + av_log(avctx, AV_LOG_ERROR, "YUY2 output isnt implemenetd yet\n"); return -1; }else{ @@ -906,11 +892,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8 draw_slice(s, height); // just 1 large slice as this isnt possible in reverse order break; default: - fprintf(stderr, "prediction type not supported!\n"); + av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n"); } }else{ - fprintf(stderr, "BGR24 output isnt implemenetd yet\n"); + av_log(avctx, AV_LOG_ERROR, "BGR24 output isnt implemenetd yet\n"); return -1; } } @@ -936,8 +922,6 @@ static int decode_end(AVCodecContext *avctx) return 0; } -#ifdef CONFIG_ENCODERS - static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ HYuvContext *s = avctx->priv_data; AVFrame *pict = data; @@ -1061,7 +1045,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, } } }else{ - fprintf(stderr, "Format not supported!\n"); + av_log(avctx, AV_LOG_ERROR, "Format not supported!\n"); } emms_c(); @@ -1099,8 +1083,6 @@ static int encode_end(AVCodecContext *avctx) return 0; } -#endif - static const AVOption huffyuv_options[] = { AVOPTION_CODEC_INT("prediction_method", "prediction_method", prediction_method, 0, 2, 0), diff --git a/src/libffmpeg/libavcodec/i386/cputest.c b/src/libffmpeg/libavcodec/i386/cputest.c index b885548ee..b50d653c4 100644 --- a/src/libffmpeg/libavcodec/i386/cputest.c +++ b/src/libffmpeg/libavcodec/i386/cputest.c @@ -1,13 +1,122 @@ -/* dummy file to use xine mm_support function */ +/* Cpu detection code, extracted from mmx.h ((c)1997-99 by H. Dietz + and R. Fisher). Converted to C and improved by Fabrice Bellard */ -#include "xineutils.h" +#include <stdlib.h> #include "../dsputil.h" +/* ebx saving is necessary for PIC. gcc seems unable to see it alone */ +#define cpuid(index,eax,ebx,ecx,edx)\ + __asm __volatile\ + ("movl %%ebx, %%esi\n\t"\ + "cpuid\n\t"\ + "xchgl %%ebx, %%esi"\ + : "=a" (eax), "=S" (ebx),\ + "=c" (ecx), "=d" (edx)\ + : "0" (index)); /* Function to test if multimedia instructions are supported... 
*/ int mm_support(void) { - return xine_mm_accel(); + int rval; + int eax, ebx, ecx, edx; + + __asm__ __volatile__ ( + /* See if CPUID instruction is supported ... */ + /* ... Get copies of EFLAGS into eax and ecx */ + "pushf\n\t" + "popl %0\n\t" + "movl %0, %1\n\t" + + /* ... Toggle the ID bit in one copy and store */ + /* to the EFLAGS reg */ + "xorl $0x200000, %0\n\t" + "push %0\n\t" + "popf\n\t" + + /* ... Get the (hopefully modified) EFLAGS */ + "pushf\n\t" + "popl %0\n\t" + : "=a" (eax), "=c" (ecx) + : + : "cc" + ); + + if (eax == ecx) + return 0; /* CPUID not supported */ + + cpuid(0, eax, ebx, ecx, edx); + + if (ebx == 0x756e6547 && + edx == 0x49656e69 && + ecx == 0x6c65746e) { + + /* intel */ + inteltest: + cpuid(1, eax, ebx, ecx, edx); + if ((edx & 0x00800000) == 0) + return 0; + rval = MM_MMX; + if (edx & 0x02000000) + rval |= MM_MMXEXT | MM_SSE; + if (edx & 0x04000000) + rval |= MM_SSE2; + return rval; + } else if (ebx == 0x68747541 && + edx == 0x69746e65 && + ecx == 0x444d4163) { + /* AMD */ + cpuid(0x80000000, eax, ebx, ecx, edx); + if ((unsigned)eax < 0x80000001) + goto inteltest; + cpuid(0x80000001, eax, ebx, ecx, edx); + if ((edx & 0x00800000) == 0) + return 0; + rval = MM_MMX; + if (edx & 0x80000000) + rval |= MM_3DNOW; + if (edx & 0x00400000) + rval |= MM_MMXEXT; + return rval; + } else if (ebx == 0x746e6543 && + edx == 0x48727561 && + ecx == 0x736c7561) { /* "CentaurHauls" */ + /* VIA C3 */ + cpuid(0x80000000, eax, ebx, ecx, edx); + if ((unsigned)eax < 0x80000001) + goto inteltest; + cpuid(0x80000001, eax, ebx, ecx, edx); + rval = 0; + if( edx & ( 1 << 31) ) + rval |= MM_3DNOW; + if( edx & ( 1 << 23) ) + rval |= MM_MMX; + if( edx & ( 1 << 24) ) + rval |= MM_MMXEXT; + return rval; + } else if (ebx == 0x69727943 && + edx == 0x736e4978 && + ecx == 0x64616574) { + /* Cyrix Section */ + /* See if extended CPUID level 80000001 is supported */ + /* The value of CPUID/80000001 for the 6x86MX is undefined + according to the Cyrix CPU Detection Guide (Preliminary + Rev. 1.01 table 1), so we'll check the value of eax for + CPUID/0 to see if standard CPUID level 2 is supported. + According to the table, the only CPU which supports level + 2 is also the only one which supports extended CPUID levels. + */ + if (eax != 2) + goto inteltest; + cpuid(0x80000001, eax, ebx, ecx, edx); + if ((eax & 0x00800000) == 0) + return 0; + rval = MM_MMX; + if (eax & 0x01000000) + rval |= MM_MMXEXT; + return rval; + } else { + return 0; + } } #ifdef __TEST__ diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c index a1e1642d5..efa022557 100644 --- a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c +++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c @@ -1,6 +1,7 @@ /* * MMX optimized DSP utils * Copyright (c) 2000, 2001 Fabrice Bellard. 
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -22,6 +23,11 @@ #include "../dsputil.h" #include "../simple_idct.h" +//#undef NDEBUG +//#include <assert.h> + +extern const uint8_t ff_h263_loop_filter_strength[32]; + int mm_flags; /* multimedia extension flags */ /* pixel operations */ @@ -34,6 +40,8 @@ static const uint64_t ff_pw_3 __attribute__ ((aligned(8))) = 0x0003000300030003 static const uint64_t ff_pw_16 __attribute__ ((aligned(8))) = 0x0010001000100010ULL; static const uint64_t ff_pw_15 __attribute__ ((aligned(8))) = 0x000F000F000F000FULL; +static const uint64_t ff_pb_FC __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL; + #define JUMPALIGN() __asm __volatile (".balign 8"::) #define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::) @@ -465,6 +473,180 @@ static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){ dst[i+0] += src[i+0]; } +#define H263_LOOP_FILTER \ + "pxor %%mm7, %%mm7 \n\t"\ + "movq %0, %%mm0 \n\t"\ + "movq %0, %%mm1 \n\t"\ + "movq %3, %%mm2 \n\t"\ + "movq %3, %%mm3 \n\t"\ + "punpcklbw %%mm7, %%mm0 \n\t"\ + "punpckhbw %%mm7, %%mm1 \n\t"\ + "punpcklbw %%mm7, %%mm2 \n\t"\ + "punpckhbw %%mm7, %%mm3 \n\t"\ + "psubw %%mm2, %%mm0 \n\t"\ + "psubw %%mm3, %%mm1 \n\t"\ + "movq %1, %%mm2 \n\t"\ + "movq %1, %%mm3 \n\t"\ + "movq %2, %%mm4 \n\t"\ + "movq %2, %%mm5 \n\t"\ + "punpcklbw %%mm7, %%mm2 \n\t"\ + "punpckhbw %%mm7, %%mm3 \n\t"\ + "punpcklbw %%mm7, %%mm4 \n\t"\ + "punpckhbw %%mm7, %%mm5 \n\t"\ + "psubw %%mm2, %%mm4 \n\t"\ + "psubw %%mm3, %%mm5 \n\t"\ + "psllw $2, %%mm4 \n\t"\ + "psllw $2, %%mm5 \n\t"\ + "paddw %%mm0, %%mm4 \n\t"\ + "paddw %%mm1, %%mm5 \n\t"\ + "pxor %%mm6, %%mm6 \n\t"\ + "pcmpgtw %%mm4, %%mm6 \n\t"\ + "pcmpgtw %%mm5, %%mm7 \n\t"\ + "pxor %%mm6, %%mm4 \n\t"\ + "pxor %%mm7, %%mm5 \n\t"\ + "psubw %%mm6, %%mm4 \n\t"\ + "psubw %%mm7, %%mm5 \n\t"\ + "psrlw $3, %%mm4 \n\t"\ + "psrlw $3, %%mm5 \n\t"\ + "packuswb %%mm5, %%mm4 \n\t"\ + "packsswb %%mm7, %%mm6 \n\t"\ + "pxor %%mm7, %%mm7 \n\t"\ + "movd %4, %%mm2 \n\t"\ + "punpcklbw %%mm2, %%mm2 \n\t"\ + "punpcklbw %%mm2, %%mm2 \n\t"\ + "punpcklbw %%mm2, %%mm2 \n\t"\ + "psubusb %%mm4, %%mm2 \n\t"\ + "movq %%mm2, %%mm3 \n\t"\ + "psubusb %%mm4, %%mm3 \n\t"\ + "psubb %%mm3, %%mm2 \n\t"\ + "movq %1, %%mm3 \n\t"\ + "movq %2, %%mm4 \n\t"\ + "pxor %%mm6, %%mm3 \n\t"\ + "pxor %%mm6, %%mm4 \n\t"\ + "paddusb %%mm2, %%mm3 \n\t"\ + "psubusb %%mm2, %%mm4 \n\t"\ + "pxor %%mm6, %%mm3 \n\t"\ + "pxor %%mm6, %%mm4 \n\t"\ + "paddusb %%mm2, %%mm2 \n\t"\ + "packsswb %%mm1, %%mm0 \n\t"\ + "pcmpgtb %%mm0, %%mm7 \n\t"\ + "pxor %%mm7, %%mm0 \n\t"\ + "psubb %%mm7, %%mm0 \n\t"\ + "movq %%mm0, %%mm1 \n\t"\ + "psubusb %%mm2, %%mm0 \n\t"\ + "psubb %%mm0, %%mm1 \n\t"\ + "pand %5, %%mm1 \n\t"\ + "psrlw $2, %%mm1 \n\t"\ + "pxor %%mm7, %%mm1 \n\t"\ + "psubb %%mm7, %%mm1 \n\t"\ + "movq %0, %%mm5 \n\t"\ + "movq %3, %%mm6 \n\t"\ + "psubb %%mm1, %%mm5 \n\t"\ + "paddb %%mm1, %%mm6 \n\t" + +static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){ + const int strength= ff_h263_loop_filter_strength[qscale]; + + asm volatile( + + H263_LOOP_FILTER + + "movq %%mm3, %1 \n\t" + "movq %%mm4, %2 \n\t" + "movq %%mm5, %0 \n\t" + "movq %%mm6, %3 \n\t" + : "+m" (*(uint64_t*)(src - 2*stride)), + "+m" (*(uint64_t*)(src - 1*stride)), + "+m" (*(uint64_t*)(src + 0*stride)), + "+m" (*(uint64_t*)(src + 1*stride)) + : "g" (2*strength), "m"(ff_pb_FC) + ); +} + +static inline void 
transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){ + asm volatile( //FIXME could save 1 instruction if done as 8x4 ... + "movd %4, %%mm0 \n\t" + "movd %5, %%mm1 \n\t" + "movd %6, %%mm2 \n\t" + "movd %7, %%mm3 \n\t" + "punpcklbw %%mm1, %%mm0 \n\t" + "punpcklbw %%mm3, %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "punpcklwd %%mm2, %%mm0 \n\t" + "punpckhwd %%mm2, %%mm1 \n\t" + "movd %%mm0, %0 \n\t" + "punpckhdq %%mm0, %%mm0 \n\t" + "movd %%mm0, %1 \n\t" + "movd %%mm1, %2 \n\t" + "punpckhdq %%mm1, %%mm1 \n\t" + "movd %%mm1, %3 \n\t" + + : "=m" (*(uint32_t*)(dst + 0*dst_stride)), + "=m" (*(uint32_t*)(dst + 1*dst_stride)), + "=m" (*(uint32_t*)(dst + 2*dst_stride)), + "=m" (*(uint32_t*)(dst + 3*dst_stride)) + : "m" (*(uint32_t*)(src + 0*src_stride)), + "m" (*(uint32_t*)(src + 1*src_stride)), + "m" (*(uint32_t*)(src + 2*src_stride)), + "m" (*(uint32_t*)(src + 3*src_stride)) + ); +} + +static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){ + const int strength= ff_h263_loop_filter_strength[qscale]; + uint64_t temp[4] __attribute__ ((aligned(8))); + uint8_t *btemp= (uint8_t*)temp; + + src -= 2; + + transpose4x4(btemp , src , 8, stride); + transpose4x4(btemp+4, src + 4*stride, 8, stride); + asm volatile( + H263_LOOP_FILTER // 5 3 4 6 + + : "+m" (temp[0]), + "+m" (temp[1]), + "+m" (temp[2]), + "+m" (temp[3]) + : "g" (2*strength), "m"(ff_pb_FC) + ); + + asm volatile( + "movq %%mm5, %%mm1 \n\t" + "movq %%mm4, %%mm0 \n\t" + "punpcklbw %%mm3, %%mm5 \n\t" + "punpcklbw %%mm6, %%mm4 \n\t" + "punpckhbw %%mm3, %%mm1 \n\t" + "punpckhbw %%mm6, %%mm0 \n\t" + "movq %%mm5, %%mm3 \n\t" + "movq %%mm1, %%mm6 \n\t" + "punpcklwd %%mm4, %%mm5 \n\t" + "punpcklwd %%mm0, %%mm1 \n\t" + "punpckhwd %%mm4, %%mm3 \n\t" + "punpckhwd %%mm0, %%mm6 \n\t" + "movd %%mm5, %0 \n\t" + "punpckhdq %%mm5, %%mm5 \n\t" + "movd %%mm5, %1 \n\t" + "movd %%mm3, %2 \n\t" + "punpckhdq %%mm3, %%mm3 \n\t" + "movd %%mm3, %3 \n\t" + "movd %%mm1, %4 \n\t" + "punpckhdq %%mm1, %%mm1 \n\t" + "movd %%mm1, %5 \n\t" + "movd %%mm6, %6 \n\t" + "punpckhdq %%mm6, %%mm6 \n\t" + "movd %%mm6, %7 \n\t" + : "=m" (*(uint32_t*)(src + 0*stride)), + "=m" (*(uint32_t*)(src + 1*stride)), + "=m" (*(uint32_t*)(src + 2*stride)), + "=m" (*(uint32_t*)(src + 3*stride)), + "=m" (*(uint32_t*)(src + 4*stride)), + "=m" (*(uint32_t*)(src + 5*stride)), + "=m" (*(uint32_t*)(src + 6*stride)), + "=m" (*(uint32_t*)(src + 7*stride)) + ); +} + #ifdef CONFIG_ENCODERS static int pix_norm1_mmx(uint8_t *pix, int line_size) { int tmp; @@ -509,10 +691,10 @@ static int pix_norm1_mmx(uint8_t *pix, int line_size) { return tmp; } -static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size) { +static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { int tmp; asm volatile ( - "movl $16,%%ecx\n" + "movl %4,%%ecx\n" "pxor %%mm0,%%mm0\n" /* mm0 = 0 */ "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */ "1:\n" @@ -563,10 +745,252 @@ static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size) { "psrlq $32, %%mm7\n" /* shift hi dword to lo */ "paddd %%mm7,%%mm1\n" "movd %%mm1,%2\n" - : "+r" (pix1), "+r" (pix2), "=r"(tmp) : "r" (line_size) : "ecx"); + : "+r" (pix1), "+r" (pix2), "=r"(tmp) + : "r" (line_size) , "m" (h) + : "%ecx"); return tmp; } +static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { + int tmp; + + assert( (((int)pix) & 7) == 0); + assert((line_size &7) ==0); + +#define SUM(in0, in1, out0, out1) \ + "movq (%0), %%mm2\n"\ + "movq 8(%0), %%mm3\n"\ + "addl %2,%0\n"\ + 
"movq %%mm2, " #out0 "\n"\ + "movq %%mm3, " #out1 "\n"\ + "psubusb " #in0 ", %%mm2\n"\ + "psubusb " #in1 ", %%mm3\n"\ + "psubusb " #out0 ", " #in0 "\n"\ + "psubusb " #out1 ", " #in1 "\n"\ + "por %%mm2, " #in0 "\n"\ + "por %%mm3, " #in1 "\n"\ + "movq " #in0 ", %%mm2\n"\ + "movq " #in1 ", %%mm3\n"\ + "punpcklbw %%mm7, " #in0 "\n"\ + "punpcklbw %%mm7, " #in1 "\n"\ + "punpckhbw %%mm7, %%mm2\n"\ + "punpckhbw %%mm7, %%mm3\n"\ + "paddw " #in1 ", " #in0 "\n"\ + "paddw %%mm3, %%mm2\n"\ + "paddw %%mm2, " #in0 "\n"\ + "paddw " #in0 ", %%mm6\n" + + + asm volatile ( + "movl %3,%%ecx\n" + "pxor %%mm6,%%mm6\n" + "pxor %%mm7,%%mm7\n" + "movq (%0),%%mm0\n" + "movq 8(%0),%%mm1\n" + "addl %2,%0\n" + "subl $2, %%ecx\n" + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + "1:\n" + + SUM(%%mm4, %%mm5, %%mm0, %%mm1) + + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + + "subl $2, %%ecx\n" + "jnz 1b\n" + + "movq %%mm6,%%mm0\n" + "psrlq $32, %%mm6\n" + "paddw %%mm6,%%mm0\n" + "movq %%mm0,%%mm6\n" + "psrlq $16, %%mm0\n" + "paddw %%mm6,%%mm0\n" + "movd %%mm0,%1\n" + : "+r" (pix), "=r"(tmp) + : "r" (line_size) , "m" (h) + : "%ecx"); + return tmp & 0xFFFF; +} +#undef SUM + +static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { + int tmp; + + assert( (((int)pix) & 7) == 0); + assert((line_size &7) ==0); + +#define SUM(in0, in1, out0, out1) \ + "movq (%0), " #out0 "\n"\ + "movq 8(%0), " #out1 "\n"\ + "addl %2,%0\n"\ + "psadbw " #out0 ", " #in0 "\n"\ + "psadbw " #out1 ", " #in1 "\n"\ + "paddw " #in1 ", " #in0 "\n"\ + "paddw " #in0 ", %%mm6\n" + + asm volatile ( + "movl %3,%%ecx\n" + "pxor %%mm6,%%mm6\n" + "pxor %%mm7,%%mm7\n" + "movq (%0),%%mm0\n" + "movq 8(%0),%%mm1\n" + "addl %2,%0\n" + "subl $2, %%ecx\n" + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + "1:\n" + + SUM(%%mm4, %%mm5, %%mm0, %%mm1) + + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + + "subl $2, %%ecx\n" + "jnz 1b\n" + + "movd %%mm6,%1\n" + : "+r" (pix), "=r"(tmp) + : "r" (line_size) , "m" (h) + : "%ecx"); + return tmp; +} +#undef SUM + +static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { + int tmp; + + assert( (((int)pix1) & 7) == 0); + assert( (((int)pix2) & 7) == 0); + assert((line_size &7) ==0); + +#define SUM(in0, in1, out0, out1) \ + "movq (%0),%%mm2\n"\ + "movq (%1)," #out0 "\n"\ + "movq 8(%0),%%mm3\n"\ + "movq 8(%1)," #out1 "\n"\ + "addl %3,%0\n"\ + "addl %3,%1\n"\ + "psubb " #out0 ", %%mm2\n"\ + "psubb " #out1 ", %%mm3\n"\ + "pxor %%mm7, %%mm2\n"\ + "pxor %%mm7, %%mm3\n"\ + "movq %%mm2, " #out0 "\n"\ + "movq %%mm3, " #out1 "\n"\ + "psubusb " #in0 ", %%mm2\n"\ + "psubusb " #in1 ", %%mm3\n"\ + "psubusb " #out0 ", " #in0 "\n"\ + "psubusb " #out1 ", " #in1 "\n"\ + "por %%mm2, " #in0 "\n"\ + "por %%mm3, " #in1 "\n"\ + "movq " #in0 ", %%mm2\n"\ + "movq " #in1 ", %%mm3\n"\ + "punpcklbw %%mm7, " #in0 "\n"\ + "punpcklbw %%mm7, " #in1 "\n"\ + "punpckhbw %%mm7, %%mm2\n"\ + "punpckhbw %%mm7, %%mm3\n"\ + "paddw " #in1 ", " #in0 "\n"\ + "paddw %%mm3, %%mm2\n"\ + "paddw %%mm2, " #in0 "\n"\ + "paddw " #in0 ", %%mm6\n" + + + asm volatile ( + "movl %4,%%ecx\n" + "pxor %%mm6,%%mm6\n" + "pcmpeqw %%mm7,%%mm7\n" + "psllw $15, %%mm7\n" + "packsswb %%mm7, %%mm7\n" + "movq (%0),%%mm0\n" + "movq (%1),%%mm2\n" + "movq 8(%0),%%mm1\n" + "movq 8(%1),%%mm3\n" + "addl %3,%0\n" + "addl %3,%1\n" + "subl $2, %%ecx\n" + "psubb %%mm2, %%mm0\n" + "psubb %%mm3, %%mm1\n" + "pxor %%mm7, %%mm0\n" + "pxor %%mm7, %%mm1\n" + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + "1:\n" + + SUM(%%mm4, %%mm5, %%mm0, %%mm1) + + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + + "subl $2, %%ecx\n" + "jnz 1b\n" + + 
"movq %%mm6,%%mm0\n" + "psrlq $32, %%mm6\n" + "paddw %%mm6,%%mm0\n" + "movq %%mm0,%%mm6\n" + "psrlq $16, %%mm0\n" + "paddw %%mm6,%%mm0\n" + "movd %%mm0,%2\n" + : "+r" (pix1), "+r" (pix2), "=r"(tmp) + : "r" (line_size) , "m" (h) + : "%ecx"); + return tmp & 0x7FFF; +} +#undef SUM + +static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { + int tmp; + + assert( (((int)pix1) & 7) == 0); + assert( (((int)pix2) & 7) == 0); + assert((line_size &7) ==0); + +#define SUM(in0, in1, out0, out1) \ + "movq (%0)," #out0 "\n"\ + "movq (%1),%%mm2\n"\ + "movq 8(%0)," #out1 "\n"\ + "movq 8(%1),%%mm3\n"\ + "addl %3,%0\n"\ + "addl %3,%1\n"\ + "psubb %%mm2, " #out0 "\n"\ + "psubb %%mm3, " #out1 "\n"\ + "pxor %%mm7, " #out0 "\n"\ + "pxor %%mm7, " #out1 "\n"\ + "psadbw " #out0 ", " #in0 "\n"\ + "psadbw " #out1 ", " #in1 "\n"\ + "paddw " #in1 ", " #in0 "\n"\ + "paddw " #in0 ", %%mm6\n" + + asm volatile ( + "movl %4,%%ecx\n" + "pxor %%mm6,%%mm6\n" + "pcmpeqw %%mm7,%%mm7\n" + "psllw $15, %%mm7\n" + "packsswb %%mm7, %%mm7\n" + "movq (%0),%%mm0\n" + "movq (%1),%%mm2\n" + "movq 8(%0),%%mm1\n" + "movq 8(%1),%%mm3\n" + "addl %3,%0\n" + "addl %3,%1\n" + "subl $2, %%ecx\n" + "psubb %%mm2, %%mm0\n" + "psubb %%mm3, %%mm1\n" + "pxor %%mm7, %%mm0\n" + "pxor %%mm7, %%mm1\n" + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + "1:\n" + + SUM(%%mm4, %%mm5, %%mm0, %%mm1) + + SUM(%%mm0, %%mm1, %%mm4, %%mm5) + + "subl $2, %%ecx\n" + "jnz 1b\n" + + "movd %%mm6,%2\n" + : "+r" (pix1), "+r" (pix2), "=r"(tmp) + : "r" (line_size) , "m" (h) + : "%ecx"); + return tmp; +} +#undef SUM + static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ int i=0; asm volatile( @@ -588,7 +1012,6 @@ static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ for(; i<w; i++) dst[i+0] = src1[i+0]-src2[i+0]; } -#endif static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){ int i=0; @@ -626,8 +1049,6 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *left = src2[w-1]; } -#ifdef CONFIG_ENCODERS - #define LBUTTERFLY2(a1,b1,a2,b2)\ "paddw " #b1 ", " #a1 " \n\t"\ "paddw " #b2 ", " #a2 " \n\t"\ @@ -691,9 +1112,11 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t "movq "#c", "#o"+32(%1) \n\t"\ "movq "#d", "#o"+48(%1) \n\t"\ -static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride){ +static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){ uint64_t temp[16] __align8; int sum=0; + + assert(h==8); diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride); @@ -776,9 +1199,11 @@ static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride) return sum&0xFFFF; } -static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride){ +static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){ uint64_t temp[16] __align8; int sum=0; + + assert(h==8); diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride); @@ -862,8 +1287,8 @@ static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride } -WARPER88_1616(hadamard8_diff_mmx, hadamard8_diff16_mmx) -WARPER88_1616(hadamard8_diff_mmx2, hadamard8_diff16_mmx2) +WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx) +WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2) #endif //CONFIG_ENCODERS #define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d) @@ -1602,12 +2027,19 @@ void dsputil_init_mmx(DSPContext* c, 
AVCodecContext *avctx) #endif if (mm_flags & MM_MMX) { - const int idct_algo= avctx->idct_algo; -#ifdef CONFIG_ENCODERS const int dct_algo = avctx->dct_algo; + const int idct_algo= avctx->idct_algo; - if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX) - c->fdct = ff_fdct_mmx; +#ifdef CONFIG_ENCODERS + if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ + if(mm_flags & MM_SSE2){ + c->fdct = ff_fdct_sse2; + }else if(mm_flags & MM_MMXEXT){ + c->fdct = ff_fdct_mmx2; + }else{ + c->fdct = ff_fdct_mmx; + } + } #endif //CONFIG_ENCODERS if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ @@ -1688,7 +2120,15 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->pix_norm1 = pix_norm1_mmx; c->sse[0] = sse16_mmx; + c->vsad[4]= vsad_intra16_mmx; + + if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ + c->vsad[0] = vsad16_mmx; + } #endif //CONFIG_ENCODERS + + c->h263_v_loop_filter= h263_v_loop_filter_mmx; + c->h263_h_loop_filter= h263_h_loop_filter_mmx; if (mm_flags & MM_MMXEXT) { c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; @@ -1708,6 +2148,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) #ifdef CONFIG_ENCODERS c->hadamard8_diff[0]= hadamard8_diff16_mmx2; c->hadamard8_diff[1]= hadamard8_diff_mmx2; + c->vsad[4]= vsad_intra16_mmx2; #endif //CONFIG_ENCODERS if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ @@ -1717,6 +2158,9 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; +#ifdef CONFIG_ENCODERS + c->vsad[0] = vsad16_mmx2; +#endif //CONFIG_ENCODERS } #if 1 @@ -1754,7 +2198,9 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2) #endif +#ifdef CONFIG_ENCODERS c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2; +#endif //CONFIG_ENCODERS } else if (mm_flags & MM_3DNOW) { c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow; diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h index 8418123ac..c8494f51a 100644 --- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h +++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h @@ -1,7 +1,7 @@ /* * DSP utils : average functions are compiled twice for 3dnow/mmx2 * Copyright (c) 2000, 2001 Fabrice Bellard. - * Copyright (c) 2002 Michael Niedermayer + * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h index bbd5aec97..21f0bfd84 100644 --- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h +++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h @@ -1,6 +1,7 @@ /* * DSP utils mmx functions are compiled twice for rnd/no_rnd * Copyright (c) 2000, 2001 Fabrice Bellard. + * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public diff --git a/src/libffmpeg/libavcodec/i386/fdct_mmx.c b/src/libffmpeg/libavcodec/i386/fdct_mmx.c index a2402c95d..877160773 100644 --- a/src/libffmpeg/libavcodec/i386/fdct_mmx.c +++ b/src/libffmpeg/libavcodec/i386/fdct_mmx.c @@ -1,16 +1,21 @@ /* * MMX optimized forward DCT * The gcc porting is Copyright (c) 2001 Fabrice Bellard. 
+ * cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> + * SSE2 optimization is Copyright (c) 2004 Denes Balatoni. * * from fdctam32.c - AP922 MMX(3D-Now) forward-DCT * * Intel Application Note AP-922 - fast, precise implementation of DCT * http://developer.intel.com/vtune/cbts/appnotes.htm + * + * Also of inspiration: + * a page about fdct at http://www.geocities.com/ssavekar/dct.htm + * Skal's fdct at http://skal.planet-d.net/coding/dct.html */ -#include "../dsputil.h" +#include "../common.h" #include "mmx.h" -#undef ATTR_ALIGN #define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align))) ////////////////////////////////////////////////////////////////////// @@ -27,10 +32,8 @@ #define BITS_FRW_ACC 3 //; 2 or 3 for accuracy #define SHIFT_FRW_COL BITS_FRW_ACC #define SHIFT_FRW_ROW (BITS_FRW_ACC + 17 - 3) -//#define RND_FRW_ROW (262144 * (BITS_FRW_ACC - 1)) //; 1 << (SHIFT_FRW_ROW-1) #define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1)) -//#define RND_FRW_COL (2 * (BITS_FRW_ACC - 1)) //; 1 << (SHIFT_FRW_COL-1) -#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1)) +//#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1)) //concatenated table, for forward DCT transformation static const int16_t fdct_tg_all_16[] ATTR_ALIGN(8) = { @@ -38,101 +41,220 @@ static const int16_t fdct_tg_all_16[] ATTR_ALIGN(8) = { 27146, 27146, 27146, 27146, // tg * (2<<16) + 0.5 -21746, -21746, -21746, -21746, // tg * (2<<16) + 0.5 }; -static const int16_t cos_4_16[4] ATTR_ALIGN(8) = { - -19195, -19195, -19195, -19195, //cos * (2<<16) + 0.5 -}; static const int16_t ocos_4_16[4] ATTR_ALIGN(8) = { 23170, 23170, 23170, 23170, //cos * (2<<15) + 0.5 }; -static const long long fdct_one_corr ATTR_ALIGN(8) = 0x0001000100010001LL; +static const long long fdct_one_corr ATTR_ALIGN(8) = 0x0001000100010001LL; + static const long fdct_r_row[2] ATTR_ALIGN(8) = {RND_FRW_ROW, RND_FRW_ROW }; +static const long fdct_r_row_sse2[4] ATTR_ALIGN(16) = {RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW}; + static const int16_t tab_frw_01234567[] ATTR_ALIGN(8) = { // forward_dct coeff table - //row0 - 16384, 16384, 21407, -8867, // w09 w01 w08 w00 - 16384, 16384, 8867, -21407, // w13 w05 w12 w04 - 16384, -16384, 8867, 21407, // w11 w03 w10 w02 - -16384, 16384, -21407, -8867, // w15 w07 w14 w06 - 22725, 12873, 19266, -22725, // w22 w20 w18 w16 - 19266, 4520, -4520, -12873, // w23 w21 w19 w17 - 12873, 4520, 4520, 19266, // w30 w28 w26 w24 - -22725, 19266, -12873, -22725, // w31 w29 w27 w25 - - //row1 - 22725, 22725, 29692, -12299, // w09 w01 w08 w00 - 22725, 22725, 12299, -29692, // w13 w05 w12 w04 - 22725, -22725, 12299, 29692, // w11 w03 w10 w02 - -22725, 22725, -29692, -12299, // w15 w07 w14 w06 - 31521, 17855, 26722, -31521, // w22 w20 w18 w16 - 26722, 6270, -6270, -17855, // w23 w21 w19 w17 - 17855, 6270, 6270, 26722, // w30 w28 w26 w24 - -31521, 26722, -17855, -31521, // w31 w29 w27 w25 - - //row2 - 21407, 21407, 27969, -11585, // w09 w01 w08 w00 - 21407, 21407, 11585, -27969, // w13 w05 w12 w04 - 21407, -21407, 11585, 27969, // w11 w03 w10 w02 - -21407, 21407, -27969, -11585, // w15 w07 w14 w06 - 29692, 16819, 25172, -29692, // w22 w20 w18 w16 - 25172, 5906, -5906, -16819, // w23 w21 w19 w17 - 16819, 5906, 5906, 25172, // w30 w28 w26 w24 - -29692, 25172, -16819, -29692, // w31 w29 w27 w25 - - //row3 - 19266, 19266, 25172, -10426, // w09 w01 w08 w00 - 19266, 19266, 10426, -25172, // w13 w05 w12 w04 - 19266, -19266, 10426, 25172, // w11 w03 w10 w02 - -19266, 19266, -25172, -10426, // w15 w07 w14 w06, - 26722, 15137, 22654, 
-26722, // w22 w20 w18 w16 - 22654, 5315, -5315, -15137, // w23 w21 w19 w17 - 15137, 5315, 5315, 22654, // w30 w28 w26 w24 - -26722, 22654, -15137, -26722, // w31 w29 w27 w25, - - //row4 - 16384, 16384, 21407, -8867, // w09 w01 w08 w00 - 16384, 16384, 8867, -21407, // w13 w05 w12 w04 - 16384, -16384, 8867, 21407, // w11 w03 w10 w02 - -16384, 16384, -21407, -8867, // w15 w07 w14 w06 - 22725, 12873, 19266, -22725, // w22 w20 w18 w16 - 19266, 4520, -4520, -12873, // w23 w21 w19 w17 - 12873, 4520, 4520, 19266, // w30 w28 w26 w24 - -22725, 19266, -12873, -22725, // w31 w29 w27 w25 - - //row5 - 19266, 19266, 25172, -10426, // w09 w01 w08 w00 - 19266, 19266, 10426, -25172, // w13 w05 w12 w04 - 19266, -19266, 10426, 25172, // w11 w03 w10 w02 - -19266, 19266, -25172, -10426, // w15 w07 w14 w06 - 26722, 15137, 22654, -26722, // w22 w20 w18 w16 - 22654, 5315, -5315, -15137, // w23 w21 w19 w17 - 15137, 5315, 5315, 22654, // w30 w28 w26 w24 - -26722, 22654, -15137, -26722, // w31 w29 w27 w25 - - //row6 - 21407, 21407, 27969, -11585, // w09 w01 w08 w00 - 21407, 21407, 11585, -27969, // w13 w05 w12 w04 - 21407, -21407, 11585, 27969, // w11 w03 w10 w02 - -21407, 21407, -27969, -11585, // w15 w07 w14 w06, - 29692, 16819, 25172, -29692, // w22 w20 w18 w16 - 25172, 5906, -5906, -16819, // w23 w21 w19 w17 - 16819, 5906, 5906, 25172, // w30 w28 w26 w24 - -29692, 25172, -16819, -29692, // w31 w29 w27 w25, - - //row7 - 22725, 22725, 29692, -12299, // w09 w01 w08 w00 - 22725, 22725, 12299, -29692, // w13 w05 w12 w04 - 22725, -22725, 12299, 29692, // w11 w03 w10 w02 - -22725, 22725, -29692, -12299, // w15 w07 w14 w06, - 31521, 17855, 26722, -31521, // w22 w20 w18 w16 - 26722, 6270, -6270, -17855, // w23 w21 w19 w17 - 17855, 6270, 6270, 26722, // w30 w28 w26 w24 - -31521, 26722, -17855, -31521 // w31 w29 w27 w25 + 16384, 16384, -8867, -21407, + 16384, 16384, 21407, 8867, + 16384, -16384, 21407, -8867, + -16384, 16384, 8867, -21407, + 22725, 19266, -22725, -12873, + 12873, 4520, 19266, -4520, + 12873, -22725, 19266, -22725, + 4520, 19266, 4520, -12873, + + 22725, 22725, -12299, -29692, + 22725, 22725, 29692, 12299, + 22725, -22725, 29692, -12299, + -22725, 22725, 12299, -29692, + 31521, 26722, -31521, -17855, + 17855, 6270, 26722, -6270, + 17855, -31521, 26722, -31521, + 6270, 26722, 6270, -17855, + + 21407, 21407, -11585, -27969, + 21407, 21407, 27969, 11585, + 21407, -21407, 27969, -11585, + -21407, 21407, 11585, -27969, + 29692, 25172, -29692, -16819, + 16819, 5906, 25172, -5906, + 16819, -29692, 25172, -29692, + 5906, 25172, 5906, -16819, + + 19266, 19266, -10426, -25172, + 19266, 19266, 25172, 10426, + 19266, -19266, 25172, -10426, + -19266, 19266, 10426, -25172, + 26722, 22654, -26722, -15137, + 15137, 5315, 22654, -5315, + 15137, -26722, 22654, -26722, + 5315, 22654, 5315, -15137, + + 16384, 16384, -8867, -21407, + 16384, 16384, 21407, 8867, + 16384, -16384, 21407, -8867, + -16384, 16384, 8867, -21407, + 22725, 19266, -22725, -12873, + 12873, 4520, 19266, -4520, + 12873, -22725, 19266, -22725, + 4520, 19266, 4520, -12873, + + 19266, 19266, -10426, -25172, + 19266, 19266, 25172, 10426, + 19266, -19266, 25172, -10426, + -19266, 19266, 10426, -25172, + 26722, 22654, -26722, -15137, + 15137, 5315, 22654, -5315, + 15137, -26722, 22654, -26722, + 5315, 22654, 5315, -15137, + + 21407, 21407, -11585, -27969, + 21407, 21407, 27969, 11585, + 21407, -21407, 27969, -11585, + -21407, 21407, 11585, -27969, + 29692, 25172, -29692, -16819, + 16819, 5906, 25172, -5906, + 16819, -29692, 25172, -29692, + 5906, 25172, 5906, 
-16819, + + 22725, 22725, -12299, -29692, + 22725, 22725, 29692, 12299, + 22725, -22725, 29692, -12299, + -22725, 22725, 12299, -29692, + 31521, 26722, -31521, -17855, + 17855, 6270, 26722, -6270, + 17855, -31521, 26722, -31521, + 6270, 26722, 6270, -17855, }; +static const int16_t tab_frw_01234567_sse2[] ATTR_ALIGN(16) = { // forward_dct coeff table +#define TABLE_SSE2 C4, C4, C1, C3, -C6, -C2, -C1, -C5, \ + C4, C4, C5, C7, C2, C6, C3, -C7, \ + -C4, C4, C7, C3, C6, -C2, C7, -C5, \ + C4, -C4, C5, -C1, C2, -C6, C3, -C1, +// c1..c7 * cos(pi/4) * 2^15 +#define C1 22725 +#define C2 21407 +#define C3 19266 +#define C4 16384 +#define C5 12873 +#define C6 8867 +#define C7 4520 +TABLE_SSE2 + +#undef C1 +#undef C2 +#undef C3 +#undef C4 +#undef C5 +#undef C6 +#undef C7 +#define C1 31521 +#define C2 29692 +#define C3 26722 +#define C4 22725 +#define C5 17855 +#define C6 12299 +#define C7 6270 +TABLE_SSE2 + +#undef C1 +#undef C2 +#undef C3 +#undef C4 +#undef C5 +#undef C6 +#undef C7 +#define C1 29692 +#define C2 27969 +#define C3 25172 +#define C4 21407 +#define C5 16819 +#define C6 11585 +#define C7 5906 +TABLE_SSE2 + +#undef C1 +#undef C2 +#undef C3 +#undef C4 +#undef C5 +#undef C6 +#undef C7 +#define C1 26722 +#define C2 25172 +#define C3 22654 +#define C4 19266 +#define C5 15137 +#define C6 10426 +#define C7 5315 +TABLE_SSE2 + +#undef C1 +#undef C2 +#undef C3 +#undef C4 +#undef C5 +#undef C6 +#undef C7 +#define C1 22725 +#define C2 21407 +#define C3 19266 +#define C4 16384 +#define C5 12873 +#define C6 8867 +#define C7 4520 +TABLE_SSE2 -static inline void fdct_col(const int16_t *in, int16_t *out, int offset) +#undef C1 +#undef C2 +#undef C3 +#undef C4 +#undef C5 +#undef C6 +#undef C7 +#define C1 26722 +#define C2 25172 +#define C3 22654 +#define C4 19266 +#define C5 15137 +#define C6 10426 +#define C7 5315 +TABLE_SSE2 + +#undef C1 +#undef C2 +#undef C3 +#undef C4 +#undef C5 +#undef C6 +#undef C7 +#define C1 29692 +#define C2 27969 +#define C3 25172 +#define C4 21407 +#define C5 16819 +#define C6 11585 +#define C7 5906 +TABLE_SSE2 + +#undef C1 +#undef C2 +#undef C3 +#undef C4 +#undef C5 +#undef C6 +#undef C7 +#define C1 31521 +#define C2 29692 +#define C3 26722 +#define C4 22725 +#define C5 17855 +#define C6 12299 +#define C7 6270 +TABLE_SSE2 +}; + +static always_inline void fdct_col(const int16_t *in, int16_t *out, int offset) { movq_m2r(*(in + offset + 1 * 8), mm0); movq_m2r(*(in + offset + 6 * 8), mm1); @@ -211,59 +333,158 @@ static inline void fdct_col(const int16_t *in, int16_t *out, int offset) movq_r2m(mm3, *(out + offset + 7 * 8)); } -static inline void fdct_row(const int16_t *in, int16_t *out, const int16_t *table) + +static always_inline void fdct_row_sse2(const int16_t *in, int16_t *out) { - movd_m2r(*(in + 6), mm5); - punpcklwd_m2r(*(in + 4), mm5); - movq_r2r(mm5, mm2); - psrlq_i2r(0x20, mm5); + asm volatile( + ".macro FDCT_ROW_SSE2_H1 i t \n\t" + "movq \\i(%0), %%xmm2 \n\t" + "movq \\i+8(%0), %%xmm0 \n\t" + "movdqa \\t+32(%1), %%xmm3 \n\t" + "movdqa \\t+48(%1), %%xmm7 \n\t" + "movdqa \\t(%1), %%xmm4 \n\t" + "movdqa \\t+16(%1), %%xmm5 \n\t" + ".endm \n\t" + ".macro FDCT_ROW_SSE2_H2 i t \n\t" + "movq \\i(%0), %%xmm2 \n\t" + "movq \\i+8(%0), %%xmm0 \n\t" + "movdqa \\t+32(%1), %%xmm3 \n\t" + "movdqa \\t+48(%1), %%xmm7 \n\t" + ".endm \n\t" + ".macro FDCT_ROW_SSE2 i \n\t" + "movq %%xmm2, %%xmm1 \n\t" + "pshuflw $27, %%xmm0, %%xmm0 \n\t" + "paddsw %%xmm0, %%xmm1 \n\t" + "psubsw %%xmm0, %%xmm2 \n\t" + "punpckldq %%xmm2, %%xmm1 \n\t" + "pshufd $78, %%xmm1, %%xmm2 \n\t" + "pmaddwd %%xmm2, %%xmm3 
\n\t" + "pmaddwd %%xmm1, %%xmm7 \n\t" + "pmaddwd %%xmm5, %%xmm2 \n\t" + "pmaddwd %%xmm4, %%xmm1 \n\t" + "paddd %%xmm7, %%xmm3 \n\t" + "paddd %%xmm2, %%xmm1 \n\t" + "paddd %%xmm6, %%xmm3 \n\t" + "paddd %%xmm6, %%xmm1 \n\t" + "psrad %3, %%xmm3 \n\t" + "psrad %3, %%xmm1 \n\t" + "packssdw %%xmm3, %%xmm1 \n\t" + "movdqa %%xmm1, \\i(%4) \n\t" + ".endm \n\t" + "movdqa (%2), %%xmm6 \n\t" + "FDCT_ROW_SSE2_H1 0 0 \n\t" + "FDCT_ROW_SSE2 0 \n\t" + "FDCT_ROW_SSE2_H2 64 0 \n\t" + "FDCT_ROW_SSE2 64 \n\t" + + "FDCT_ROW_SSE2_H1 16 64 \n\t" + "FDCT_ROW_SSE2 16 \n\t" + "FDCT_ROW_SSE2_H2 112 64 \n\t" + "FDCT_ROW_SSE2 112 \n\t" + + "FDCT_ROW_SSE2_H1 32 128 \n\t" + "FDCT_ROW_SSE2 32 \n\t" + "FDCT_ROW_SSE2_H2 96 128 \n\t" + "FDCT_ROW_SSE2 96 \n\t" + + "FDCT_ROW_SSE2_H1 48 192 \n\t" + "FDCT_ROW_SSE2 48 \n\t" + "FDCT_ROW_SSE2_H2 80 192 \n\t" + "FDCT_ROW_SSE2 80 \n\t" + : + : "r" (in), "r" (tab_frw_01234567_sse2), "r" (fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out) + ); +} + +static always_inline void fdct_row_mmx2(const int16_t *in, int16_t *out, const int16_t *table) +{ + pshufw_m2r(*(in + 4), mm5, 0x1B); movq_m2r(*(in + 0), mm0); - punpcklwd_r2r(mm2, mm5); - movq_r2r(mm0, mm1); + movq_r2r(mm0, mm1); paddsw_r2r(mm5, mm0); psubsw_r2r(mm5, mm1); - movq_r2r(mm0, mm2); - punpcklwd_r2r(mm1, mm0); - punpckhwd_r2r(mm1, mm2); - movq_r2r(mm2, mm1); - movq_r2r(mm0, mm2); + pshufw_r2r(mm0, mm2, 0x4E); + pshufw_r2r(mm1, mm3, 0x4E); + movq_m2r(*(table + 0), mm4); + movq_m2r(*(table + 4), mm6); + movq_m2r(*(table + 16), mm5); + movq_m2r(*(table + 20), mm7); + pmaddwd_r2r(mm0, mm4); + pmaddwd_r2r(mm1, mm5); + pmaddwd_r2r(mm2, mm6); + pmaddwd_r2r(mm3, mm7); + pmaddwd_m2r(*(table + 8), mm0); + pmaddwd_m2r(*(table + 12), mm2); + pmaddwd_m2r(*(table + 24), mm1); + pmaddwd_m2r(*(table + 28), mm3); + paddd_r2r(mm6, mm4); + paddd_r2r(mm7, mm5); + paddd_r2r(mm2, mm0); + paddd_r2r(mm3, mm1); + movq_m2r(*fdct_r_row, mm7); + paddd_r2r(mm7, mm4); + paddd_r2r(mm7, mm5); + paddd_r2r(mm7, mm0); + paddd_r2r(mm7, mm1); + psrad_i2r(SHIFT_FRW_ROW, mm4); + psrad_i2r(SHIFT_FRW_ROW, mm5); + psrad_i2r(SHIFT_FRW_ROW, mm0); + psrad_i2r(SHIFT_FRW_ROW, mm1); + packssdw_r2r(mm0, mm4); + packssdw_r2r(mm1, mm5); + movq_r2r(mm4, mm2); + punpcklwd_r2r(mm5, mm4); + punpckhwd_r2r(mm5, mm2); + movq_r2m(mm4, *(out + 0)); + movq_r2m(mm2, *(out + 4)); +} + +static always_inline void fdct_row_mmx(const int16_t *in, int16_t *out, const int16_t *table) +{ + movd_m2r(*(in + 6), mm1); + punpcklwd_m2r(*(in + 4), mm1); + movq_r2r(mm1, mm2); + psrlq_i2r(0x20, mm1); + movq_m2r(*(in + 0), mm0); + punpcklwd_r2r(mm2, mm1); + movq_r2r(mm0, mm5); + paddsw_r2r(mm1, mm0); + psubsw_r2r(mm1, mm5); + movq_r2r(mm0, mm1); + movq_r2r(mm5, mm6); + punpckldq_r2r(mm5, mm3); + punpckhdq_r2r(mm3, mm6); movq_m2r(*(table + 0), mm3); - punpcklwd_r2r(mm1, mm0); - movq_r2r(mm0, mm5); - punpckldq_r2r(mm0, mm0); movq_m2r(*(table + 4), mm4); - punpckhwd_r2r(mm1, mm2); + punpckldq_r2r(mm0, mm2); pmaddwd_r2r(mm0, mm3); - movq_r2r(mm2, mm6); - movq_m2r(*(table + 16), mm1); - punpckldq_r2r(mm2, mm2); - pmaddwd_r2r(mm2, mm4); - punpckhdq_r2r(mm5, mm5); + punpckhdq_r2r(mm2, mm1); + movq_m2r(*(table + 16), mm2); + pmaddwd_r2r(mm1, mm4); pmaddwd_m2r(*(table + 8), mm0); - punpckhdq_r2r(mm6, mm6); movq_m2r(*(table + 20), mm7); - pmaddwd_r2r(mm5, mm1); + pmaddwd_r2r(mm5, mm2); paddd_m2r(*fdct_r_row, mm3); pmaddwd_r2r(mm6, mm7); - pmaddwd_m2r(*(table + 12), mm2); + pmaddwd_m2r(*(table + 12), mm1); paddd_r2r(mm4, mm3); pmaddwd_m2r(*(table + 24), mm5); pmaddwd_m2r(*(table + 28), mm6); - paddd_r2r(mm7, mm1); + 
paddd_r2r(mm7, mm2); paddd_m2r(*fdct_r_row, mm0); psrad_i2r(SHIFT_FRW_ROW, mm3); - paddd_m2r(*fdct_r_row, mm1); - paddd_r2r(mm2, mm0); + paddd_m2r(*fdct_r_row, mm2); + paddd_r2r(mm1, mm0); paddd_m2r(*fdct_r_row, mm5); - psrad_i2r(SHIFT_FRW_ROW, mm1); + psrad_i2r(SHIFT_FRW_ROW, mm2); paddd_r2r(mm6, mm5); psrad_i2r(SHIFT_FRW_ROW, mm0); psrad_i2r(SHIFT_FRW_ROW, mm5); packssdw_r2r(mm0, mm3); - packssdw_r2r(mm5, mm1); + packssdw_r2r(mm5, mm2); movq_r2r(mm3, mm6); - punpcklwd_r2r(mm1, mm3); - punpckhwd_r2r(mm1, mm6); + punpcklwd_r2r(mm2, mm3); + punpckhwd_r2r(mm2, mm6); movq_r2m(mm3, *(out + 0)); movq_r2m(mm6, *(out + 4)); } @@ -284,9 +505,47 @@ void ff_fdct_mmx(int16_t *block) table = tab_frw_01234567; out = block; for(i=8;i>0;i--) { - fdct_row(block1, out, table); + fdct_row_mmx(block1, out, table); + block1 += 8; + table += 32; + out += 8; + } +} + +void ff_fdct_mmx2(int16_t *block) +{ + int64_t align_tmp[16] ATTR_ALIGN(8); + int16_t * const block_tmp= (int16_t*)align_tmp; + int16_t *block1, *out; + const int16_t *table; + int i; + + block1 = block_tmp; + fdct_col(block, block1, 0); + fdct_col(block, block1, 4); + + block1 = block_tmp; + table = tab_frw_01234567; + out = block; + for(i=8;i>0;i--) { + fdct_row_mmx2(block1, out, table); block1 += 8; table += 32; out += 8; } } + +void ff_fdct_sse2(int16_t *block) +{ + int64_t align_tmp[16] ATTR_ALIGN(8); + int16_t * const block_tmp= (int16_t*)align_tmp; + int16_t *block1; + int i; + + block1 = block_tmp; + fdct_col(block, block1, 0); + fdct_col(block, block1, 4); + + fdct_row_sse2(block1, block); +} + diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx.c b/src/libffmpeg/libavcodec/i386/idct_mmx.c index 654792e5e..298c8a8b0 100644 --- a/src/libffmpeg/libavcodec/i386/idct_mmx.c +++ b/src/libffmpeg/libavcodec/i386/idct_mmx.c @@ -22,11 +22,10 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include "../dsputil.h" +#include "../common.h" #include "mmx.h" -#undef ATTR_ALIGN #define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align))) #define ROW_SHIFT 11 @@ -555,10 +554,6 @@ static int32_t rounder5[] ATTR_ALIGN(8) = #undef COL_SHIFT #undef ROW_SHIFT -/* the macro below will generate these */ -void ff_mmx_idct(DCTELEM *block); -void ff_mmxext_idct(DCTELEM *block); - #define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \ void idct (int16_t * block) \ { \ diff --git a/src/libffmpeg/libavcodec/i386/mmx.h b/src/libffmpeg/libavcodec/i386/mmx.h index 2ba28898d..7e94cfd9b 100644 --- a/src/libffmpeg/libavcodec/i386/mmx.h +++ b/src/libffmpeg/libavcodec/i386/mmx.h @@ -1 +1,243 @@ -#include "xineutils.h" +/* + * mmx.h + * Copyright (C) 1997-2001 H. Dietz and R. 
Fisher + */ +#ifndef AVCODEC_I386MMX_H +#define AVCODEC_I386MMX_H + +/* + * The type of an value that fits in an MMX register (note that long + * long constant values MUST be suffixed by LL and unsigned long long + * values by ULL, lest they be truncated by the compiler) + */ + +typedef union { + long long q; /* Quadword (64-bit) value */ + unsigned long long uq; /* Unsigned Quadword */ + int d[2]; /* 2 Doubleword (32-bit) values */ + unsigned int ud[2]; /* 2 Unsigned Doubleword */ + short w[4]; /* 4 Word (16-bit) values */ + unsigned short uw[4]; /* 4 Unsigned Word */ + char b[8]; /* 8 Byte (8-bit) values */ + unsigned char ub[8]; /* 8 Unsigned Byte */ + float s[2]; /* Single-precision (32-bit) value */ +} mmx_t; /* On an 8-byte (64-bit) boundary */ + + +#define mmx_i2r(op,imm,reg) \ + __asm__ __volatile__ (#op " %0, %%" #reg \ + : /* nothing */ \ + : "i" (imm) ) + +#define mmx_m2r(op,mem,reg) \ + __asm__ __volatile__ (#op " %0, %%" #reg \ + : /* nothing */ \ + : "m" (mem)) + +#define mmx_r2m(op,reg,mem) \ + __asm__ __volatile__ (#op " %%" #reg ", %0" \ + : "=m" (mem) \ + : /* nothing */ ) + +#define mmx_r2r(op,regs,regd) \ + __asm__ __volatile__ (#op " %" #regs ", %" #regd) + + +#define emms() __asm__ __volatile__ ("emms") + +#define movd_m2r(var,reg) mmx_m2r (movd, var, reg) +#define movd_r2m(reg,var) mmx_r2m (movd, reg, var) +#define movd_r2r(regs,regd) mmx_r2r (movd, regs, regd) + +#define movq_m2r(var,reg) mmx_m2r (movq, var, reg) +#define movq_r2m(reg,var) mmx_r2m (movq, reg, var) +#define movq_r2r(regs,regd) mmx_r2r (movq, regs, regd) + +#define packssdw_m2r(var,reg) mmx_m2r (packssdw, var, reg) +#define packssdw_r2r(regs,regd) mmx_r2r (packssdw, regs, regd) +#define packsswb_m2r(var,reg) mmx_m2r (packsswb, var, reg) +#define packsswb_r2r(regs,regd) mmx_r2r (packsswb, regs, regd) + +#define packuswb_m2r(var,reg) mmx_m2r (packuswb, var, reg) +#define packuswb_r2r(regs,regd) mmx_r2r (packuswb, regs, regd) + +#define paddb_m2r(var,reg) mmx_m2r (paddb, var, reg) +#define paddb_r2r(regs,regd) mmx_r2r (paddb, regs, regd) +#define paddd_m2r(var,reg) mmx_m2r (paddd, var, reg) +#define paddd_r2r(regs,regd) mmx_r2r (paddd, regs, regd) +#define paddw_m2r(var,reg) mmx_m2r (paddw, var, reg) +#define paddw_r2r(regs,regd) mmx_r2r (paddw, regs, regd) + +#define paddsb_m2r(var,reg) mmx_m2r (paddsb, var, reg) +#define paddsb_r2r(regs,regd) mmx_r2r (paddsb, regs, regd) +#define paddsw_m2r(var,reg) mmx_m2r (paddsw, var, reg) +#define paddsw_r2r(regs,regd) mmx_r2r (paddsw, regs, regd) + +#define paddusb_m2r(var,reg) mmx_m2r (paddusb, var, reg) +#define paddusb_r2r(regs,regd) mmx_r2r (paddusb, regs, regd) +#define paddusw_m2r(var,reg) mmx_m2r (paddusw, var, reg) +#define paddusw_r2r(regs,regd) mmx_r2r (paddusw, regs, regd) + +#define pand_m2r(var,reg) mmx_m2r (pand, var, reg) +#define pand_r2r(regs,regd) mmx_r2r (pand, regs, regd) + +#define pandn_m2r(var,reg) mmx_m2r (pandn, var, reg) +#define pandn_r2r(regs,regd) mmx_r2r (pandn, regs, regd) + +#define pcmpeqb_m2r(var,reg) mmx_m2r (pcmpeqb, var, reg) +#define pcmpeqb_r2r(regs,regd) mmx_r2r (pcmpeqb, regs, regd) +#define pcmpeqd_m2r(var,reg) mmx_m2r (pcmpeqd, var, reg) +#define pcmpeqd_r2r(regs,regd) mmx_r2r (pcmpeqd, regs, regd) +#define pcmpeqw_m2r(var,reg) mmx_m2r (pcmpeqw, var, reg) +#define pcmpeqw_r2r(regs,regd) mmx_r2r (pcmpeqw, regs, regd) + +#define pcmpgtb_m2r(var,reg) mmx_m2r (pcmpgtb, var, reg) +#define pcmpgtb_r2r(regs,regd) mmx_r2r (pcmpgtb, regs, regd) +#define pcmpgtd_m2r(var,reg) mmx_m2r (pcmpgtd, var, reg) +#define 
pcmpgtd_r2r(regs,regd) mmx_r2r (pcmpgtd, regs, regd) +#define pcmpgtw_m2r(var,reg) mmx_m2r (pcmpgtw, var, reg) +#define pcmpgtw_r2r(regs,regd) mmx_r2r (pcmpgtw, regs, regd) + +#define pmaddwd_m2r(var,reg) mmx_m2r (pmaddwd, var, reg) +#define pmaddwd_r2r(regs,regd) mmx_r2r (pmaddwd, regs, regd) + +#define pmulhw_m2r(var,reg) mmx_m2r (pmulhw, var, reg) +#define pmulhw_r2r(regs,regd) mmx_r2r (pmulhw, regs, regd) + +#define pmullw_m2r(var,reg) mmx_m2r (pmullw, var, reg) +#define pmullw_r2r(regs,regd) mmx_r2r (pmullw, regs, regd) + +#define por_m2r(var,reg) mmx_m2r (por, var, reg) +#define por_r2r(regs,regd) mmx_r2r (por, regs, regd) + +#define pslld_i2r(imm,reg) mmx_i2r (pslld, imm, reg) +#define pslld_m2r(var,reg) mmx_m2r (pslld, var, reg) +#define pslld_r2r(regs,regd) mmx_r2r (pslld, regs, regd) +#define psllq_i2r(imm,reg) mmx_i2r (psllq, imm, reg) +#define psllq_m2r(var,reg) mmx_m2r (psllq, var, reg) +#define psllq_r2r(regs,regd) mmx_r2r (psllq, regs, regd) +#define psllw_i2r(imm,reg) mmx_i2r (psllw, imm, reg) +#define psllw_m2r(var,reg) mmx_m2r (psllw, var, reg) +#define psllw_r2r(regs,regd) mmx_r2r (psllw, regs, regd) + +#define psrad_i2r(imm,reg) mmx_i2r (psrad, imm, reg) +#define psrad_m2r(var,reg) mmx_m2r (psrad, var, reg) +#define psrad_r2r(regs,regd) mmx_r2r (psrad, regs, regd) +#define psraw_i2r(imm,reg) mmx_i2r (psraw, imm, reg) +#define psraw_m2r(var,reg) mmx_m2r (psraw, var, reg) +#define psraw_r2r(regs,regd) mmx_r2r (psraw, regs, regd) + +#define psrld_i2r(imm,reg) mmx_i2r (psrld, imm, reg) +#define psrld_m2r(var,reg) mmx_m2r (psrld, var, reg) +#define psrld_r2r(regs,regd) mmx_r2r (psrld, regs, regd) +#define psrlq_i2r(imm,reg) mmx_i2r (psrlq, imm, reg) +#define psrlq_m2r(var,reg) mmx_m2r (psrlq, var, reg) +#define psrlq_r2r(regs,regd) mmx_r2r (psrlq, regs, regd) +#define psrlw_i2r(imm,reg) mmx_i2r (psrlw, imm, reg) +#define psrlw_m2r(var,reg) mmx_m2r (psrlw, var, reg) +#define psrlw_r2r(regs,regd) mmx_r2r (psrlw, regs, regd) + +#define psubb_m2r(var,reg) mmx_m2r (psubb, var, reg) +#define psubb_r2r(regs,regd) mmx_r2r (psubb, regs, regd) +#define psubd_m2r(var,reg) mmx_m2r (psubd, var, reg) +#define psubd_r2r(regs,regd) mmx_r2r (psubd, regs, regd) +#define psubw_m2r(var,reg) mmx_m2r (psubw, var, reg) +#define psubw_r2r(regs,regd) mmx_r2r (psubw, regs, regd) + +#define psubsb_m2r(var,reg) mmx_m2r (psubsb, var, reg) +#define psubsb_r2r(regs,regd) mmx_r2r (psubsb, regs, regd) +#define psubsw_m2r(var,reg) mmx_m2r (psubsw, var, reg) +#define psubsw_r2r(regs,regd) mmx_r2r (psubsw, regs, regd) + +#define psubusb_m2r(var,reg) mmx_m2r (psubusb, var, reg) +#define psubusb_r2r(regs,regd) mmx_r2r (psubusb, regs, regd) +#define psubusw_m2r(var,reg) mmx_m2r (psubusw, var, reg) +#define psubusw_r2r(regs,regd) mmx_r2r (psubusw, regs, regd) + +#define punpckhbw_m2r(var,reg) mmx_m2r (punpckhbw, var, reg) +#define punpckhbw_r2r(regs,regd) mmx_r2r (punpckhbw, regs, regd) +#define punpckhdq_m2r(var,reg) mmx_m2r (punpckhdq, var, reg) +#define punpckhdq_r2r(regs,regd) mmx_r2r (punpckhdq, regs, regd) +#define punpckhwd_m2r(var,reg) mmx_m2r (punpckhwd, var, reg) +#define punpckhwd_r2r(regs,regd) mmx_r2r (punpckhwd, regs, regd) + +#define punpcklbw_m2r(var,reg) mmx_m2r (punpcklbw, var, reg) +#define punpcklbw_r2r(regs,regd) mmx_r2r (punpcklbw, regs, regd) +#define punpckldq_m2r(var,reg) mmx_m2r (punpckldq, var, reg) +#define punpckldq_r2r(regs,regd) mmx_r2r (punpckldq, regs, regd) +#define punpcklwd_m2r(var,reg) mmx_m2r (punpcklwd, var, reg) +#define punpcklwd_r2r(regs,regd) mmx_r2r (punpcklwd, regs, 
regd) + +#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg) +#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd) + + +/* 3DNOW extensions */ + +#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg) +#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd) + + +/* AMD MMX extensions - also available in intel SSE */ + + +#define mmx_m2ri(op,mem,reg,imm) \ + __asm__ __volatile__ (#op " %1, %0, %%" #reg \ + : /* nothing */ \ + : "X" (mem), "X" (imm)) +#define mmx_r2ri(op,regs,regd,imm) \ + __asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \ + : /* nothing */ \ + : "X" (imm) ) + +#define mmx_fetch(mem,hint) \ + __asm__ __volatile__ ("prefetch" #hint " %0" \ + : /* nothing */ \ + : "X" (mem)) + + +#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg) + +#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var) + +#define pavgb_m2r(var,reg) mmx_m2r (pavgb, var, reg) +#define pavgb_r2r(regs,regd) mmx_r2r (pavgb, regs, regd) +#define pavgw_m2r(var,reg) mmx_m2r (pavgw, var, reg) +#define pavgw_r2r(regs,regd) mmx_r2r (pavgw, regs, regd) + +#define pextrw_r2r(mmreg,reg,imm) mmx_r2ri (pextrw, mmreg, reg, imm) + +#define pinsrw_r2r(reg,mmreg,imm) mmx_r2ri (pinsrw, reg, mmreg, imm) + +#define pmaxsw_m2r(var,reg) mmx_m2r (pmaxsw, var, reg) +#define pmaxsw_r2r(regs,regd) mmx_r2r (pmaxsw, regs, regd) + +#define pmaxub_m2r(var,reg) mmx_m2r (pmaxub, var, reg) +#define pmaxub_r2r(regs,regd) mmx_r2r (pmaxub, regs, regd) + +#define pminsw_m2r(var,reg) mmx_m2r (pminsw, var, reg) +#define pminsw_r2r(regs,regd) mmx_r2r (pminsw, regs, regd) + +#define pminub_m2r(var,reg) mmx_m2r (pminub, var, reg) +#define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd) + +#define pmovmskb(mmreg,reg) \ + __asm__ __volatile__ ("movmskps %" #mmreg ", %" #reg) + +#define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg) +#define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd) + +#define prefetcht0(mem) mmx_fetch (mem, t0) +#define prefetcht1(mem) mmx_fetch (mem, t1) +#define prefetcht2(mem) mmx_fetch (mem, t2) +#define prefetchnta(mem) mmx_fetch (mem, nta) + +#define psadbw_m2r(var,reg) mmx_m2r (psadbw, var, reg) +#define psadbw_r2r(regs,regd) mmx_r2r (psadbw, regs, regd) + +#define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm) +#define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm) + +#define sfence() __asm__ __volatile__ ("sfence\n\t") + +#endif /* AVCODEC_I386MMX_H */ diff --git a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c b/src/libffmpeg/libavcodec/i386/motion_est_mmx.c index 950100e63..f32afae0b 100644 --- a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c +++ b/src/libffmpeg/libavcodec/i386/motion_est_mmx.c @@ -1,6 +1,7 @@ /* * MMX optimized motion estimation * Copyright (c) 2001 Fabrice Bellard. 
+ * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -26,11 +27,11 @@ static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={ 0x0002000200020002ULL, }; -static const __attribute__ ((aligned(8), unused)) uint64_t bone= 0x0101010101010101LL; +static __attribute__ ((aligned(8), unused)) uint64_t bone= 0x0101010101010101LL; -static inline void sad8_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) +static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) { - int len= -(stride<<h); + int len= -(stride*h); asm volatile( ".balign 16 \n\t" "1: \n\t" @@ -64,9 +65,9 @@ static inline void sad8_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) ); } -static inline void sad8_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) +static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) { - int len= -(stride<<h); + int len= -(stride*h); asm volatile( ".balign 16 \n\t" "1: \n\t" @@ -88,7 +89,7 @@ static inline void sad8_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h) { - int len= -(stride<<h); + int len= -(stride*h); asm volatile( ".balign 16 \n\t" "1: \n\t" @@ -114,7 +115,7 @@ static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, in static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) { //FIXME reuse src - int len= -(stride<<h); + int len= -(stride*h); asm volatile( ".balign 16 \n\t" "movq "MANGLE(bone)", %%mm5 \n\t" @@ -151,7 +152,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h) { - int len= -(stride<<h); + int len= -(stride*h); asm volatile( ".balign 16 \n\t" "1: \n\t" @@ -189,7 +190,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) { - int len= -(stride<<h); + int len= -(stride*h); asm volatile( ".balign 16 \n\t" "1: \n\t" @@ -265,85 +266,69 @@ static inline int sum_mmx2(void) #define PIX_SAD(suf)\ -static int pix_abs8x8_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ +static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ + assert(h==8);\ asm volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t":);\ \ - sad8_ ## suf(blk1, blk2, stride, 3);\ + sad8_1_ ## suf(blk1, blk2, stride, 8);\ \ return sum_ ## suf();\ }\ -static int sad8x8_ ## suf(void *s, uint8_t *blk2, uint8_t *blk1, int stride)\ -{\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t":);\ -\ - sad8_ ## suf(blk1, blk2, stride, 3);\ -\ - return sum_ ## suf();\ -}\ -\ -static int pix_abs8x8_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ +static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ + assert(h==8);\ asm volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ "movq %0, %%mm5 \n\t"\ :: "m"(round_tab[1]) \ );\ \ - sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 3);\ + sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 8);\ \ return sum_ ## suf();\ }\ \ -static int pix_abs8x8_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ +static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ + assert(h==8);\ asm volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor 
%%mm6, %%mm6 \n\t"\ "movq %0, %%mm5 \n\t"\ :: "m"(round_tab[1]) \ );\ \ - sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 3);\ + sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 8);\ \ return sum_ ## suf();\ }\ \ -static int pix_abs8x8_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ +static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ + assert(h==8);\ asm volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ "movq %0, %%mm5 \n\t"\ :: "m"(round_tab[2]) \ );\ \ - sad8_4_ ## suf(blk1, blk2, stride, 3);\ + sad8_4_ ## suf(blk1, blk2, stride, 8);\ \ return sum_ ## suf();\ }\ \ -static int pix_abs16x16_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ -{\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t":);\ -\ - sad8_ ## suf(blk1 , blk2 , stride, 4);\ - sad8_ ## suf(blk1+8, blk2+8, stride, 4);\ -\ - return sum_ ## suf();\ -}\ -static int sad16x16_ ## suf(void *s, uint8_t *blk2, uint8_t *blk1, int stride)\ +static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ asm volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t":);\ \ - sad8_ ## suf(blk1 , blk2 , stride, 4);\ - sad8_ ## suf(blk1+8, blk2+8, stride, 4);\ + sad8_1_ ## suf(blk1 , blk2 , stride, h);\ + sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\ \ return sum_ ## suf();\ }\ -static int pix_abs16x16_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ +static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ asm volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ @@ -351,12 +336,12 @@ static int pix_abs16x16_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ :: "m"(round_tab[1]) \ );\ \ - sad8_2_ ## suf(blk1 , blk1+1, blk2 , stride, 4);\ - sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, 4);\ + sad8_2_ ## suf(blk1 , blk1+1, blk2 , stride, h);\ + sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, h);\ \ return sum_ ## suf();\ }\ -static int pix_abs16x16_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ +static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ asm volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ @@ -364,12 +349,12 @@ static int pix_abs16x16_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ :: "m"(round_tab[1]) \ );\ \ - sad8_2_ ## suf(blk1 , blk1+stride, blk2 , stride, 4);\ - sad8_2_ ## suf(blk1+8, blk1+stride+8,blk2+8, stride, 4);\ + sad8_2_ ## suf(blk1 , blk1+stride, blk2 , stride, h);\ + sad8_2_ ## suf(blk1+8, blk1+stride+8,blk2+8, stride, h);\ \ return sum_ ## suf();\ }\ -static int pix_abs16x16_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ +static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ {\ asm volatile("pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ @@ -377,8 +362,8 @@ static int pix_abs16x16_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\ :: "m"(round_tab[2]) \ );\ \ - sad8_4_ ## suf(blk1 , blk2 , stride, 4);\ - sad8_4_ ## suf(blk1+8, blk2+8, stride, 4);\ + sad8_4_ ## suf(blk1 , blk2 , stride, h);\ + sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\ \ return sum_ ## suf();\ }\ @@ -389,32 +374,32 @@ PIX_SAD(mmx2) void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx) { if (mm_flags & MM_MMX) { - c->pix_abs16x16 = pix_abs16x16_mmx; - c->pix_abs16x16_x2 = pix_abs16x16_x2_mmx; - c->pix_abs16x16_y2 = pix_abs16x16_y2_mmx; - c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx; - c->pix_abs8x8 = pix_abs8x8_mmx; - c->pix_abs8x8_x2 = pix_abs8x8_x2_mmx; - c->pix_abs8x8_y2 = 
pix_abs8x8_y2_mmx; - c->pix_abs8x8_xy2 = pix_abs8x8_xy2_mmx; + c->pix_abs[0][0] = sad16_mmx; + c->pix_abs[0][1] = sad16_x2_mmx; + c->pix_abs[0][2] = sad16_y2_mmx; + c->pix_abs[0][3] = sad16_xy2_mmx; + c->pix_abs[1][0] = sad8_mmx; + c->pix_abs[1][1] = sad8_x2_mmx; + c->pix_abs[1][2] = sad8_y2_mmx; + c->pix_abs[1][3] = sad8_xy2_mmx; - c->sad[0]= sad16x16_mmx; - c->sad[1]= sad8x8_mmx; + c->sad[0]= sad16_mmx; + c->sad[1]= sad8_mmx; } if (mm_flags & MM_MMXEXT) { - c->pix_abs16x16 = pix_abs16x16_mmx2; - c->pix_abs8x8 = pix_abs8x8_mmx2; + c->pix_abs[0][0] = sad16_mmx2; + c->pix_abs[1][0] = sad8_mmx2; - c->sad[0]= sad16x16_mmx2; - c->sad[1]= sad8x8_mmx2; + c->sad[0]= sad16_mmx2; + c->sad[1]= sad8_mmx2; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->pix_abs16x16_x2 = pix_abs16x16_x2_mmx2; - c->pix_abs16x16_y2 = pix_abs16x16_y2_mmx2; - c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx2; - c->pix_abs8x8_x2 = pix_abs8x8_x2_mmx2; - c->pix_abs8x8_y2 = pix_abs8x8_y2_mmx2; - c->pix_abs8x8_xy2 = pix_abs8x8_xy2_mmx2; + c->pix_abs[0][1] = sad16_x2_mmx2; + c->pix_abs[0][2] = sad16_y2_mmx2; + c->pix_abs[0][3] = sad16_xy2_mmx2; + c->pix_abs[1][1] = sad8_x2_mmx2; + c->pix_abs[1][2] = sad8_y2_mmx2; + c->pix_abs[1][3] = sad8_xy2_mmx2; } } } diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c index d2f477b7b..1c0e9f5ae 100644 --- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c +++ b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c @@ -31,31 +31,92 @@ static const unsigned long long int mm_wabs __attribute__ ((aligned(8))) = 0xfff static const unsigned long long int mm_wone __attribute__ ((aligned(8))) = 0x0001000100010001ULL; -static void dct_unquantize_h263_mmx(MpegEncContext *s, +static void dct_unquantize_h263_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int level, qmul, qadd, nCoeffs; qmul = qscale << 1; - qadd = (qscale - 1) | 1; - assert(s->block_last_index[n]>=0); + assert(s->block_last_index[n]>=0 || s->h263_aic); - if (s->mb_intra) { - if (!s->h263_aic) { - if (n < 4) - level = block[0] * s->y_dc_scale; - else - level = block[0] * s->c_dc_scale; - }else{ - qadd = 0; - level= block[0]; - } - nCoeffs=63; - } else { - nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; - level = 0;/* keep gcc quiet */ + if (!s->h263_aic) { + if (n < 4) + level = block[0] * s->y_dc_scale; + else + level = block[0] * s->c_dc_scale; + qadd = (qscale - 1) | 1; + }else{ + qadd = 0; + level= block[0]; } + if(s->ac_pred) + nCoeffs=63; + else + nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; +//printf("%d %d ", qmul, qadd); +asm volatile( + "movd %1, %%mm6 \n\t" //qmul + "packssdw %%mm6, %%mm6 \n\t" + "packssdw %%mm6, %%mm6 \n\t" + "movd %2, %%mm5 \n\t" //qadd + "pxor %%mm7, %%mm7 \n\t" + "packssdw %%mm5, %%mm5 \n\t" + "packssdw %%mm5, %%mm5 \n\t" + "psubw %%mm5, %%mm7 \n\t" + "pxor %%mm4, %%mm4 \n\t" + ".balign 16\n\t" + "1: \n\t" + "movq (%0, %3), %%mm0 \n\t" + "movq 8(%0, %3), %%mm1 \n\t" + + "pmullw %%mm6, %%mm0 \n\t" + "pmullw %%mm6, %%mm1 \n\t" + + "movq (%0, %3), %%mm2 \n\t" + "movq 8(%0, %3), %%mm3 \n\t" + + "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 + "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 + + "pxor %%mm2, %%mm0 \n\t" + "pxor %%mm3, %%mm1 \n\t" + + "paddw %%mm7, %%mm0 \n\t" + "paddw %%mm7, %%mm1 \n\t" + + "pxor %%mm0, %%mm2 \n\t" + "pxor %%mm1, %%mm3 \n\t" + + "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0 + "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? 
-1 : 0 + + "pandn %%mm2, %%mm0 \n\t" + "pandn %%mm3, %%mm1 \n\t" + + "movq %%mm0, (%0, %3) \n\t" + "movq %%mm1, 8(%0, %3) \n\t" + + "addl $16, %3 \n\t" + "jng 1b \n\t" + ::"r" (block+nCoeffs), "g"(qmul), "g" (qadd), "r" (2*(-nCoeffs)) + : "memory" + ); + block[0]= level; +} + + +static void dct_unquantize_h263_inter_mmx(MpegEncContext *s, + DCTELEM *block, int n, int qscale) +{ + int level, qmul, qadd, nCoeffs; + + qmul = qscale << 1; + qadd = (qscale - 1) | 1; + + assert(s->block_last_index[n]>=0 || s->h263_aic); + + nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; //printf("%d %d ", qmul, qadd); asm volatile( "movd %1, %%mm6 \n\t" //qmul @@ -104,8 +165,6 @@ asm volatile( ::"r" (block+nCoeffs), "g"(qmul), "g" (qadd), "r" (2*(-nCoeffs)) : "memory" ); - if(s->mb_intra) - block[0]= level; } @@ -138,24 +197,23 @@ asm volatile( high3:low3 = low1*low2 high3 += tlow1 */ -static void dct_unquantize_mpeg1_mmx(MpegEncContext *s, +static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int nCoeffs; const uint16_t *quant_matrix; + int block0; assert(s->block_last_index[n]>=0); nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1; - if (s->mb_intra) { - int block0; - if (n < 4) - block0 = block[0] * s->y_dc_scale; - else - block0 = block[0] * s->c_dc_scale; - /* XXX: only mpeg1 */ - quant_matrix = s->intra_matrix; + if (n < 4) + block0 = block[0] * s->y_dc_scale; + else + block0 = block[0] * s->c_dc_scale; + /* XXX: only mpeg1 */ + quant_matrix = s->intra_matrix; asm volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $15, %%mm7 \n\t" @@ -205,9 +263,19 @@ asm volatile( ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs) : "%eax", "memory" ); - block[0]= block0; + block[0]= block0; +} + +static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s, + DCTELEM *block, int n, int qscale) +{ + int nCoeffs; + const uint16_t *quant_matrix; + + assert(s->block_last_index[n]>=0); + + nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1; - } else { quant_matrix = s->inter_matrix; asm volatile( "pcmpeqw %%mm7, %%mm7 \n\t" @@ -262,28 +330,25 @@ asm volatile( ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs) : "%eax", "memory" ); - } - } -static void dct_unquantize_mpeg2_mmx(MpegEncContext *s, +static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int nCoeffs; const uint16_t *quant_matrix; + int block0; assert(s->block_last_index[n]>=0); if(s->alternate_scan) nCoeffs= 63; //FIXME else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; - if (s->mb_intra) { - int block0; - if (n < 4) - block0 = block[0] * s->y_dc_scale; - else - block0 = block[0] * s->c_dc_scale; - quant_matrix = s->intra_matrix; + if (n < 4) + block0 = block[0] * s->y_dc_scale; + else + block0 = block[0] * s->c_dc_scale; + quant_matrix = s->intra_matrix; asm volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $15, %%mm7 \n\t" @@ -329,10 +394,21 @@ asm volatile( ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs) : "%eax", "memory" ); - block[0]= block0; + block[0]= block0; //Note, we dont do mismatch control for intra as errors cannot accumulate +} + +static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s, + DCTELEM *block, int n, int qscale) +{ + int nCoeffs; + const uint16_t *quant_matrix; + + assert(s->block_last_index[n]>=0); + + if(s->alternate_scan) nCoeffs= 63; //FIXME + else nCoeffs= s->intra_scantable.raster_end[ 
s->block_last_index[n] ]; - } else { quant_matrix = s->inter_matrix; asm volatile( "pcmpeqw %%mm7, %%mm7 \n\t" @@ -397,7 +473,6 @@ asm volatile( ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "r" (-2*nCoeffs) : "%eax", "memory" ); - } } /* draw the edges of width 'w' of an image of size width, height @@ -488,13 +563,130 @@ static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w) } } +static void denoise_dct_mmx(MpegEncContext *s, DCTELEM *block){ + const int intra= s->mb_intra; + int *sum= s->dct_error_sum[intra]; + uint16_t *offset= s->dct_offset[intra]; + + s->dct_count[intra]++; + + asm volatile( + "pxor %%mm7, %%mm7 \n\t" + "1: \n\t" + "pxor %%mm0, %%mm0 \n\t" + "pxor %%mm1, %%mm1 \n\t" + "movq (%0), %%mm2 \n\t" + "movq 8(%0), %%mm3 \n\t" + "pcmpgtw %%mm2, %%mm0 \n\t" + "pcmpgtw %%mm3, %%mm1 \n\t" + "pxor %%mm0, %%mm2 \n\t" + "pxor %%mm1, %%mm3 \n\t" + "psubw %%mm0, %%mm2 \n\t" + "psubw %%mm1, %%mm3 \n\t" + "movq %%mm2, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psubusw (%2), %%mm2 \n\t" + "psubusw 8(%2), %%mm3 \n\t" + "pxor %%mm0, %%mm2 \n\t" + "pxor %%mm1, %%mm3 \n\t" + "psubw %%mm0, %%mm2 \n\t" + "psubw %%mm1, %%mm3 \n\t" + "movq %%mm2, (%0) \n\t" + "movq %%mm3, 8(%0) \n\t" + "movq %%mm4, %%mm2 \n\t" + "movq %%mm5, %%mm3 \n\t" + "punpcklwd %%mm7, %%mm4 \n\t" + "punpckhwd %%mm7, %%mm2 \n\t" + "punpcklwd %%mm7, %%mm5 \n\t" + "punpckhwd %%mm7, %%mm3 \n\t" + "paddd (%1), %%mm4 \n\t" + "paddd 8(%1), %%mm2 \n\t" + "paddd 16(%1), %%mm5 \n\t" + "paddd 24(%1), %%mm3 \n\t" + "movq %%mm4, (%1) \n\t" + "movq %%mm2, 8(%1) \n\t" + "movq %%mm5, 16(%1) \n\t" + "movq %%mm3, 24(%1) \n\t" + "addl $16, %0 \n\t" + "addl $32, %1 \n\t" + "addl $16, %2 \n\t" + "cmpl %3, %0 \n\t" + " jb 1b \n\t" + : "+r" (block), "+r" (sum), "+r" (offset) + : "r"(block+64) + ); +} + +static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){ + const int intra= s->mb_intra; + int *sum= s->dct_error_sum[intra]; + uint16_t *offset= s->dct_offset[intra]; + + s->dct_count[intra]++; + + asm volatile( + "pxor %%xmm7, %%xmm7 \n\t" + "1: \n\t" + "pxor %%xmm0, %%xmm0 \n\t" + "pxor %%xmm1, %%xmm1 \n\t" + "movdqa (%0), %%xmm2 \n\t" + "movdqa 16(%0), %%xmm3 \n\t" + "pcmpgtw %%xmm2, %%xmm0 \n\t" + "pcmpgtw %%xmm3, %%xmm1 \n\t" + "pxor %%xmm0, %%xmm2 \n\t" + "pxor %%xmm1, %%xmm3 \n\t" + "psubw %%xmm0, %%xmm2 \n\t" + "psubw %%xmm1, %%xmm3 \n\t" + "movdqa %%xmm2, %%xmm4 \n\t" + "movdqa %%xmm3, %%xmm5 \n\t" + "psubusw (%2), %%xmm2 \n\t" + "psubusw 16(%2), %%xmm3 \n\t" + "pxor %%xmm0, %%xmm2 \n\t" + "pxor %%xmm1, %%xmm3 \n\t" + "psubw %%xmm0, %%xmm2 \n\t" + "psubw %%xmm1, %%xmm3 \n\t" + "movdqa %%xmm2, (%0) \n\t" + "movdqa %%xmm3, 16(%0) \n\t" + "movdqa %%xmm4, %%xmm6 \n\t" + "movdqa %%xmm5, %%xmm0 \n\t" + "punpcklwd %%xmm7, %%xmm4 \n\t" + "punpckhwd %%xmm7, %%xmm6 \n\t" + "punpcklwd %%xmm7, %%xmm5 \n\t" + "punpckhwd %%xmm7, %%xmm0 \n\t" + "paddd (%1), %%xmm4 \n\t" + "paddd 16(%1), %%xmm6 \n\t" + "paddd 32(%1), %%xmm5 \n\t" + "paddd 48(%1), %%xmm0 \n\t" + "movdqa %%xmm4, (%1) \n\t" + "movdqa %%xmm6, 16(%1) \n\t" + "movdqa %%xmm5, 32(%1) \n\t" + "movdqa %%xmm0, 48(%1) \n\t" + "addl $32, %0 \n\t" + "addl $64, %1 \n\t" + "addl $32, %2 \n\t" + "cmpl %3, %0 \n\t" + " jb 1b \n\t" + : "+r" (block), "+r" (sum), "+r" (offset) + : "r"(block+64) + ); +} + #undef HAVE_MMX2 #define RENAME(a) a ## _MMX +#define RENAMEl(a) a ## _mmx #include "mpegvideo_mmx_template.c" #define HAVE_MMX2 #undef RENAME +#undef RENAMEl #define RENAME(a) a ## _MMX2 +#define RENAMEl(a) a ## _mmx2 +#include "mpegvideo_mmx_template.c" + 
+#undef RENAME +#undef RENAMEl +#define RENAME(a) a ## _SSE2 +#define RENAMEl(a) a ## _sse2 #include "mpegvideo_mmx_template.c" void MPV_common_init_mmx(MpegEncContext *s) @@ -502,14 +694,25 @@ void MPV_common_init_mmx(MpegEncContext *s) if (mm_flags & MM_MMX) { const int dct_algo = s->avctx->dct_algo; - s->dct_unquantize_h263 = dct_unquantize_h263_mmx; - s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_mmx; - s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_mmx; + s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx; + s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx; + s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx; + s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx; + s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx; + s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx; draw_edges = draw_edges_mmx; + + if (mm_flags & MM_SSE2) { + s->denoise_dct= denoise_dct_sse2; + } else { + s->denoise_dct= denoise_dct_mmx; + } if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ - if(mm_flags & MM_MMXEXT){ + if(mm_flags & MM_SSE2){ + s->dct_quantize= dct_quantize_SSE2; + } else if(mm_flags & MM_MMXEXT){ s->dct_quantize= dct_quantize_MMX2; } else { s->dct_quantize= dct_quantize_MMX; diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c index 706211eec..d4ed61ecb 100644 --- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c +++ b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c @@ -43,7 +43,10 @@ static int RENAME(dct_quantize)(MpegEncContext *s, assert((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly? //s->fdct (block); - ff_fdct_mmx (block); //cant be anything else ... + RENAMEl(ff_fdct) (block); //cant be anything else ... 
+ + if(s->dct_error_sum) + s->denoise_dct(s, block); if (s->mb_intra) { int dummy; @@ -76,12 +79,12 @@ static int RENAME(dct_quantize)(MpegEncContext *s, block[0]=0; //avoid fake overflow // temp_block[0] = (block[0] + (q >> 1)) / q; last_non_zero_p1 = 1; - bias = s->q_intra_matrix16_bias[qscale]; - qmat = s->q_intra_matrix16[qscale]; + bias = s->q_intra_matrix16[qscale][1]; + qmat = s->q_intra_matrix16[qscale][0]; } else { last_non_zero_p1 = 0; - bias = s->q_inter_matrix16_bias[qscale]; - qmat = s->q_inter_matrix16[qscale]; + bias = s->q_inter_matrix16[qscale][1]; + qmat = s->q_inter_matrix16[qscale][0]; } if(s->out_format == FMT_H263 && s->mpeg_quant==0){ diff --git a/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c b/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c index 1ee88b634..626c1f565 100644 --- a/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c +++ b/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c @@ -18,7 +18,6 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "../dsputil.h" -#include "../simple_idct.h" /* 23170.475006 diff --git a/src/libffmpeg/libavcodec/idcinvideo.c b/src/libffmpeg/libavcodec/idcinvideo.c index b3a658296..e53246bd1 100644 --- a/src/libffmpeg/libavcodec/idcinvideo.c +++ b/src/libffmpeg/libavcodec/idcinvideo.c @@ -158,7 +158,7 @@ static int idcin_decode_init(AVCodecContext *avctx) /* make sure the Huffman tables make it */ if (s->avctx->extradata_size != HUFFMAN_TABLE_SIZE) { - printf(" Id CIN video: expected extradata size of %d\n", HUFFMAN_TABLE_SIZE); + av_log(s->avctx, AV_LOG_ERROR, " Id CIN video: expected extradata size of %d\n", HUFFMAN_TABLE_SIZE); return -1; } @@ -193,7 +193,7 @@ static void idcin_decode_vlcs(IdcinContext *s) while(node_num >= HUF_TOKENS) { if(!bit_pos) { if(dat_pos > s->size) { - printf("Huffman decode error.\n"); + av_log(s->avctx, AV_LOG_ERROR, "Huffman decode error.\n"); return; } bit_pos = 8; @@ -225,7 +225,7 @@ static int idcin_decode_frame(AVCodecContext *avctx, avctx->release_buffer(avctx, &s->frame); if (avctx->get_buffer(avctx, &s->frame)) { - printf (" Id CIN Video: get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, " Id CIN Video: get_buffer() failed\n"); return -1; } diff --git a/src/libffmpeg/libavcodec/imgconvert.c b/src/libffmpeg/libavcodec/imgconvert.c index 93e19ff59..b351d2219 100644 --- a/src/libffmpeg/libavcodec/imgconvert.c +++ b/src/libffmpeg/libavcodec/imgconvert.c @@ -147,6 +147,7 @@ static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, + .x_chroma_shift = 0, .y_chroma_shift = 0, }, [PIX_FMT_BGR24] = { .name = "bgr24", @@ -154,6 +155,7 @@ static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, + .x_chroma_shift = 0, .y_chroma_shift = 0, }, [PIX_FMT_RGBA32] = { .name = "rgba32", @@ -161,6 +163,7 @@ static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 8, + .x_chroma_shift = 0, .y_chroma_shift = 0, }, [PIX_FMT_RGB565] = { .name = "rgb565", @@ -168,6 +171,7 @@ static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, + .x_chroma_shift = 0, .y_chroma_shift = 0, }, [PIX_FMT_RGB555] = { .name = "rgb555", @@ -175,6 +179,7 @@ static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED, .depth = 5, + .x_chroma_shift = 0, .y_chroma_shift = 0, }, /* gray / mono formats */ @@ -535,7 +540,7 @@ 
static void img_copy_plane(uint8_t *dst, int dst_wrap, /** * Copy image 'src' to 'dst'. */ -static void img_copy(AVPicture *dst, const AVPicture *src, +void img_copy(AVPicture *dst, const AVPicture *src, int pix_fmt, int width, int height) { int bwidth, bits, i; diff --git a/src/libffmpeg/libavcodec/indeo3.c b/src/libffmpeg/libavcodec/indeo3.c index d377eb993..cad8e982b 100644 --- a/src/libffmpeg/libavcodec/indeo3.c +++ b/src/libffmpeg/libavcodec/indeo3.c @@ -622,7 +622,7 @@ static void iv_Decode_Chunk(Indeo3DecodeContext *s, break; case 9: - fprintf(stderr, "UNTESTED.\n"); + av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n"); lv1 = *buf1++; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); @@ -791,7 +791,7 @@ static void iv_Decode_Chunk(Indeo3DecodeContext *s, break; case 9: - fprintf(stderr, "UNTESTED.\n"); + av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n"); lv1 = *buf1; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); @@ -887,7 +887,7 @@ static void iv_Decode_Chunk(Indeo3DecodeContext *s, break; case 9: - fprintf(stderr, "UNTESTED.\n"); + av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n"); lv1 = *buf1; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); @@ -980,7 +980,7 @@ static void iv_Decode_Chunk(Indeo3DecodeContext *s, break; case 9: - fprintf(stderr, "UNTESTED.\n"); + av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n"); lv1 = *buf1++; lv = (lv1 & 0x7F) << 1; lv += (lv << 8); @@ -1063,7 +1063,7 @@ static int indeo3_decode_frame(AVCodecContext *avctx, s->frame.reference = 0; if(avctx->get_buffer(avctx, &s->frame) < 0) { - fprintf(stderr, "get_buffer() failed\n"); + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } diff --git a/src/libffmpeg/libavcodec/interplayvideo.c b/src/libffmpeg/libavcodec/interplayvideo.c index 72e037747..06816ba3e 100644 --- a/src/libffmpeg/libavcodec/interplayvideo.c +++ b/src/libffmpeg/libavcodec/interplayvideo.c @@ -76,7 +76,7 @@ typedef struct IpvideoContext { #define CHECK_STREAM_PTR(n) \ if ((s->stream_ptr + n) > s->stream_end) { \ - printf ("Interplay video warning: stream_ptr out of bounds (%p >= %p)\n", \ + av_log(s->avctx, AV_LOG_ERROR, "Interplay video warning: stream_ptr out of bounds (%p >= %p)\n", \ s->stream_ptr + n, s->stream_end); \ return -1; \ } @@ -86,10 +86,10 @@ typedef struct IpvideoContext { motion_offset += y * s->stride; \ motion_offset += x; \ if (motion_offset < 0) { \ - printf (" Interplay video: motion offset < 0 (%d)\n", motion_offset); \ + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset < 0 (%d)\n", motion_offset); \ return -1; \ } else if (motion_offset > s->upper_motion_limit_offset) { \ - printf (" Interplay video: motion offset above limit (%d >= %d)\n", \ + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset above limit (%d >= %d)\n", \ motion_offset, s->upper_motion_limit_offset); \ return -1; \ } \ @@ -101,10 +101,10 @@ typedef struct IpvideoContext { motion_offset += y * s->stride; \ motion_offset += x; \ if (motion_offset < 0) { \ - printf (" Interplay video: motion offset < 0 (%d)\n", motion_offset); \ + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset < 0 (%d)\n", motion_offset); \ return -1; \ } else if (motion_offset > s->upper_motion_limit_offset) { \ - printf (" Interplay video: motion offset above limit (%d >= %d)\n", \ + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset above limit (%d >= %d)\n", \ motion_offset, s->upper_motion_limit_offset); \ return -1; \ } \ @@ -116,10 +116,10 @@ typedef struct IpvideoContext { motion_offset += y * s->stride; \ motion_offset += x; \ if 
(motion_offset < 0) { \ - printf (" Interplay video: motion offset < 0 (%d)\n", motion_offset); \ + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset < 0 (%d)\n", motion_offset); \ return -1; \ } else if (motion_offset > s->upper_motion_limit_offset) { \ - printf (" Interplay video: motion offset above limit (%d >= %d)\n", \ + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: motion offset above limit (%d >= %d)\n", \ motion_offset, s->upper_motion_limit_offset); \ return -1; \ } \ @@ -254,7 +254,7 @@ static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s) static int ipvideo_decode_block_opcode_0x6(IpvideoContext *s) { /* mystery opcode? skip multiple blocks? */ - printf (" Interplay video: Help! Mystery opcode 0x6 seen\n"); + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: Help! Mystery opcode 0x6 seen\n"); /* report success */ return 0; @@ -838,7 +838,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s) s->pixel_ptr = s->current_frame.data[0] + x; ret = ipvideo_decode_block[opcode](s); if (ret != 0) { - printf(" Interplay video: decode problem on frame %d, @ block (%d, %d)\n", + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: decode problem on frame %d, @ block (%d, %d)\n", frame, x - y, y / s->stride); return; } @@ -846,7 +846,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s) } if ((s->stream_ptr != s->stream_end) && (s->stream_ptr + 1 != s->stream_end)) { - printf (" Interplay video: decode finished with %d bytes left over\n", + av_log(s->avctx, AV_LOG_ERROR, " Interplay video: decode finished with %d bytes left over\n", s->stream_end - s->stream_ptr); } } @@ -858,7 +858,7 @@ static int ipvideo_decode_init(AVCodecContext *avctx) s->avctx = avctx; if (s->avctx->palctrl == NULL) { - printf (" Interplay video: palette expected.\n"); + av_log(avctx, AV_LOG_ERROR, " Interplay video: palette expected.\n"); return -1; } @@ -900,13 +900,18 @@ static int ipvideo_decode_frame(AVCodecContext *avctx, IpvideoContext *s = avctx->priv_data; AVPaletteControl *palette_control = avctx->palctrl; + /* compressed buffer needs to be large enough to at least hold an entire + * decoding map */ + if (buf_size < s->decoding_map_size) + return buf_size; + s->decoding_map = buf; s->buf = buf + s->decoding_map_size; s->size = buf_size - s->decoding_map_size; s->current_frame.reference = 3; if (avctx->get_buffer(avctx, &s->current_frame)) { - printf (" Interplay Video: get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, " Interplay Video: get_buffer() failed\n"); return -1; } diff --git a/src/libffmpeg/libavcodec/jfdctfst.c b/src/libffmpeg/libavcodec/jfdctfst.c index 899768d36..a393c5ca8 100644 --- a/src/libffmpeg/libavcodec/jfdctfst.c +++ b/src/libffmpeg/libavcodec/jfdctfst.c @@ -112,17 +112,10 @@ #define MULTIPLY(var,const) ((DCTELEM) DESCALE((var) * (const), CONST_BITS)) - -/* - * Perform the forward DCT on one block of samples. - */ - -GLOBAL(void) -fdct_ifast (DCTELEM * data) -{ - DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - DCTELEM tmp10, tmp11, tmp12, tmp13; - DCTELEM z1, z2, z3, z4, z5, z11, z13; +static always_inline void row_fdct(DCTELEM * data){ + int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_fast16_t tmp10, tmp11, tmp12, tmp13; + int_fast16_t z1, z2, z3, z4, z5, z11, z13; DCTELEM *dataptr; int ctr; SHIFT_TEMPS @@ -176,7 +169,24 @@ fdct_ifast (DCTELEM * data) dataptr += DCTSIZE; /* advance pointer to next row */ } +} + +/* + * Perform the forward DCT on one block of samples. 
+ */ +GLOBAL(void) +fdct_ifast (DCTELEM * data) +{ + int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_fast16_t tmp10, tmp11, tmp12, tmp13; + int_fast16_t z1, z2, z3, z4, z5, z11, z13; + DCTELEM *dataptr; + int ctr; + SHIFT_TEMPS + + row_fdct(data); + /* Pass 2: process columns. */ dataptr = data; @@ -228,6 +238,65 @@ fdct_ifast (DCTELEM * data) } } +/* + * Perform the forward 2-4-8 DCT on one block of samples. + */ + +GLOBAL(void) +fdct_ifast248 (DCTELEM * data) +{ + int_fast16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_fast16_t tmp10, tmp11, tmp12, tmp13; + int_fast16_t z1; + DCTELEM *dataptr; + int ctr; + SHIFT_TEMPS + + row_fdct(data); + + /* Pass 2: process columns. */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*1]; + tmp1 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; + tmp2 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7]; + tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1]; + tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; + tmp6 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5]; + tmp7 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7]; + + /* Even part */ + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + tmp13 = tmp0 - tmp3; + + dataptr[DCTSIZE*0] = tmp10 + tmp11; + dataptr[DCTSIZE*4] = tmp10 - tmp11; + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); + dataptr[DCTSIZE*2] = tmp13 + z1; + dataptr[DCTSIZE*6] = tmp13 - z1; + + tmp10 = tmp4 + tmp7; + tmp11 = tmp5 + tmp6; + tmp12 = tmp5 - tmp6; + tmp13 = tmp4 - tmp7; + + dataptr[DCTSIZE*1] = tmp10 + tmp11; + dataptr[DCTSIZE*5] = tmp10 - tmp11; + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781); + dataptr[DCTSIZE*3] = tmp13 + z1; + dataptr[DCTSIZE*7] = tmp13 - z1; + + dataptr++; /* advance pointer to next column */ + } +} + #undef GLOBAL #undef CONST_BITS diff --git a/src/libffmpeg/libavcodec/jfdctint.c b/src/libffmpeg/libavcodec/jfdctint.c index 702d0a947..1fbd85b28 100644 --- a/src/libffmpeg/libavcodec/jfdctint.c +++ b/src/libffmpeg/libavcodec/jfdctint.c @@ -148,16 +148,10 @@ #endif -/* - * Perform the forward DCT on one block of samples. - */ - -GLOBAL(void) -ff_jpeg_fdct_islow (DCTELEM * data) -{ - int32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int32_t tmp10, tmp11, tmp12, tmp13; - int32_t z1, z2, z3, z4, z5; +static always_inline void row_fdct(DCTELEM * data){ + int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_fast32_t tmp10, tmp11, tmp12, tmp13; + int_fast32_t z1, z2, z3, z4, z5; DCTELEM *dataptr; int ctr; SHIFT_TEMPS @@ -225,6 +219,23 @@ ff_jpeg_fdct_islow (DCTELEM * data) dataptr += DCTSIZE; /* advance pointer to next row */ } +} + +/* + * Perform the forward DCT on one block of samples. + */ + +GLOBAL(void) +ff_jpeg_fdct_islow (DCTELEM * data) +{ + int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_fast32_t tmp10, tmp11, tmp12, tmp13; + int_fast32_t z1, z2, z3, z4, z5; + DCTELEM *dataptr; + int ctr; + SHIFT_TEMPS + + row_fdct(data); /* Pass 2: process columns. * We remove the PASS1_BITS scaling, but leave the results scaled up @@ -295,3 +306,68 @@ ff_jpeg_fdct_islow (DCTELEM * data) dataptr++; /* advance pointer to next column */ } } + +/* + * The secret of DCT2-4-8 is really simple -- you do the usual 1-DCT + * on the rows and then, instead of doing even and odd, part on the colums + * you do even part two times. 
+ */ +GLOBAL(void) +ff_fdct248_islow (DCTELEM * data) +{ + int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int_fast32_t tmp10, tmp11, tmp12, tmp13; + int_fast32_t z1; + DCTELEM *dataptr; + int ctr; + SHIFT_TEMPS + + row_fdct(data); + + /* Pass 2: process columns. + * We remove the PASS1_BITS scaling, but leave the results scaled up + * by an overall factor of 8. + */ + + dataptr = data; + for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { + tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*1]; + tmp1 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*3]; + tmp2 = dataptr[DCTSIZE*4] + dataptr[DCTSIZE*5]; + tmp3 = dataptr[DCTSIZE*6] + dataptr[DCTSIZE*7]; + tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1]; + tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*3]; + tmp6 = dataptr[DCTSIZE*4] - dataptr[DCTSIZE*5]; + tmp7 = dataptr[DCTSIZE*6] - dataptr[DCTSIZE*7]; + + tmp10 = tmp0 + tmp3; + tmp11 = tmp1 + tmp2; + tmp12 = tmp1 - tmp2; + tmp13 = tmp0 - tmp3; + + dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); + dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), + CONST_BITS+PASS1_BITS); + + tmp10 = tmp4 + tmp7; + tmp11 = tmp5 + tmp6; + tmp12 = tmp5 - tmp6; + tmp13 = tmp4 - tmp7; + + dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); + dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); + + z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); + dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), + CONST_BITS+PASS1_BITS); + dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), + CONST_BITS+PASS1_BITS); + + dataptr++; /* advance pointer to next column */ + } +} diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess.c b/src/libffmpeg/libavcodec/libpostproc/postprocess.c index 11267b666..093d94aea 100644 --- a/src/libffmpeg/libavcodec/libpostproc/postprocess.c +++ b/src/libffmpeg/libavcodec/libpostproc/postprocess.c @@ -70,9 +70,8 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks #include "config.h" #include <inttypes.h> #include <stdio.h> - -#include "xineutils.h" - +#include <stdlib.h> +#include <string.h> #ifdef HAVE_MALLOC_H #include <malloc.h> #endif @@ -81,9 +80,9 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks //#undef HAVE_MMX //#undef ARCH_X86 //#define DEBUG_BRIGHTNESS - -#define memcpy(a,b,c) xine_fast_memcpy(a,b,c) - +#ifdef USE_FASTMEMCPY +#include "../fastmemcpy.h" +#endif #include "postprocess.h" #include "postprocess_internal.h" @@ -105,13 +104,13 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... 
checks //#define NUM_BLOCKS_AT_ONCE 16 //not used yet #ifdef ARCH_X86 -static const uint64_t __attribute__((aligned(8))) w05= 0x0005000500050005ULL; -static const uint64_t __attribute__((aligned(8))) w20= 0x0020002000200020ULL; -static const uint64_t __attribute__((aligned(8))) b00= 0x0000000000000000ULL; -static const uint64_t __attribute__((aligned(8))) b01= 0x0101010101010101ULL; -static const uint64_t __attribute__((aligned(8))) b02= 0x0202020202020202ULL; -static const uint64_t __attribute__((aligned(8))) b08= 0x0808080808080808ULL; -static const uint64_t __attribute__((aligned(8))) b80= 0x8080808080808080ULL; +static uint64_t __attribute__((aligned(8))) w05= 0x0005000500050005LL; +static uint64_t __attribute__((aligned(8))) w20= 0x0020002000200020LL; +static uint64_t __attribute__((aligned(8))) b00= 0x0000000000000000LL; +static uint64_t __attribute__((aligned(8))) b01= 0x0101010101010101LL; +static uint64_t __attribute__((aligned(8))) b02= 0x0202020202020202LL; +static uint64_t __attribute__((aligned(8))) b08= 0x0808080808080808LL; +static uint64_t __attribute__((aligned(8))) b80= 0x8080808080808080LL; #endif @@ -153,6 +152,44 @@ static char *replaceTable[]= NULL //End Marker }; +#ifdef ARCH_X86 +static inline void unusedVariableWarningFixer() +{ + if(w05 + w20 + b00 + b01 + b02 + b08 + b80 == 0) b00=0; +} +#endif + + +#ifdef ARCH_X86 +static inline void prefetchnta(void *p) +{ + asm volatile( "prefetchnta (%0)\n\t" + : : "r" (p) + ); +} + +static inline void prefetcht0(void *p) +{ + asm volatile( "prefetcht0 (%0)\n\t" + : : "r" (p) + ); +} + +static inline void prefetcht1(void *p) +{ + asm volatile( "prefetcht1 (%0)\n\t" + : : "r" (p) + ); +} + +static inline void prefetcht2(void *p) +{ + asm volatile( "prefetcht2 (%0)\n\t" + : : "r" (p) + ); +} +#endif + // The horizontal Functions exist only in C cuz the MMX code is faster with vertical filters and transposing /** @@ -906,10 +943,10 @@ for(y=0; y<mbHeight; y++){ int i; const int count= mbHeight * QPStride; for(i=0; i<(count>>2); i++){ - ((uint32_t*)c->nonBQPTable)[i] = ((uint32_t*)QP_store)[i] & 0x1F1F1F1F; + ((uint32_t*)c->nonBQPTable)[i] = ((uint32_t*)QP_store)[i] & 0x3F3F3F3F; } for(i<<=2; i<count; i++){ - c->nonBQPTable[i] = QP_store[i] & 0x1F; + c->nonBQPTable[i] = QP_store[i] & 0x3F; } } diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h b/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h index 13b3e3831..db50fa3b5 100644 --- a/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h +++ b/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h @@ -134,8 +134,8 @@ typedef struct PPContext{ uint64_t __attribute__((aligned(8))) pQPb; uint64_t __attribute__((aligned(8))) pQPb2; - uint64_t __attribute__((aligned(8))) mmxDcOffset[32]; - uint64_t __attribute__((aligned(8))) mmxDcThreshold[32]; + uint64_t __attribute__((aligned(8))) mmxDcOffset[64]; + uint64_t __attribute__((aligned(8))) mmxDcThreshold[64]; QP_STORE_T *stdQPTable; ///< used to fix MPEG2 style qscale QP_STORE_T *nonBQPTable; diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c b/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c index 636212959..7ebc08bd4 100644 --- a/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c +++ b/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c @@ -1805,10 +1805,9 @@ DEINT_L5(%%mm1, %%mm0, (%%edx, %1, 2), (%0, %1, 8) , (%%edx, %1, 4)) * will be called for every 8x8 block and can read & write from line 4-15 * lines 0-3 have been passed through the 
deblock / dering filters allready, but can be read too * lines 4-12 will be read into the deblocking filter and should be deinterlaced - * will shift the image up by 1 line (FIXME if this is a problem) * this filter will read lines 4-13 and write 4-11 */ -static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], int stride) +static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], int stride, uint8_t *tmp) { #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) src+= 4*stride; @@ -1818,43 +1817,43 @@ static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], int stride) // 0 1 2 3 4 5 6 7 8 9 // %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 - "movq (%0), %%mm0 \n\t" // L0 - "movq (%%eax, %1), %%mm1 \n\t" // L2 + "movq (%2), %%mm0 \n\t" // L0 + "movq (%%eax), %%mm1 \n\t" // L2 PAVGB(%%mm1, %%mm0) // L0+L2 - "movq (%%eax), %%mm2 \n\t" // L1 + "movq (%0), %%mm2 \n\t" // L1 PAVGB(%%mm2, %%mm0) "movq %%mm0, (%0) \n\t" - "movq (%%eax, %1, 2), %%mm0 \n\t" // L3 + "movq (%%eax, %1), %%mm0 \n\t" // L3 PAVGB(%%mm0, %%mm2) // L1+L3 PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3 "movq %%mm2, (%%eax) \n\t" - "movq (%0, %1, 4), %%mm2 \n\t" // L4 + "movq (%%eax, %1, 2), %%mm2 \n\t" // L4 PAVGB(%%mm2, %%mm1) // L2+L4 PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4 "movq %%mm1, (%%eax, %1) \n\t" - "movq (%%edx), %%mm1 \n\t" // L5 + "movq (%0, %1, 4), %%mm1 \n\t" // L5 PAVGB(%%mm1, %%mm0) // L3+L5 PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5 "movq %%mm0, (%%eax, %1, 2) \n\t" - "movq (%%edx, %1), %%mm0 \n\t" // L6 + "movq (%%edx), %%mm0 \n\t" // L6 PAVGB(%%mm0, %%mm2) // L4+L6 PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6 "movq %%mm2, (%0, %1, 4) \n\t" - "movq (%%edx, %1, 2), %%mm2 \n\t" // L7 + "movq (%%edx, %1), %%mm2 \n\t" // L7 PAVGB(%%mm2, %%mm1) // L5+L7 PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7 "movq %%mm1, (%%edx) \n\t" - "movq (%0, %1, 8), %%mm1 \n\t" // L8 + "movq (%%edx, %1, 2), %%mm1 \n\t" // L8 PAVGB(%%mm1, %%mm0) // L6+L8 PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8 "movq %%mm0, (%%edx, %1) \n\t" - "movq (%%edx, %1, 4), %%mm0 \n\t" // L9 + "movq (%0, %1, 8), %%mm0 \n\t" // L9 PAVGB(%%mm0, %%mm2) // L7+L9 PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9 "movq %%mm2, (%%edx, %1, 2) \n\t" + "movq %%mm1, (%2) \n\t" - - : : "r" (src), "r" (stride) + : : "r" (src), "r" (stride), "r" (tmp) : "%eax", "%edx" ); #else @@ -1862,41 +1861,43 @@ static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], int stride) src+= 4*stride; for(x=0; x<2; x++){ - a= *(uint32_t*)&src[stride*0]; - b= *(uint32_t*)&src[stride*1]; - c= *(uint32_t*)&src[stride*2]; + a= *(uint32_t*)&tmp[stride*0]; + b= *(uint32_t*)&src[stride*0]; + c= *(uint32_t*)&src[stride*1]; a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1); *(uint32_t*)&src[stride*0]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1); - a= *(uint32_t*)&src[stride*3]; + a= *(uint32_t*)&src[stride*2]; b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1); *(uint32_t*)&src[stride*1]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1); - b= *(uint32_t*)&src[stride*4]; + b= *(uint32_t*)&src[stride*3]; c= (b&c) + (((b^c)&0xFEFEFEFEUL)>>1); *(uint32_t*)&src[stride*2]= (c|a) - (((c^a)&0xFEFEFEFEUL)>>1); - c= *(uint32_t*)&src[stride*5]; + c= *(uint32_t*)&src[stride*4]; a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1); *(uint32_t*)&src[stride*3]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1); - a= *(uint32_t*)&src[stride*6]; + a= *(uint32_t*)&src[stride*5]; b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1); *(uint32_t*)&src[stride*4]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1); - b= *(uint32_t*)&src[stride*7]; + b= *(uint32_t*)&src[stride*6]; c= (b&c) + (((b^c)&0xFEFEFEFEUL)>>1); *(uint32_t*)&src[stride*5]= 
(c|a) - (((c^a)&0xFEFEFEFEUL)>>1); - c= *(uint32_t*)&src[stride*8]; + c= *(uint32_t*)&src[stride*7]; a= (a&c) + (((a^c)&0xFEFEFEFEUL)>>1); *(uint32_t*)&src[stride*6]= (a|b) - (((a^b)&0xFEFEFEFEUL)>>1); - a= *(uint32_t*)&src[stride*9]; + a= *(uint32_t*)&src[stride*8]; b= (a&b) + (((a^b)&0xFEFEFEFEUL)>>1); *(uint32_t*)&src[stride*7]= (c|b) - (((c^b)&0xFEFEFEFEUL)>>1); + *(uint32_t*)&tmp[stride*0]= c; src += 4; + tmp += 4; } #endif } @@ -2788,9 +2789,10 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int uint64_t * const yHistogram= c.yHistogram; uint8_t * const tempSrc= c.tempSrc; uint8_t * const tempDst= c.tempDst; + const int mbWidth= isColor ? (width+7)>>3 : (width+15)>>4; #ifdef HAVE_MMX - for(i=0; i<32; i++){ + for(i=0; i<57; i++){ int offset= ((i*c.ppMode.baseDcDiff)>>8) + 1; int threshold= offset*2 + 1; c.mmxDcOffset[i]= 0x7F - offset; @@ -2932,7 +2934,7 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int if(mode & LINEAR_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride); else if(mode & LINEAR_BLEND_DEINT_FILTER) - RENAME(deInterlaceBlendLinear)(dstBlock, dstStride); + RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x); else if(mode & MEDIAN_DEINT_FILTER) RENAME(deInterlaceMedian)(dstBlock, dstStride); else if(mode & CUBIC_IPOL_DEINT_FILTER) @@ -3076,7 +3078,7 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int if(mode & LINEAR_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride); else if(mode & LINEAR_BLEND_DEINT_FILTER) - RENAME(deInterlaceBlendLinear)(dstBlock, dstStride); + RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x); else if(mode & MEDIAN_DEINT_FILTER) RENAME(deInterlaceMedian)(dstBlock, dstStride); else if(mode & CUBIC_IPOL_DEINT_FILTER) diff --git a/src/libffmpeg/libavcodec/mdec.c b/src/libffmpeg/libavcodec/mdec.c index e093ee356..faf3cef31 100644 --- a/src/libffmpeg/libavcodec/mdec.c +++ b/src/libffmpeg/libavcodec/mdec.c @@ -110,7 +110,7 @@ static inline int mdec_decode_block_intra(MDECContext *a, DCTELEM *block, int n) } } if (i > 63){ - fprintf(stderr, "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y); + av_log(a->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", a->mb_x, a->mb_y); return -1; } @@ -175,7 +175,7 @@ static int decode_frame(AVCodecContext *avctx, p->reference= 0; if(avctx->get_buffer(avctx, p) < 0){ - fprintf(stderr, "get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type= I_TYPE; diff --git a/src/libffmpeg/libavcodec/mjpeg.c b/src/libffmpeg/libavcodec/mjpeg.c index 5502083ab..1a948aa56 100644 --- a/src/libffmpeg/libavcodec/mjpeg.c +++ b/src/libffmpeg/libavcodec/mjpeg.c @@ -793,7 +793,7 @@ typedef struct MJpegDecodeContext { VLC vlcs[2][4]; int qscale[4]; ///< quantizer scale calculated from quant_matrixes - int org_width, org_height; /* size given at codec init */ + int org_height; /* size given at codec init */ int first_picture; /* true if decoding first picture */ int interlaced; /* true if interlaced */ int bottom_field; /* true if bottom field */ @@ -855,7 +855,6 @@ static int mjpeg_decode_init(AVCodecContext *avctx) /* ugly way to get the idct & scantable FIXME */ memset(&s2, 0, sizeof(MpegEncContext)); - s2.flags= avctx->flags; s2.avctx= avctx; // s2->out_format = FMT_MJPEG; s2.width = 8; @@ -874,7 +873,6 @@ static int mjpeg_decode_init(AVCodecContext *avctx) return -1; s->start_code = -1; s->first_picture = 1; - 
s->org_width = avctx->width; s->org_height = avctx->height; build_vlc(&s->vlcs[0][0], bits_dc_luminance, val_dc_luminance, 12); @@ -884,7 +882,7 @@ static int mjpeg_decode_init(AVCodecContext *avctx) if (avctx->flags & CODEC_FLAG_EXTERN_HUFF) { - printf("mjpeg: using external huffman table\n"); + av_log(avctx, AV_LOG_INFO, "mjpeg: using external huffman table\n"); init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size*8); mjpeg_decode_dht(s); /* should check for error - but dunno */ @@ -987,7 +985,7 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s) if(s->bits==9 && !s->pegasus_rct) s->rct=1; //FIXME ugly if (s->bits != 8 && !s->lossless){ - printf("only 8 bits/component accepted\n"); + av_log(s->avctx, AV_LOG_ERROR, "only 8 bits/component accepted\n"); return -1; } height = get_bits(&s->gb, 16); @@ -1027,13 +1025,17 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s) s->width = width; s->height = height; + s->avctx->width = s->width; + s->avctx->height = s->height; + /* test interlaced mode */ if (s->first_picture && s->org_height != 0 && s->height < ((s->org_height * 3) / 4)) { s->interlaced = 1; // s->bottom_field = (s->interlace_polarity) ? 1 : 0; - s->bottom_field = 0; + s->bottom_field = 0; + s->avctx->height *= 2; } s->qscale_table= av_mallocz((s->width+15)/16); @@ -1049,8 +1051,10 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s) case 0x11: if(s->rgb){ s->avctx->pix_fmt = PIX_FMT_RGBA32; - }else + }else if(s->nb_components==3) s->avctx->pix_fmt = PIX_FMT_YUV444P; + else + s->avctx->pix_fmt = PIX_FMT_GRAY8; break; case 0x21: s->avctx->pix_fmt = PIX_FMT_YUV422P; @@ -1066,7 +1070,7 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s) s->picture.reference= 0; if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){ - fprintf(stderr, "get_buffer() failed\n"); + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } s->picture.pict_type= I_TYPE; @@ -1369,7 +1373,7 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s) return -1; } /* XXX: only interleaved scan accepted */ - if (nb_components != 3) + if (nb_components != s->nb_components) { dprintf("decode_sos: components(%d) mismatch\n", nb_components); return -1; @@ -1444,7 +1448,7 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s) } if(s->avctx->debug & FF_DEBUG_PICT_INFO) - printf("%s %s p:%d >>:%d\n", s->lossless ? "lossless" : "sequencial DCT", s->rgb ? "RGB" : "", predictor, point_transform); + av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d\n", s->lossless ? "lossless" : "sequencial DCT", s->rgb ? 
"RGB" : "", predictor, point_transform); if(s->lossless){ if(s->rgb){ @@ -1489,7 +1493,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) len -= 6; if(s->avctx->debug & FF_DEBUG_STARTCODE){ - printf("APPx %8X\n", id); + av_log(s->avctx, AV_LOG_DEBUG, "APPx %8X\n", id); } /* buggy AVID, it puts EOI only at every 10th frame */ @@ -1504,7 +1508,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) 4bytes field_size 4bytes field_size_less_padding */ -// s->buggy_avid = 1; + s->buggy_avid = 1; // if (s->first_picture) // printf("mjpeg: workarounding buggy AVID\n"); s->interlace_polarity = get_bits(&s->gb, 8); @@ -1525,7 +1529,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) { int t_w, t_h; skip_bits(&s->gb, 8); /* the trailing zero-byte */ - printf("mjpeg: JFIF header found (version: %x.%x)\n", + av_log(s->avctx, AV_LOG_INFO, "mjpeg: JFIF header found (version: %x.%x)\n", get_bits(&s->gb, 8), get_bits(&s->gb, 8)); skip_bits(&s->gb, 8); @@ -1546,7 +1550,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) if (id == ff_get_fourcc("Adob") && (get_bits(&s->gb, 8) == 'e')) { - printf("mjpeg: Adobe header found\n"); + av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found\n"); skip_bits(&s->gb, 16); /* version */ skip_bits(&s->gb, 16); /* flags0 */ skip_bits(&s->gb, 16); /* flags1 */ @@ -1556,7 +1560,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) } if (id == ff_get_fourcc("LJIF")){ - printf("Pegasus lossless jpeg header found\n"); + av_log(s->avctx, AV_LOG_INFO, "Pegasus lossless jpeg header found\n"); skip_bits(&s->gb, 16); /* version ? */ skip_bits(&s->gb, 16); /* unknwon always 0? */ skip_bits(&s->gb, 16); /* unknwon always 0? */ @@ -1571,7 +1575,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) s->pegasus_rct=1; break; default: - printf("unknown colorspace\n"); + av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace\n"); } len -= 9; goto out; @@ -1596,14 +1600,14 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) skip_bits(&s->gb, 32); /* data off */ #endif if (s->first_picture) - printf("mjpeg: Apple MJPEG-A header found\n"); + av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n"); } } out: /* slow but needed for extreme adobe jpegs */ if (len < 0) - printf("mjpeg: error, decode_app parser read over the end\n"); + av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error, decode_app parser read over the end\n"); while(--len > 0) skip_bits(&s->gb, 8); @@ -1626,7 +1630,7 @@ static int mjpeg_decode_com(MJpegDecodeContext *s) else cbuf[i] = 0; - printf("mjpeg comment: '%s'\n", cbuf); + av_log(s->avctx, AV_LOG_INFO, "mjpeg comment: '%s'\n", cbuf); /* buggy avid, it puts EOI only at every 10th frame */ if (!strcmp(cbuf, "AVID")) @@ -1766,7 +1770,7 @@ static int mjpeg_decode_frame(AVCodecContext *avctx, s->start_code = start_code; if(s->avctx->debug & FF_DEBUG_STARTCODE){ - printf("startcode: %X\n", start_code); + av_log(s->avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code); } /* process markers */ @@ -1791,7 +1795,7 @@ static int mjpeg_decode_frame(AVCodecContext *avctx, break; case DHT: if(mjpeg_decode_dht(s) < 0){ - fprintf(stderr, "huffman table decode error\n"); + av_log(s->avctx, AV_LOG_ERROR, "huffman table decode error\n"); return -1; } break; @@ -1818,10 +1822,6 @@ eoi_parser: } *picture = s->picture; *data_size = sizeof(AVFrame); - avctx->height = s->height; - if (s->interlaced) - avctx->height *= 2; - avctx->width = s->width; if(!s->lossless){ picture->quality= FFMAX(FFMAX(s->qscale[0], s->qscale[1]), s->qscale[2]); @@ -1829,7 +1829,7 @@ eoi_parser: 
picture->qscale_table= s->qscale_table; memset(picture->qscale_table, picture->quality, (s->width+15)/16); if(avctx->debug & FF_DEBUG_QP) - printf("QP: %d\n", picture->quality); + av_log(s->avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality); picture->quality*= FF_QP2LAMBDA; } @@ -1858,7 +1858,7 @@ eoi_parser: case SOF14: case SOF15: case JPG: - printf("mjpeg: unsupported coding type (%x)\n", start_code); + av_log(s->avctx, AV_LOG_ERROR, "mjpeg: unsupported coding type (%x)\n", start_code); break; // default: // printf("mjpeg: unsupported marker (%x)\n", start_code); @@ -1976,10 +1976,6 @@ read_header: *picture= s->picture; *data_size = sizeof(AVFrame); - avctx->height = s->height; - if (s->interlaced) - avctx->height *= 2; - avctx->width = s->width; if(!s->lossless){ picture->quality= FFMAX(FFMAX(s->qscale[0], s->qscale[1]), s->qscale[2]); @@ -1987,7 +1983,7 @@ read_header: picture->qscale_table= s->qscale_table; memset(picture->qscale_table, picture->quality, (s->width+15)/16); if(avctx->debug & FF_DEBUG_QP) - printf("QP: %d\n", picture->quality); + av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality); picture->quality*= FF_QP2LAMBDA; } @@ -2148,7 +2144,7 @@ static int sp5x_decode_frame(AVCodecContext *avctx, s->mb_width = (s->width * s->h_max * 8 -1) / (s->h_max * 8); s->mb_height = (s->height * s->v_max * 8 -1) / (s->v_max * 8); - init_get_bits(&s->gb, buf, buf_size*8); + init_get_bits(&s->gb, buf+14, (buf_size-14)*8); return mjpeg_decode_scan(s); #endif diff --git a/src/libffmpeg/libavcodec/motion_est.c b/src/libffmpeg/libavcodec/motion_est.c index 52774d392..e8641790b 100644 --- a/src/libffmpeg/libavcodec/motion_est.c +++ b/src/libffmpeg/libavcodec/motion_est.c @@ -1,7 +1,7 @@ /* * Motion estimation * Copyright (c) 2000,2001 Fabrice Bellard. 
- * Copyright (c) 2002-2003 Michael Niedermayer + * Copyright (c) 2002-2004 Michael Niedermayer * * * This library is free software; you can redistribute it and/or @@ -46,9 +46,9 @@ static inline int sad_hpel_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, - int xmin, int ymin, int xmax, int ymax, - int pred_x, int pred_y, Picture *picture, - int n, int size, uint8_t * const mv_penalty); + int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[6], int stride, int uvstride, + int size, int h, uint8_t * const mv_penalty); static inline int update_map_generation(MpegEncContext * s) { @@ -78,20 +78,21 @@ static int minima_cmp(const void *a, const void *b){ #define RENAME(a) simple_ ## a #define CMP(d, x, y, size)\ -d = cmp(s, src_y, (ref_y) + (x) + (y)*(stride), stride); +d = cmp(s, src_y, (ref_y) + (x) + (y)*(stride), stride, h); #define CMP_HPEL(d, dx, dy, x, y, size)\ {\ const int dxy= (dx) + 2*(dy);\ - hpel_put[0][dxy](s->me.scratchpad, (ref_y) + (x) + (y)*(stride), stride, (16>>size));\ - d = cmp_sub(s, s->me.scratchpad, src_y, stride);\ + hpel_put[0][dxy](s->me.scratchpad, (ref_y) + (x) + (y)*(stride), stride, h);\ + d = cmp_sub(s, s->me.scratchpad, src_y, stride, h);\ } + #define CMP_QPEL(d, dx, dy, x, y, size)\ {\ const int dxy= (dx) + 4*(dy);\ qpel_put[0][dxy](s->me.scratchpad, (ref_y) + (x) + (y)*(stride), stride);\ - d = cmp_sub(s, s->me.scratchpad, src_y, stride);\ + d = cmp_sub(s, s->me.scratchpad, src_y, stride, h);\ } #include "motion_est_template.c" @@ -105,29 +106,29 @@ d = cmp(s, src_y, (ref_y) + (x) + (y)*(stride), stride); #define RENAME(a) simple_chroma_ ## a #define CMP(d, x, y, size)\ -d = cmp(s, src_y, (ref_y) + (x) + (y)*(stride), stride);\ +d = cmp(s, src_y, (ref_y) + (x) + (y)*(stride), stride, h);\ if(chroma_cmp){\ int dxy= ((x)&1) + 2*((y)&1);\ int c= ((x)>>1) + ((y)>>1)*uvstride;\ \ - chroma_hpel_put[0][dxy](s->me.scratchpad, ref_u + c, uvstride, 8);\ - d += chroma_cmp(s, s->me.scratchpad, src_u, uvstride);\ - chroma_hpel_put[0][dxy](s->me.scratchpad, ref_v + c, uvstride, 8);\ - d += chroma_cmp(s, s->me.scratchpad, src_v, uvstride);\ + chroma_hpel_put[0][dxy](s->me.scratchpad, ref_u + c, uvstride, h>>1);\ + d += chroma_cmp(s, s->me.scratchpad, src_u, uvstride, h>>1);\ + chroma_hpel_put[0][dxy](s->me.scratchpad, ref_v + c, uvstride, h>>1);\ + d += chroma_cmp(s, s->me.scratchpad, src_v, uvstride, h>>1);\ } #define CMP_HPEL(d, dx, dy, x, y, size)\ {\ const int dxy= (dx) + 2*(dy);\ - hpel_put[0][dxy](s->me.scratchpad, (ref_y) + (x) + (y)*(stride), stride, (16>>size));\ - d = cmp_sub(s, s->me.scratchpad, src_y, stride);\ + hpel_put[0][dxy](s->me.scratchpad, (ref_y) + (x) + (y)*(stride), stride, h);\ + d = cmp_sub(s, s->me.scratchpad, src_y, stride, h);\ if(chroma_cmp_sub){\ int cxy= (dxy) | ((x)&1) | (2*((y)&1));\ int c= ((x)>>1) + ((y)>>1)*uvstride;\ - chroma_hpel_put[0][cxy](s->me.scratchpad, ref_u + c, uvstride, 8);\ - d += chroma_cmp_sub(s, s->me.scratchpad, src_u, uvstride);\ - chroma_hpel_put[0][cxy](s->me.scratchpad, ref_v + c, uvstride, 8);\ - d += chroma_cmp_sub(s, s->me.scratchpad, src_v, uvstride);\ + chroma_hpel_put[0][cxy](s->me.scratchpad, ref_u + c, uvstride, h>>1);\ + d += chroma_cmp_sub(s, s->me.scratchpad, src_u, uvstride, h>>1);\ + chroma_hpel_put[0][cxy](s->me.scratchpad, ref_v + c, uvstride, h>>1);\ + d += chroma_cmp_sub(s, s->me.scratchpad, src_v, uvstride, h>>1);\ }\ } @@ -135,7 +136,7 @@ if(chroma_cmp){\ {\ const int dxy= (dx) + 4*(dy);\ qpel_put[0][dxy](s->me.scratchpad, (ref_y) + (x) + (y)*(stride), stride);\ - 
d = cmp_sub(s, s->me.scratchpad, src_y, stride);\ + d = cmp_sub(s, s->me.scratchpad, src_y, stride, h);\ if(chroma_cmp_sub){\ int cxy, c;\ int cx= (4*(x) + (dx))/2;\ @@ -144,10 +145,10 @@ if(chroma_cmp){\ cy= (cy>>1)|(cy&1);\ cxy= (cx&1) + 2*(cy&1);\ c= ((cx)>>1) + ((cy)>>1)*uvstride;\ - chroma_hpel_put[0][cxy](s->me.scratchpad, ref_u + c, uvstride, 8);\ - d += chroma_cmp_sub(s, s->me.scratchpad, src_u, uvstride);\ - chroma_hpel_put[0][cxy](s->me.scratchpad, ref_v + c, uvstride, 8);\ - d += chroma_cmp_sub(s, s->me.scratchpad, src_v, uvstride);\ + chroma_hpel_put[0][cxy](s->me.scratchpad, ref_u + c, uvstride, h>>1);\ + d += chroma_cmp_sub(s, s->me.scratchpad, src_u, uvstride, h>>1);\ + chroma_hpel_put[0][cxy](s->me.scratchpad, ref_v + c, uvstride, h>>1);\ + d += chroma_cmp_sub(s, s->me.scratchpad, src_v, uvstride, h>>1);\ }\ } @@ -178,7 +179,7 @@ if((x) >= xmin && 2*(x) + (dx) <= 2*xmax && (y) >= ymin && 2*(y) + (dy) <= 2*yma \ uint8_t *dst= s->me.scratchpad + 8*(i&1) + 8*stride*(i>>1);\ hpel_put[1][fxy](dst, (ref_y ) + (fx>>1) + (fy>>1)*(stride), stride, 8);\ - hpel_avg[1][bxy](dst, (ref2_y) + (bx>>1) + (by>>1)*(stride), stride, 8);\ + hpel_avg[1][bxy](dst, (ref_data[3]) + (bx>>1) + (by>>1)*(stride), stride, 8);\ }\ }else{\ int fx = s->me.direct_basis_mv[0][0] + hx;\ @@ -198,9 +199,9 @@ if((x) >= xmin && 2*(x) + (dx) <= 2*xmax && (y) >= ymin && 2*(y) + (dy) <= 2*yma assert((by>>1) + 16*s->mb_y <= s->height);\ \ hpel_put[0][fxy](s->me.scratchpad, (ref_y ) + (fx>>1) + (fy>>1)*(stride), stride, 16);\ - hpel_avg[0][bxy](s->me.scratchpad, (ref2_y) + (bx>>1) + (by>>1)*(stride), stride, 16);\ + hpel_avg[0][bxy](s->me.scratchpad, (ref_data[3]) + (bx>>1) + (by>>1)*(stride), stride, 16);\ }\ - d = cmp_func(s, s->me.scratchpad, src_y, stride);\ + d = cmp_func(s, s->me.scratchpad, src_y, stride, 16);\ }else\ d= 256*256*256*32; @@ -238,7 +239,7 @@ if((x) >= xmin && 4*(x) + (dx) <= 4*xmax && (y) >= ymin && 4*(y) + (dy) <= 4*yma \ uint8_t *dst= s->me.scratchpad + 8*(i&1) + 8*stride*(i>>1);\ qpel_put[1][fxy](dst, (ref_y ) + (fx>>2) + (fy>>2)*(stride), stride);\ - qpel_avg[1][bxy](dst, (ref2_y) + (bx>>2) + (by>>2)*(stride), stride);\ + qpel_avg[1][bxy](dst, (ref_data[3]) + (bx>>2) + (by>>2)*(stride), stride);\ }\ }else{\ int fx = s->me.direct_basis_mv[0][0] + qx;\ @@ -252,12 +253,12 @@ if((x) >= xmin && 4*(x) + (dx) <= 4*xmax && (y) >= ymin && 4*(y) + (dy) <= 4*yma qpel_put[1][fxy](s->me.scratchpad + 8 , (ref_y ) + (fx>>2) + (fy>>2)*(stride) + 8 , stride);\ qpel_put[1][fxy](s->me.scratchpad + 8*stride, (ref_y ) + (fx>>2) + (fy>>2)*(stride) + 8*stride, stride);\ qpel_put[1][fxy](s->me.scratchpad + 8 + 8*stride, (ref_y ) + (fx>>2) + (fy>>2)*(stride) + 8 + 8*stride, stride);\ - qpel_avg[1][bxy](s->me.scratchpad , (ref2_y) + (bx>>2) + (by>>2)*(stride) , stride);\ - qpel_avg[1][bxy](s->me.scratchpad + 8 , (ref2_y) + (bx>>2) + (by>>2)*(stride) + 8 , stride);\ - qpel_avg[1][bxy](s->me.scratchpad + 8*stride, (ref2_y) + (bx>>2) + (by>>2)*(stride) + 8*stride, stride);\ - qpel_avg[1][bxy](s->me.scratchpad + 8 + 8*stride, (ref2_y) + (bx>>2) + (by>>2)*(stride) + 8 + 8*stride, stride);\ + qpel_avg[1][bxy](s->me.scratchpad , (ref_data[3]) + (bx>>2) + (by>>2)*(stride) , stride);\ + qpel_avg[1][bxy](s->me.scratchpad + 8 , (ref_data[3]) + (bx>>2) + (by>>2)*(stride) + 8 , stride);\ + qpel_avg[1][bxy](s->me.scratchpad + 8*stride, (ref_data[3]) + (bx>>2) + (by>>2)*(stride) + 8*stride, stride);\ + qpel_avg[1][bxy](s->me.scratchpad + 8 + 8*stride, (ref_data[3]) + (bx>>2) + (by>>2)*(stride) + 8 + 8*stride, stride);\ }\ - d = 
cmp_func(s, s->me.scratchpad, src_y, stride);\ + d = cmp_func(s, s->me.scratchpad, src_y, stride, 16);\ }else\ d= 256*256*256*32; @@ -276,56 +277,6 @@ if((x) >= xmin && 4*(x) + (dx) <= 4*xmax && (y) >= ymin && 4*(y) + (dy) <= 4*yma #undef INIT #undef CMP__DIRECT - -static int zero_cmp(void *s, uint8_t *a, uint8_t *b, int stride){ - return 0; -} - -static void set_cmp(MpegEncContext *s, me_cmp_func *cmp, int type){ - DSPContext* c= &s->dsp; - int i; - - memset(cmp, 0, sizeof(void*)*11); - - switch(type&0xFF){ - case FF_CMP_SAD: - cmp[0]= c->sad[0]; - cmp[1]= c->sad[1]; - break; - case FF_CMP_SATD: - cmp[0]= c->hadamard8_diff[0]; - cmp[1]= c->hadamard8_diff[1]; - break; - case FF_CMP_SSE: - cmp[0]= c->sse[0]; - cmp[1]= c->sse[1]; - break; - case FF_CMP_DCT: - cmp[0]= c->dct_sad[0]; - cmp[1]= c->dct_sad[1]; - break; - case FF_CMP_PSNR: - cmp[0]= c->quant_psnr[0]; - cmp[1]= c->quant_psnr[1]; - break; - case FF_CMP_BIT: - cmp[0]= c->bit[0]; - cmp[1]= c->bit[1]; - break; - case FF_CMP_RD: - cmp[0]= c->rd[0]; - cmp[1]= c->rd[1]; - break; - case FF_CMP_ZERO: - for(i=0; i<7; i++){ - cmp[i]= zero_cmp; - } - break; - default: - fprintf(stderr,"internal error in cmp function selection\n"); - } -} - static inline int get_penalty_factor(MpegEncContext *s, int type){ switch(type&0xFF){ default: @@ -346,10 +297,10 @@ static inline int get_penalty_factor(MpegEncContext *s, int type){ } void ff_init_me(MpegEncContext *s){ - set_cmp(s, s->dsp.me_pre_cmp, s->avctx->me_pre_cmp); - set_cmp(s, s->dsp.me_cmp, s->avctx->me_cmp); - set_cmp(s, s->dsp.me_sub_cmp, s->avctx->me_sub_cmp); - set_cmp(s, s->dsp.mb_cmp, s->avctx->mb_cmp); + ff_set_cmp(&s->dsp, s->dsp.me_pre_cmp, s->avctx->me_pre_cmp); + ff_set_cmp(&s->dsp, s->dsp.me_cmp, s->avctx->me_cmp); + ff_set_cmp(&s->dsp, s->dsp.me_sub_cmp, s->avctx->me_sub_cmp); + ff_set_cmp(&s->dsp, s->dsp.mb_cmp, s->avctx->mb_cmp); if(s->flags&CODEC_FLAG_QPEL){ if(s->avctx->me_sub_cmp&FF_CMP_CHROMA) @@ -362,7 +313,7 @@ void ff_init_me(MpegEncContext *s){ else if( s->avctx->me_sub_cmp == FF_CMP_SAD && s->avctx-> me_cmp == FF_CMP_SAD && s->avctx-> mb_cmp == FF_CMP_SAD) - s->me.sub_motion_search= sad_hpel_motion_search; + s->me.sub_motion_search= sad_hpel_motion_search; // 2050 vs. 
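/*
 * Sketch of the idea behind dropping the local set_cmp(): the comparison
 * functions are picked once, through a single shared helper, into a small
 * table that every search then indexes by block size.  The names cmp_fn,
 * select_cmp and the CMP_* codes are illustrative assumptions only.
 */
#include <stdint.h>
#include <string.h>

typedef int (*cmp_fn)(const uint8_t *a, const uint8_t *b, int stride, int h);

enum { CMP_SAD, CMP_SSE, CMP_ZERO };

static int cmp_zero(const uint8_t *a, const uint8_t *b, int stride, int h)
{
    (void)a; (void)b; (void)stride; (void)h;
    return 0;                       /* "zero" metric: every candidate ties */
}

/* fill cmp[0] (16x16) and cmp[1] (8x8) from one type code */
static void select_cmp(cmp_fn cmp[2], int type, cmp_fn sad[2], cmp_fn sse[2])
{
    int i;

    memset(cmp, 0, 2 * sizeof(*cmp));
    switch (type) {
    case CMP_SAD:  for (i = 0; i < 2; i++) cmp[i] = sad[i];   break;
    case CMP_SSE:  for (i = 0; i < 2; i++) cmp[i] = sse[i];   break;
    case CMP_ZERO: for (i = 0; i < 2; i++) cmp[i] = cmp_zero; break;
    }
}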
2450 cycles else s->me.sub_motion_search= simple_hpel_motion_search; } @@ -370,9 +321,11 @@ void ff_init_me(MpegEncContext *s){ if(s->avctx->me_cmp&FF_CMP_CHROMA){ s->me.motion_search[0]= simple_chroma_epzs_motion_search; s->me.motion_search[1]= simple_chroma_epzs_motion_search4; + s->me.motion_search[4]= simple_chroma_epzs_motion_search2; }else{ s->me.motion_search[0]= simple_epzs_motion_search; s->me.motion_search[1]= simple_epzs_motion_search4; + s->me.motion_search[4]= simple_epzs_motion_search2; } if(s->avctx->me_pre_cmp&FF_CMP_CHROMA){ @@ -453,8 +406,8 @@ static int full_motion_search(MpegEncContext * s, my = 0; for (y = y1; y <= y2; y++) { for (x = x1; x <= x2; x++) { - d = s->dsp.pix_abs16x16(pix, ref_picture + (y * s->linesize) + x, - s->linesize); + d = s->dsp.pix_abs[0][0](NULL, pix, ref_picture + (y * s->linesize) + x, + s->linesize, 16); if (d < dmin || (d == dmin && (abs(x - xx) + abs(y - yy)) < @@ -518,7 +471,7 @@ static int log_motion_search(MpegEncContext * s, do { for (y = y1; y <= y2; y += range) { for (x = x1; x <= x2; x += range) { - d = s->dsp.pix_abs16x16(pix, ref_picture + (y * s->linesize) + x, s->linesize); + d = s->dsp.pix_abs[0][0](NULL, pix, ref_picture + (y * s->linesize) + x, s->linesize, 16); if (d < dmin || (d == dmin && (abs(x - xx) + abs(y - yy)) < (abs(mx - xx) + abs(my - yy)))) { dmin = d; mx = x; @@ -598,7 +551,7 @@ static int phods_motion_search(MpegEncContext * s, lastx = x; for (x = x1; x <= x2; x += range) { - d = s->dsp.pix_abs16x16(pix, ref_picture + (y * s->linesize) + x, s->linesize); + d = s->dsp.pix_abs[0][0](NULL, pix, ref_picture + (y * s->linesize) + x, s->linesize, 16); if (d < dminx || (d == dminx && (abs(x - xx) + abs(y - yy)) < (abs(mx - xx) + abs(my - yy)))) { dminx = d; mx = x; @@ -607,7 +560,7 @@ static int phods_motion_search(MpegEncContext * s, x = lastx; for (y = y1; y <= y2; y += range) { - d = s->dsp.pix_abs16x16(pix, ref_picture + (y * s->linesize) + x, s->linesize); + d = s->dsp.pix_abs[0][0](NULL, pix, ref_picture + (y * s->linesize) + x, s->linesize, 16); if (d < dminy || (d == dminy && (abs(x - xx) + abs(y - yy)) < (abs(mx - xx) + abs(my - yy)))) { dminy = d; my = y; @@ -651,35 +604,25 @@ static int phods_motion_search(MpegEncContext * s, #define CHECK_SAD_HALF_MV(suffix, x, y) \ {\ - d= pix_abs_ ## suffix(pix, ptr+((x)>>1), s->linesize);\ + d= s->dsp.pix_abs[size][(x?1:0)+(y?2:0)](NULL, pix, ptr+((x)>>1), stride, h);\ d += (mv_penalty[pen_x + x] + mv_penalty[pen_y + y])*penalty_factor;\ COPY3_IF_LT(dminh, d, dx, x, dy, y)\ } static inline int sad_hpel_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, - int xmin, int ymin, int xmax, int ymax, - int pred_x, int pred_y, Picture *picture, - int n, int size, uint8_t * const mv_penalty) + int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[6], int stride, int uvstride, + int size, int h, uint8_t * const mv_penalty) { - uint8_t *ref_picture= picture->data[0]; uint32_t *score_map= s->me.score_map; const int penalty_factor= s->me.sub_penalty_factor; - int mx, my, xx, yy, dminh; + int mx, my, dminh; uint8_t *pix, *ptr; - op_pixels_abs_func pix_abs_x2; - op_pixels_abs_func pix_abs_y2; - op_pixels_abs_func pix_abs_xy2; - - if(size==0){ - pix_abs_x2 = s->dsp.pix_abs16x16_x2; - pix_abs_y2 = s->dsp.pix_abs16x16_y2; - pix_abs_xy2= s->dsp.pix_abs16x16_xy2; - }else{ - pix_abs_x2 = s->dsp.pix_abs8x8_x2; - pix_abs_y2 = s->dsp.pix_abs8x8_y2; - pix_abs_xy2= s->dsp.pix_abs8x8_xy2; - } + const int xmin= s->me.xmin; + const int ymin= s->me.ymin; + const int xmax= 
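/*
 * Sketch of one entry of the 4-way pix_abs table that the rewritten
 * CHECK_SAD_HALF_MV indexes with dxy = (x half-pel ? 1 : 0) + (y half-pel ? 2 : 0):
 * this is the dxy==1 case, SAD against a reference interpolated half a
 * pixel to the right.  sad16_x2 is an illustrative name; rounding follows
 * the usual (a+b+1)>>1 average.
 */
#include <stdint.h>
#include <stdlib.h>

static int sad16_x2(const uint8_t *src, const uint8_t *ref, int stride, int h)
{
    int x, y, sum = 0;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++) {
            int r = (ref[x] + ref[x + 1] + 1) >> 1;   /* horizontal half-pel */
            sum += abs(src[x] - r);
        }
        src += stride;
        ref += stride;
    }
    return sum;
}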
s->me.xmax; + const int ymax= s->me.ymax; if(s->me.skip){ // printf("S"); @@ -689,13 +632,11 @@ static inline int sad_hpel_motion_search(MpegEncContext * s, } // printf("N"); - xx = 16 * s->mb_x + 8*(n&1); - yy = 16 * s->mb_y + 8*(n>>1); - pix = s->new_picture.data[0] + (yy * s->linesize) + xx; + pix = src_data[0]; mx = *mx_ptr; my = *my_ptr; - ptr = ref_picture + ((yy + my) * s->linesize) + (xx + mx); + ptr = ref_data[0] + (my * stride) + mx; dminh = dmin; @@ -715,16 +656,16 @@ static inline int sad_hpel_motion_search(MpegEncContext * s, pen_x= pred_x + mx; pen_y= pred_y + my; - ptr-= s->linesize; + ptr-= stride; if(t<=b){ CHECK_SAD_HALF_MV(y2 , 0, -1) if(l<=r){ CHECK_SAD_HALF_MV(xy2, -1, -1) if(t+r<=b+l){ CHECK_SAD_HALF_MV(xy2, +1, -1) - ptr+= s->linesize; + ptr+= stride; }else{ - ptr+= s->linesize; + ptr+= stride; CHECK_SAD_HALF_MV(xy2, -1, +1) } CHECK_SAD_HALF_MV(x2 , -1, 0) @@ -732,9 +673,9 @@ static inline int sad_hpel_motion_search(MpegEncContext * s, CHECK_SAD_HALF_MV(xy2, +1, -1) if(t+l<=b+r){ CHECK_SAD_HALF_MV(xy2, -1, -1) - ptr+= s->linesize; + ptr+= stride; }else{ - ptr+= s->linesize; + ptr+= stride; CHECK_SAD_HALF_MV(xy2, +1, +1) } CHECK_SAD_HALF_MV(x2 , +1, 0) @@ -743,9 +684,9 @@ static inline int sad_hpel_motion_search(MpegEncContext * s, if(l<=r){ if(t+l<=b+r){ CHECK_SAD_HALF_MV(xy2, -1, -1) - ptr+= s->linesize; + ptr+= stride; }else{ - ptr+= s->linesize; + ptr+= stride; CHECK_SAD_HALF_MV(xy2, +1, +1) } CHECK_SAD_HALF_MV(x2 , -1, 0) @@ -753,9 +694,9 @@ static inline int sad_hpel_motion_search(MpegEncContext * s, }else{ if(t+r<=b+l){ CHECK_SAD_HALF_MV(xy2, +1, -1) - ptr+= s->linesize; + ptr+= stride; }else{ - ptr+= s->linesize; + ptr+= stride; CHECK_SAD_HALF_MV(xy2, -1, +1) } CHECK_SAD_HALF_MV(x2 , +1, 0) @@ -787,50 +728,56 @@ static inline void set_p_mv_tables(MpegEncContext * s, int mx, int my, int mv4) if(mv4){ int mot_xy= s->block_index[0]; - s->motion_val[mot_xy ][0]= mx; - s->motion_val[mot_xy ][1]= my; - s->motion_val[mot_xy+1][0]= mx; - s->motion_val[mot_xy+1][1]= my; + s->current_picture.motion_val[0][mot_xy ][0]= mx; + s->current_picture.motion_val[0][mot_xy ][1]= my; + s->current_picture.motion_val[0][mot_xy+1][0]= mx; + s->current_picture.motion_val[0][mot_xy+1][1]= my; mot_xy += s->block_wrap[0]; - s->motion_val[mot_xy ][0]= mx; - s->motion_val[mot_xy ][1]= my; - s->motion_val[mot_xy+1][0]= mx; - s->motion_val[mot_xy+1][1]= my; + s->current_picture.motion_val[0][mot_xy ][0]= mx; + s->current_picture.motion_val[0][mot_xy ][1]= my; + s->current_picture.motion_val[0][mot_xy+1][0]= mx; + s->current_picture.motion_val[0][mot_xy+1][1]= my; } } /** * get fullpel ME search limits. 
- * @param range the approximate search range for the old ME code, unused for EPZS and newer */ -static inline void get_limits(MpegEncContext *s, int *range, int *xmin, int *ymin, int *xmax, int *ymax) +static inline void get_limits(MpegEncContext *s, int x, int y) { - if(s->avctx->me_range) *range= s->avctx->me_range >> 1; - else *range= 16; - +/* + if(s->avctx->me_range) s->me.range= s->avctx->me_range >> 1; + else s->me.range= 16; +*/ if (s->unrestricted_mv) { - *xmin = -16; - *ymin = -16; - *xmax = s->mb_width*16; - *ymax = s->mb_height*16; + s->me.xmin = - x - 16; + s->me.ymin = - y - 16; + s->me.xmax = - x + s->mb_width *16; + s->me.ymax = - y + s->mb_height*16; } else { - *xmin = 0; - *ymin = 0; - *xmax = s->mb_width*16 - 16; - *ymax = s->mb_height*16 - 16; + s->me.xmin = - x; + s->me.ymin = - y; + s->me.xmax = - x + s->mb_width *16 - 16; + s->me.ymax = - y + s->mb_height*16 - 16; } - - //FIXME try to limit x/y min/max if me_range is set } -static inline int h263_mv4_search(MpegEncContext *s, int xmin, int ymin, int xmax, int ymax, int mx, int my, int shift) +static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift) { + const int size= 1; + const int h=8; int block; int P[10][2]; int dmin_sum=0, mx4_sum=0, my4_sum=0; uint8_t * const mv_penalty= s->me.mv_penalty[s->f_code] + MAX_MV; int same=1; + const int stride= s->linesize; + const int uvstride= s->uvlinesize; + const int xmin= s->me.xmin; + const int ymin= s->me.ymin; + const int xmax= s->me.xmax; + const int ymax= s->me.ymax; for(block=0; block<4; block++){ int mx4, my4; @@ -839,37 +786,37 @@ static inline int h263_mv4_search(MpegEncContext *s, int xmin, int ymin, int xma static const int off[4]= {2, 1, 1, -1}; const int mot_stride = s->block_wrap[0]; const int mot_xy = s->block_index[block]; -// const int block_x= (block&1); -// const int block_y= (block>>1); -#if 1 // this saves us a bit of cliping work and shouldnt affect compression in a negative way - const int rel_xmin4= xmin; - const int rel_xmax4= xmax; - const int rel_ymin4= ymin; - const int rel_ymax4= ymax; -#else - const int rel_xmin4= xmin - block_x*8; - const int rel_xmax4= xmax - block_x*8 + 8; - const int rel_ymin4= ymin - block_y*8; - const int rel_ymax4= ymax - block_y*8 + 8; -#endif - P_LEFT[0] = s->motion_val[mot_xy - 1][0]; - P_LEFT[1] = s->motion_val[mot_xy - 1][1]; - - if(P_LEFT[0] > (rel_xmax4<<shift)) P_LEFT[0] = (rel_xmax4<<shift); + const int block_x= (block&1); + const int block_y= (block>>1); + uint8_t *src_data[3]= { + s->new_picture.data[0] + 8*(2*s->mb_x + block_x) + stride *8*(2*s->mb_y + block_y), //FIXME chroma? + s->new_picture.data[1] + 4*(2*s->mb_x + block_x) + uvstride*4*(2*s->mb_y + block_y), + s->new_picture.data[2] + 4*(2*s->mb_x + block_x) + uvstride*4*(2*s->mb_y + block_y) + }; + uint8_t *ref_data[3]= { + s->last_picture.data[0] + 8*(2*s->mb_x + block_x) + stride *8*(2*s->mb_y + block_y), //FIXME chroma? 
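/*
 * Sketch of the relative-limit computation get_limits() now performs: the
 * allowed motion range is expressed relative to the macroblock position
 * (x,y) and stored once, so searches compare candidate vectors against the
 * limits directly.  struct me_limits is an illustrative assumption.
 */
struct me_limits { int xmin, ymin, xmax, ymax; };

static struct me_limits relative_limits(int x, int y, int mb_width,
                                        int mb_height, int unrestricted_mv)
{
    struct me_limits l;

    if (unrestricted_mv) {           /* vectors may point up to 16 pels outside */
        l.xmin = -x - 16;
        l.ymin = -y - 16;
        l.xmax = -x + mb_width  * 16;
        l.ymax = -y + mb_height * 16;
    } else {                         /* vectors must stay inside the picture */
        l.xmin = -x;
        l.ymin = -y;
        l.xmax = -x + mb_width  * 16 - 16;
        l.ymax = -y + mb_height * 16 - 16;
    }
    return l;
}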
+ s->last_picture.data[1] + 4*(2*s->mb_x + block_x) + uvstride*4*(2*s->mb_y + block_y), + s->last_picture.data[2] + 4*(2*s->mb_x + block_x) + uvstride*4*(2*s->mb_y + block_y) + }; + + P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0]; + P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1]; + + if(P_LEFT[0] > (s->me.xmax<<shift)) P_LEFT[0] = (s->me.xmax<<shift); /* special case for first line */ if (s->mb_y == 0 && block<2) { pred_x4= P_LEFT[0]; pred_y4= P_LEFT[1]; } else { - P_TOP[0] = s->motion_val[mot_xy - mot_stride ][0]; - P_TOP[1] = s->motion_val[mot_xy - mot_stride ][1]; - P_TOPRIGHT[0] = s->motion_val[mot_xy - mot_stride + off[block]][0]; - P_TOPRIGHT[1] = s->motion_val[mot_xy - mot_stride + off[block]][1]; - if(P_TOP[1] > (rel_ymax4<<shift)) P_TOP[1] = (rel_ymax4<<shift); - if(P_TOPRIGHT[0] < (rel_xmin4<<shift)) P_TOPRIGHT[0]= (rel_xmin4<<shift); - if(P_TOPRIGHT[0] > (rel_xmax4<<shift)) P_TOPRIGHT[0]= (rel_xmax4<<shift); - if(P_TOPRIGHT[1] > (rel_ymax4<<shift)) P_TOPRIGHT[1]= (rel_ymax4<<shift); + P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0]; + P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1]; + P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][0]; + P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + off[block]][1]; + if(P_TOP[1] > (s->me.ymax<<shift)) P_TOP[1] = (s->me.ymax<<shift); + if(P_TOPRIGHT[0] < (s->me.xmin<<shift)) P_TOPRIGHT[0]= (s->me.xmin<<shift); + if(P_TOPRIGHT[0] > (s->me.xmax<<shift)) P_TOPRIGHT[0]= (s->me.xmax<<shift); + if(P_TOPRIGHT[1] > (s->me.ymax<<shift)) P_TOPRIGHT[1]= (s->me.ymax<<shift); P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]); P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]); @@ -887,33 +834,34 @@ static inline int h263_mv4_search(MpegEncContext *s, int xmin, int ymin, int xma P_MV1[0]= mx; P_MV1[1]= my; - dmin4 = s->me.motion_search[1](s, block, &mx4, &my4, P, pred_x4, pred_y4, rel_xmin4, rel_ymin4, rel_xmax4, rel_ymax4, - &s->last_picture, s->p_mv_table, (1<<16)>>shift, mv_penalty); + dmin4 = s->me.motion_search[1](s, &mx4, &my4, P, pred_x4, pred_y4, + src_data, ref_data, stride, uvstride, s->p_mv_table, (1<<16)>>shift, mv_penalty); - dmin4= s->me.sub_motion_search(s, &mx4, &my4, dmin4, rel_xmin4, rel_ymin4, rel_xmax4, rel_ymax4, - pred_x4, pred_y4, &s->last_picture, block, 1, mv_penalty); + dmin4= s->me.sub_motion_search(s, &mx4, &my4, dmin4, + pred_x4, pred_y4, src_data, ref_data, stride, uvstride, size, h, mv_penalty); - if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0]){ + if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0] + && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE){ int dxy; - const int offset= ((block&1) + (block>>1)*s->linesize)*8; + const int offset= ((block&1) + (block>>1)*stride)*8; uint8_t *dest_y = s->me.scratchpad + offset; if(s->quarter_sample){ - uint8_t *ref= s->last_picture.data[0] + (s->mb_x*16 + (mx4>>2)) + (s->mb_y*16 + (my4>>2))*s->linesize + offset; + uint8_t *ref= ref_data[0] + (mx4>>2) + (my4>>2)*stride; dxy = ((my4 & 3) << 2) | (mx4 & 3); if(s->no_rounding) s->dsp.put_no_rnd_qpel_pixels_tab[1][dxy](dest_y , ref , s->linesize); else - s->dsp.put_qpel_pixels_tab [1][dxy](dest_y , ref , s->linesize); + s->dsp.put_qpel_pixels_tab [1][dxy](dest_y , ref , stride); }else{ - uint8_t *ref= s->last_picture.data[0] + (s->mb_x*16 + (mx4>>1)) + (s->mb_y*16 + (my4>>1))*s->linesize + offset; + uint8_t *ref= ref_data[0] + (mx4>>1) + (my4>>1)*stride; dxy = ((my4 & 1) << 1) | (mx4 & 1); if(s->no_rounding) - 
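/*
 * Sketch of the median predictor behind P_MEDIAN above: each component is
 * the median of the left, top and top-right neighbour vectors, the usual
 * H.263/MPEG-4 rule.  mid_pred3 is a plain reimplementation for
 * illustration, not the libavcodec macro.
 */
static int mid_pred3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }    /* now a <= b */
    if (b > c) b = c;                          /* b = min(larger of pair, c) */
    return a > b ? a : b;                      /* median of the three */
}

/* e.g. pred_x = mid_pred3(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]); */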
s->dsp.put_no_rnd_pixels_tab[1][dxy](dest_y , ref , s->linesize, 8); + s->dsp.put_no_rnd_pixels_tab[1][dxy](dest_y , ref , stride, h); else - s->dsp.put_pixels_tab [1][dxy](dest_y , ref , s->linesize, 8); + s->dsp.put_pixels_tab [1][dxy](dest_y , ref , stride, h); } dmin_sum+= (mv_penalty[mx4-pred_x4] + mv_penalty[my4-pred_y4])*s->me.mb_penalty_factor; }else @@ -927,8 +875,8 @@ static inline int h263_mv4_search(MpegEncContext *s, int xmin, int ymin, int xma my4_sum+= my4; } - s->motion_val[ s->block_index[block] ][0]= mx4; - s->motion_val[ s->block_index[block] ][1]= my4; + s->current_picture.motion_val[0][ s->block_index[block] ][0]= mx4; + s->current_picture.motion_val[0][ s->block_index[block] ][1]= my4; if(mx4 != mx || my4 != my) same=0; } @@ -937,7 +885,7 @@ static inline int h263_mv4_search(MpegEncContext *s, int xmin, int ymin, int xma return INT_MAX; if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0]){ - dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*16*s->linesize, s->me.scratchpad, s->linesize); + dmin_sum += s->dsp.mb_cmp[0](s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*16*stride, s->me.scratchpad, stride, 16); } if(s->avctx->mb_cmp&FF_CMP_CHROMA){ @@ -959,8 +907,8 @@ static inline int h263_mv4_search(MpegEncContext *s, int xmin, int ymin, int xma s->dsp.put_pixels_tab [1][dxy](s->me.scratchpad+8 , s->last_picture.data[2] + offset, s->uvlinesize, 8); } - dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, s->me.scratchpad , s->uvlinesize); - dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, s->me.scratchpad+8, s->uvlinesize); + dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, s->me.scratchpad , s->uvlinesize, 8); + dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, s->me.scratchpad+8, s->uvlinesize, 8); } switch(s->avctx->mb_cmp&0xFF){ @@ -973,13 +921,135 @@ static inline int h263_mv4_search(MpegEncContext *s, int xmin, int ymin, int xma } } +static int interlaced_search(MpegEncContext *s, uint8_t *frame_src_data[3], uint8_t *frame_ref_data[3], + int16_t (*mv_tables[2][2])[2], uint8_t *field_select_tables[2], int f_code, int mx, int my) +{ + const int size=0; + const int h=8; + int block; + int P[10][2]; + uint8_t * const mv_penalty= s->me.mv_penalty[f_code] + MAX_MV; + int same=1; + const int stride= 2*s->linesize; + const int uvstride= 2*s->uvlinesize; + int dmin_sum= 0; + const int mot_stride= s->mb_stride; + const int xy= s->mb_x + s->mb_y*mot_stride; + + s->me.ymin>>=1; + s->me.ymax>>=1; + + for(block=0; block<2; block++){ + int field_select; + int best_dmin= INT_MAX; + int best_field= -1; + + uint8_t *src_data[3]= { + frame_src_data[0] + s-> linesize*block, + frame_src_data[1] + s->uvlinesize*block, + frame_src_data[2] + s->uvlinesize*block + }; + + for(field_select=0; field_select<2; field_select++){ + int dmin, mx_i, my_i, pred_x, pred_y; + uint8_t *ref_data[3]= { + frame_ref_data[0] + s-> linesize*field_select, + frame_ref_data[1] + s->uvlinesize*field_select, + frame_ref_data[2] + s->uvlinesize*field_select + }; + int16_t (*mv_table)[2]= mv_tables[block][field_select]; + + P_LEFT[0] = mv_table[xy - 1][0]; + P_LEFT[1] = mv_table[xy - 1][1]; + if(P_LEFT[0] > (s->me.xmax<<1)) P_LEFT[0] = (s->me.xmax<<1); + + pred_x= P_LEFT[0]; + pred_y= P_LEFT[1]; + + if(s->mb_y){ + P_TOP[0] = mv_table[xy - mot_stride][0]; + P_TOP[1] = mv_table[xy - mot_stride][1]; + P_TOPRIGHT[0] = 
mv_table[xy - mot_stride + 1][0]; + P_TOPRIGHT[1] = mv_table[xy - mot_stride + 1][1]; + if(P_TOP[1] > (s->me.ymax<<1)) P_TOP[1] = (s->me.ymax<<1); + if(P_TOPRIGHT[0] < (s->me.xmin<<1)) P_TOPRIGHT[0]= (s->me.xmin<<1); + if(P_TOPRIGHT[0] > (s->me.xmax<<1)) P_TOPRIGHT[0]= (s->me.xmax<<1); + if(P_TOPRIGHT[1] > (s->me.ymax<<1)) P_TOPRIGHT[1]= (s->me.ymax<<1); + + P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]); + P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]); + } + P_MV1[0]= mx; //FIXME not correct if block != field_select + P_MV1[1]= my / 2; + + dmin = s->me.motion_search[4](s, &mx_i, &my_i, P, pred_x, pred_y, + src_data, ref_data, stride, uvstride, mv_table, (1<<16)>>1, mv_penalty); + + dmin= s->me.sub_motion_search(s, &mx_i, &my_i, dmin, + pred_x, pred_y, src_data, ref_data, stride, uvstride, size, h, mv_penalty); + + mv_table[xy][0]= mx_i; + mv_table[xy][1]= my_i; + + if(s->dsp.me_sub_cmp[0] != s->dsp.mb_cmp[0] + && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE){ + int dxy; + + //FIXME chroma ME + uint8_t *ref= ref_data[0] + (mx_i>>1) + (my_i>>1)*stride; + dxy = ((my_i & 1) << 1) | (mx_i & 1); + + if(s->no_rounding){ + s->dsp.put_no_rnd_pixels_tab[size][dxy](s->me.scratchpad, ref , stride, h); + }else{ + s->dsp.put_pixels_tab [size][dxy](s->me.scratchpad, ref , stride, h); + } + dmin= s->dsp.mb_cmp[size](s, src_data[0], s->me.scratchpad, stride, h); + dmin+= (mv_penalty[mx_i-pred_x] + mv_penalty[my_i-pred_y] + 1)*s->me.mb_penalty_factor; + }else + dmin+= s->me.mb_penalty_factor; //field_select bits + + dmin += field_select != block; //slightly prefer same field + + if(dmin < best_dmin){ + best_dmin= dmin; + best_field= field_select; + } + } + { + int16_t (*mv_table)[2]= mv_tables[block][best_field]; + + if(mv_table[xy][0] != mx) same=0; //FIXME check if these checks work and are any good at all + if(mv_table[xy][1]&1) same=0; + if(mv_table[xy][1]*2 != my) same=0; + if(best_field != block) same=0; + } + + field_select_tables[block][xy]= best_field; + dmin_sum += best_dmin; + } + + s->me.ymin<<=1; + s->me.ymax<<=1; + + if(same) + return INT_MAX; + + switch(s->avctx->mb_cmp&0xFF){ + /*case FF_CMP_SSE: + return dmin_sum+ 32*s->qscale*s->qscale;*/ + case FF_CMP_RD: + return dmin_sum; + default: + return dmin_sum+ 11*s->me.mb_penalty_factor; + } +} + void ff_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y) { uint8_t *pix, *ppix; - int sum, varc, vard, mx, my, range, dmin, xx, yy; - int xmin, ymin, xmax, ymax; - int rel_xmin, rel_ymin, rel_xmax, rel_ymax; + int sum, varc, vard, mx, my, dmin, xx, yy; int pred_x=0, pred_y=0; int P[10][2]; const int shift= 1+s->quarter_sample; @@ -987,18 +1057,26 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, uint8_t *ref_picture= s->last_picture.data[0]; Picture * const pic= &s->current_picture; uint8_t * const mv_penalty= s->me.mv_penalty[s->f_code] + MAX_MV; - + const int stride= s->linesize; + const int uvstride= s->uvlinesize; + uint8_t *src_data[3]= { + s->new_picture.data[0] + 16*(mb_x + stride*mb_y), + s->new_picture.data[1] + 8*(mb_x + uvstride*mb_y), + s->new_picture.data[2] + 8*(mb_x + uvstride*mb_y) + }; + uint8_t *ref_data[3]= { + s->last_picture.data[0] + 16*(mb_x + stride*mb_y), + s->last_picture.data[1] + 8*(mb_x + uvstride*mb_y), + s->last_picture.data[2] + 8*(mb_x + uvstride*mb_y) + }; + assert(s->quarter_sample==0 || s->quarter_sample==1); s->me.penalty_factor = get_penalty_factor(s, s->avctx->me_cmp); s->me.sub_penalty_factor= get_penalty_factor(s, s->avctx->me_sub_cmp); s->me.mb_penalty_factor = 
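/*
 * Sketch of how interlaced_search() above addresses the two fields of a
 * frame macroblock: same plane, stride doubled, and the bottom field
 * starts one frame line lower.  Each 16x8 field block is then searched
 * independently against either reference field.  field_ptr is an
 * illustrative helper, not a libavcodec function.
 */
#include <stdint.h>

static uint8_t *field_ptr(uint8_t *frame_plane, int frame_stride,
                          int field /* 0 = top, 1 = bottom */,
                          int *field_stride)
{
    *field_stride = 2 * frame_stride;      /* skip the other field's lines */
    return frame_plane + field * frame_stride;
}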
get_penalty_factor(s, s->avctx->mb_cmp); - get_limits(s, &range, &xmin, &ymin, &xmax, &ymax); - rel_xmin= xmin - mb_x*16; - rel_xmax= xmax - mb_x*16; - rel_ymin= ymin - mb_y*16; - rel_ymax= ymax - mb_y*16; + get_limits(s, 16*mb_x, 16*mb_y); s->me.skip=0; switch(s->me_method) { @@ -1009,40 +1087,42 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, my-= mb_y*16; dmin = 0; break; +#if 0 case ME_FULL: - dmin = full_motion_search(s, &mx, &my, range, xmin, ymin, xmax, ymax, ref_picture); + dmin = full_motion_search(s, &mx, &my, range, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; case ME_LOG: - dmin = log_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); + dmin = log_motion_search(s, &mx, &my, range / 2, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; case ME_PHODS: - dmin = phods_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); + dmin = phods_motion_search(s, &mx, &my, range / 2, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; +#endif case ME_X1: case ME_EPZS: { const int mot_stride = s->block_wrap[0]; const int mot_xy = s->block_index[0]; - P_LEFT[0] = s->motion_val[mot_xy - 1][0]; - P_LEFT[1] = s->motion_val[mot_xy - 1][1]; + P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0]; + P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1]; - if(P_LEFT[0] > (rel_xmax<<shift)) P_LEFT[0] = (rel_xmax<<shift); + if(P_LEFT[0] > (s->me.xmax<<shift)) P_LEFT[0] = (s->me.xmax<<shift); if(mb_y) { - P_TOP[0] = s->motion_val[mot_xy - mot_stride ][0]; - P_TOP[1] = s->motion_val[mot_xy - mot_stride ][1]; - P_TOPRIGHT[0] = s->motion_val[mot_xy - mot_stride + 2][0]; - P_TOPRIGHT[1] = s->motion_val[mot_xy - mot_stride + 2][1]; - if(P_TOP[1] > (rel_ymax<<shift)) P_TOP[1] = (rel_ymax<<shift); - if(P_TOPRIGHT[0] < (rel_xmin<<shift)) P_TOPRIGHT[0]= (rel_xmin<<shift); - if(P_TOPRIGHT[1] > (rel_ymax<<shift)) P_TOPRIGHT[1]= (rel_ymax<<shift); + P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0]; + P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1]; + P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][0]; + P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][1]; + if(P_TOP[1] > (s->me.ymax<<shift)) P_TOP[1] = (s->me.ymax<<shift); + if(P_TOPRIGHT[0] < (s->me.xmin<<shift)) P_TOPRIGHT[0]= (s->me.xmin<<shift); + if(P_TOPRIGHT[1] > (s->me.ymax<<shift)) P_TOPRIGHT[1]= (s->me.ymax<<shift); P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]); P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]); @@ -1060,8 +1140,8 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, } } - dmin = s->me.motion_search[0](s, 0, &mx, &my, P, pred_x, pred_y, rel_xmin, rel_ymin, rel_xmax, rel_ymax, - &s->last_picture, s->p_mv_table, (1<<16)>>shift, mv_penalty); + dmin = s->me.motion_search[0](s, &mx, &my, P, pred_x, pred_y, + src_data, ref_data, stride, uvstride, s->p_mv_table, (1<<16)>>shift, mv_penalty); break; } @@ -1070,14 +1150,14 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, xx = mb_x * 16; yy = mb_y * 16; - pix = s->new_picture.data[0] + (yy * s->linesize) + xx; + pix = src_data[0]; /* At this point (mx,my) are full-pell and the relative displacement */ - ppix = ref_picture + ((yy+my) * s->linesize) + (xx+mx); + ppix = ref_data[0] + (my * s->linesize) + mx; sum = s->dsp.pix_sum(pix, s->linesize); varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8; - vard = (s->dsp.sse[0](NULL, pix, ppix, s->linesize)+128)>>8; + vard = 
(s->dsp.sse[0](NULL, pix, ppix, s->linesize, 16)+128)>>8; //printf("%d %d %d %X %X %X\n", s->mb_width, mb_x, mb_y,(int)s, (int)s->mb_var, (int)s->mc_mb_var); fflush(stdout); pic->mb_var [s->mb_stride * mb_y + mb_x] = varc; @@ -1099,47 +1179,109 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, s->scene_change_score+= s->qscale; if (vard*2 + 200 > varc) - mb_type|= MB_TYPE_INTRA; + mb_type|= CANDIDATE_MB_TYPE_INTRA; if (varc*2 + 200 > vard){ - mb_type|= MB_TYPE_INTER; - s->me.sub_motion_search(s, &mx, &my, dmin, rel_xmin, rel_ymin, rel_xmax, rel_ymax, - pred_x, pred_y, &s->last_picture, 0, 0, mv_penalty); + mb_type|= CANDIDATE_MB_TYPE_INTER; + s->me.sub_motion_search(s, &mx, &my, dmin, + pred_x, pred_y, src_data, ref_data, stride, uvstride, 0, 16, mv_penalty); if(s->flags&CODEC_FLAG_MV0) if(mx || my) - mb_type |= MB_TYPE_SKIPED; //FIXME check difference + mb_type |= CANDIDATE_MB_TYPE_SKIPED; //FIXME check difference }else{ mx <<=shift; my <<=shift; } if((s->flags&CODEC_FLAG_4MV) && !s->me.skip && varc>50 && vard>10){ - if(h263_mv4_search(s, rel_xmin, rel_ymin, rel_xmax, rel_ymax, mx, my, shift) < INT_MAX) - mb_type|=MB_TYPE_INTER4V; + if(h263_mv4_search(s, mx, my, shift) < INT_MAX) + mb_type|=CANDIDATE_MB_TYPE_INTER4V; set_p_mv_tables(s, mx, my, 0); }else set_p_mv_tables(s, mx, my, 1); + if((s->flags&CODEC_FLAG_INTERLACED_ME) + && !s->me.skip){ //FIXME varc/d checks + if(interlaced_search(s, src_data, ref_data, s->p_field_mv_table, s->p_field_select_table, s->f_code, mx, my) < INT_MAX) + mb_type |= CANDIDATE_MB_TYPE_INTER_I; + } }else{ - mb_type= MB_TYPE_INTER; + int intra_score, i; + mb_type= CANDIDATE_MB_TYPE_INTER; - dmin= s->me.sub_motion_search(s, &mx, &my, dmin, rel_xmin, rel_ymin, rel_xmax, rel_ymax, - pred_x, pred_y, &s->last_picture, 0, 0, mv_penalty); - + dmin= s->me.sub_motion_search(s, &mx, &my, dmin, + pred_x, pred_y, src_data, ref_data, stride, uvstride, 0, 16, mv_penalty); if(s->avctx->me_sub_cmp != s->avctx->mb_cmp && !s->me.skip) - dmin= s->me.get_mb_score(s, mx, my, pred_x, pred_y, &s->last_picture, mv_penalty); + dmin= s->me.get_mb_score(s, mx, my, pred_x, pred_y, src_data, ref_data, stride, uvstride, mv_penalty); if((s->flags&CODEC_FLAG_4MV) && !s->me.skip && varc>50 && vard>10){ - int dmin4= h263_mv4_search(s, rel_xmin, rel_ymin, rel_xmax, rel_ymax, mx, my, shift); + int dmin4= h263_mv4_search(s, mx, my, shift); if(dmin4 < dmin){ - mb_type= MB_TYPE_INTER4V; + mb_type= CANDIDATE_MB_TYPE_INTER4V; dmin=dmin4; } } - pic->mb_cmp_score[s->mb_stride * mb_y + mb_x] = dmin; - set_p_mv_tables(s, mx, my, mb_type!=MB_TYPE_INTER4V); + if((s->flags&CODEC_FLAG_INTERLACED_ME) + && !s->me.skip){ //FIXME varc/d checks + int dmin_i= interlaced_search(s, src_data, ref_data, s->p_field_mv_table, s->p_field_select_table, s->f_code, mx, my); + if(dmin_i < dmin){ + mb_type = CANDIDATE_MB_TYPE_INTER_I; + dmin= dmin_i; + } + } + +// pic->mb_cmp_score[s->mb_stride * mb_y + mb_x] = dmin; + set_p_mv_tables(s, mx, my, mb_type!=CANDIDATE_MB_TYPE_INTER4V); + + /* get intra luma score */ + if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){ + intra_score= (varc<<8) - 500; //FIXME dont scale it down so we dont have to fix it + }else{ + int mean= (sum+128)>>8; + mean*= 0x01010101; + + for(i=0; i<16; i++){ + *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean; + *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean; + *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean; + *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean; + } + + intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, pix, 
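/*
 * Sketch of the intra-score trick above: the cheapest intra "prediction"
 * is a flat block at the source mean, so scoring the source against its
 * own mean gives a rough intra cost on the same scale as the inter
 * scores.  SAD is used here for simplicity; the real code reuses mb_cmp.
 */
#include <stdint.h>
#include <stdlib.h>

static int intra_score16(const uint8_t *src, int stride)
{
    int x, y, sum = 0, score = 0, mean;

    for (y = 0; y < 16; y++)
        for (x = 0; x < 16; x++)
            sum += src[y * stride + x];
    mean = (sum + 128) >> 8;               /* rounded mean of 256 pixels */

    for (y = 0; y < 16; y++)
        for (x = 0; x < 16; x++)
            score += abs(src[y * stride + x] - mean);
    return score;
}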
s->linesize, 16); + } +#if 0 //FIXME + /* get chroma score */ + if(s->avctx->mb_cmp&FF_CMP_CHROMA){ + for(i=1; i<3; i++){ + uint8_t *dest_c; + int mean; + + if(s->out_format == FMT_H263){ + mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;) + }else{ + mean= (s->last_dc[i] + 4)>>3; + } + dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; + + mean*= 0x01010101; + for(i=0; i<8; i++){ + *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean; + *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean; + } + + intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize); + } + } +#endif + intra_score += s->me.mb_penalty_factor*16; - if (vard <= 64 || vard < varc) { + if(intra_score < dmin){ + mb_type= CANDIDATE_MB_TYPE_INTRA; + s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= CANDIDATE_MB_TYPE_INTRA; //FIXME cleanup + }else + s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= 0; + + if (vard <= 64 || vard < varc) { //FIXME s->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc); }else{ s->scene_change_score+= s->qscale; @@ -1152,30 +1294,36 @@ void ff_estimate_p_frame_motion(MpegEncContext * s, int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y) { - int mx, my, range, dmin; - int xmin, ymin, xmax, ymax; - int rel_xmin, rel_ymin, rel_xmax, rel_ymax; + int mx, my, dmin; int pred_x=0, pred_y=0; int P[10][2]; const int shift= 1+s->quarter_sample; uint8_t * const mv_penalty= s->me.mv_penalty[s->f_code] + MAX_MV; const int xy= mb_x + mb_y*s->mb_stride; + const int stride= s->linesize; + const int uvstride= s->uvlinesize; + uint8_t *src_data[3]= { + s->new_picture.data[0] + 16*(mb_x + stride*mb_y), + s->new_picture.data[1] + 8*(mb_x + uvstride*mb_y), + s->new_picture.data[2] + 8*(mb_x + uvstride*mb_y) + }; + uint8_t *ref_data[3]= { + s->last_picture.data[0] + 16*(mb_x + stride*mb_y), + s->last_picture.data[1] + 8*(mb_x + uvstride*mb_y), + s->last_picture.data[2] + 8*(mb_x + uvstride*mb_y) + }; assert(s->quarter_sample==0 || s->quarter_sample==1); s->me.pre_penalty_factor = get_penalty_factor(s, s->avctx->me_pre_cmp); - get_limits(s, &range, &xmin, &ymin, &xmax, &ymax); - rel_xmin= xmin - mb_x*16; - rel_xmax= xmax - mb_x*16; - rel_ymin= ymin - mb_y*16; - rel_ymax= ymax - mb_y*16; + get_limits(s, 16*mb_x, 16*mb_y); s->me.skip=0; P_LEFT[0] = s->p_mv_table[xy + 1][0]; P_LEFT[1] = s->p_mv_table[xy + 1][1]; - if(P_LEFT[0] < (rel_xmin<<shift)) P_LEFT[0] = (rel_xmin<<shift); + if(P_LEFT[0] < (s->me.xmin<<shift)) P_LEFT[0] = (s->me.xmin<<shift); /* special case for first line */ if (mb_y == s->mb_height-1) { @@ -1188,9 +1336,9 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s, P_TOP[1] = s->p_mv_table[xy + s->mb_stride ][1]; P_TOPRIGHT[0] = s->p_mv_table[xy + s->mb_stride - 1][0]; P_TOPRIGHT[1] = s->p_mv_table[xy + s->mb_stride - 1][1]; - if(P_TOP[1] < (rel_ymin<<shift)) P_TOP[1] = (rel_ymin<<shift); - if(P_TOPRIGHT[0] > (rel_xmax<<shift)) P_TOPRIGHT[0]= (rel_xmax<<shift); - if(P_TOPRIGHT[1] < (rel_ymin<<shift)) P_TOPRIGHT[1]= (rel_ymin<<shift); + if(P_TOP[1] < (s->me.ymin<<shift)) P_TOP[1] = (s->me.ymin<<shift); + if(P_TOPRIGHT[0] > (s->me.xmax<<shift)) P_TOPRIGHT[0]= (s->me.xmax<<shift); + if(P_TOPRIGHT[1] < (s->me.ymin<<shift)) P_TOPRIGHT[1]= (s->me.ymin<<shift); P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]); P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]); @@ -1198,8 +1346,8 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s, pred_x = P_MEDIAN[0]; pred_y = 
P_MEDIAN[1]; } - dmin = s->me.pre_motion_search(s, 0, &mx, &my, P, pred_x, pred_y, rel_xmin, rel_ymin, rel_xmax, rel_ymax, - &s->last_picture, s->p_mv_table, (1<<16)>>shift, mv_penalty); + dmin = s->me.pre_motion_search(s, &mx, &my, P, pred_x, pred_y, + src_data, ref_data, stride, uvstride, s->p_mv_table, (1<<16)>>shift, mv_penalty); s->p_mv_table[xy][0] = mx<<shift; s->p_mv_table[xy][1] = my<<shift; @@ -1208,17 +1356,16 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s, } static int ff_estimate_motion_b(MpegEncContext * s, - int mb_x, int mb_y, int16_t (*mv_table)[2], Picture *picture, int f_code) + int mb_x, int mb_y, int16_t (*mv_table)[2], uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, int f_code) { - int mx, my, range, dmin; - int xmin, ymin, xmax, ymax; - int rel_xmin, rel_ymin, rel_xmax, rel_ymax; + int mx, my, dmin; int pred_x=0, pred_y=0; int P[10][2]; const int shift= 1+s->quarter_sample; const int mot_stride = s->mb_stride; const int mot_xy = mb_y*mot_stride + mb_x; - uint8_t * const ref_picture= picture->data[0]; + uint8_t * const ref_picture= ref_data[0] - 16*s->mb_x - 16*s->mb_y*s->linesize; //FIXME ugly uint8_t * const mv_penalty= s->me.mv_penalty[f_code] + MAX_MV; int mv_scale; @@ -1226,11 +1373,7 @@ static int ff_estimate_motion_b(MpegEncContext * s, s->me.sub_penalty_factor= get_penalty_factor(s, s->avctx->me_sub_cmp); s->me.mb_penalty_factor = get_penalty_factor(s, s->avctx->mb_cmp); - get_limits(s, &range, &xmin, &ymin, &xmax, &ymax); - rel_xmin= xmin - mb_x*16; - rel_xmax= xmax - mb_x*16; - rel_ymin= ymin - mb_y*16; - rel_ymax= ymax - mb_y*16; + get_limits(s, 16*mb_x, 16*mb_y); switch(s->me_method) { case ME_ZERO: @@ -1240,28 +1383,30 @@ static int ff_estimate_motion_b(MpegEncContext * s, mx-= mb_x*16; my-= mb_y*16; break; +#if 0 case ME_FULL: - dmin = full_motion_search(s, &mx, &my, range, xmin, ymin, xmax, ymax, ref_picture); + dmin = full_motion_search(s, &mx, &my, range, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; case ME_LOG: - dmin = log_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); + dmin = log_motion_search(s, &mx, &my, range / 2, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; case ME_PHODS: - dmin = phods_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); + dmin = phods_motion_search(s, &mx, &my, range / 2, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; +#endif case ME_X1: case ME_EPZS: { P_LEFT[0] = mv_table[mot_xy - 1][0]; P_LEFT[1] = mv_table[mot_xy - 1][1]; - if(P_LEFT[0] > (rel_xmax<<shift)) P_LEFT[0] = (rel_xmax<<shift); + if(P_LEFT[0] > (s->me.xmax<<shift)) P_LEFT[0] = (s->me.xmax<<shift); /* special case for first line */ if (mb_y) { @@ -1269,9 +1414,9 @@ static int ff_estimate_motion_b(MpegEncContext * s, P_TOP[1] = mv_table[mot_xy - mot_stride ][1]; P_TOPRIGHT[0] = mv_table[mot_xy - mot_stride + 1 ][0]; P_TOPRIGHT[1] = mv_table[mot_xy - mot_stride + 1 ][1]; - if(P_TOP[1] > (rel_ymax<<shift)) P_TOP[1]= (rel_ymax<<shift); - if(P_TOPRIGHT[0] < (rel_xmin<<shift)) P_TOPRIGHT[0]= (rel_xmin<<shift); - if(P_TOPRIGHT[1] > (rel_ymax<<shift)) P_TOPRIGHT[1]= (rel_ymax<<shift); + if(P_TOP[1] > (s->me.ymax<<shift)) P_TOP[1]= (s->me.ymax<<shift); + if(P_TOPRIGHT[0] < (s->me.xmin<<shift)) P_TOPRIGHT[0]= (s->me.xmin<<shift); + if(P_TOPRIGHT[1] > (s->me.ymax<<shift)) P_TOPRIGHT[1]= (s->me.ymax<<shift); P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]); P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]); @@ -1286,17 +1431,17 @@ static int 
ff_estimate_motion_b(MpegEncContext * s, mv_scale= ((s->pb_time - s->pp_time)<<16) / (s->pp_time<<shift); } - dmin = s->me.motion_search[0](s, 0, &mx, &my, P, pred_x, pred_y, rel_xmin, rel_ymin, rel_xmax, rel_ymax, - picture, s->p_mv_table, mv_scale, mv_penalty); + dmin = s->me.motion_search[0](s, &mx, &my, P, pred_x, pred_y, + src_data, ref_data, stride, uvstride, s->p_mv_table, mv_scale, mv_penalty); break; } - dmin= s->me.sub_motion_search(s, &mx, &my, dmin, rel_xmin, rel_ymin, rel_xmax, rel_ymax, - pred_x, pred_y, picture, 0, 0, mv_penalty); + dmin= s->me.sub_motion_search(s, &mx, &my, dmin, + pred_x, pred_y, src_data, ref_data, stride, uvstride, 0, 16, mv_penalty); if(s->avctx->me_sub_cmp != s->avctx->mb_cmp && !s->me.skip) - dmin= s->me.get_mb_score(s, mx, my, pred_x, pred_y, picture, mv_penalty); + dmin= s->me.get_mb_score(s, mx, my, pred_x, pred_y, src_data, ref_data, stride, uvstride, mv_penalty); //printf("%d %d %d %d//", s->mb_x, s->mb_y, mx, my); // s->mb_type[mb_y*s->mb_width + mb_x]= mb_type; @@ -1306,16 +1451,18 @@ static int ff_estimate_motion_b(MpegEncContext * s, return dmin; } -static inline int check_bidir_mv(MpegEncContext * s, - int mb_x, int mb_y, +static inline int check_bidir_mv(MpegEncContext * s, uint8_t *src_data[3], uint8_t *ref_data[6], + int stride, int uvstride, int motion_fx, int motion_fy, int motion_bx, int motion_by, int pred_fx, int pred_fy, - int pred_bx, int pred_by) + int pred_bx, int pred_by, + int size, int h) { //FIXME optimize? //FIXME move into template? //FIXME better f_code prediction (max mv & distance) + //FIXME pointers uint8_t * const mv_penalty= s->me.mv_penalty[s->f_code] + MAX_MV; // f_code of the prev frame uint8_t *dest_y = s->me.scratchpad; uint8_t *ptr; @@ -1325,45 +1472,37 @@ static inline int check_bidir_mv(MpegEncContext * s, if(s->quarter_sample){ dxy = ((motion_fy & 3) << 2) | (motion_fx & 3); - src_x = mb_x * 16 + (motion_fx >> 2); - src_y = mb_y * 16 + (motion_fy >> 2); - assert(src_x >=-16 && src_x<=s->h_edge_pos); - assert(src_y >=-16 && src_y<=s->v_edge_pos); + src_x = motion_fx >> 2; + src_y = motion_fy >> 2; - ptr = s->last_picture.data[0] + (src_y * s->linesize) + src_x; - s->dsp.put_qpel_pixels_tab[0][dxy](dest_y , ptr , s->linesize); + ptr = ref_data[0] + (src_y * stride) + src_x; + s->dsp.put_qpel_pixels_tab[0][dxy](dest_y , ptr , stride); dxy = ((motion_by & 3) << 2) | (motion_bx & 3); - src_x = mb_x * 16 + (motion_bx >> 2); - src_y = mb_y * 16 + (motion_by >> 2); - assert(src_x >=-16 && src_x<=s->h_edge_pos); - assert(src_y >=-16 && src_y<=s->v_edge_pos); + src_x = motion_bx >> 2; + src_y = motion_by >> 2; - ptr = s->next_picture.data[0] + (src_y * s->linesize) + src_x; - s->dsp.avg_qpel_pixels_tab[0][dxy](dest_y , ptr , s->linesize); + ptr = ref_data[3] + (src_y * stride) + src_x; + s->dsp.avg_qpel_pixels_tab[size][dxy](dest_y , ptr , stride); }else{ dxy = ((motion_fy & 1) << 1) | (motion_fx & 1); - src_x = mb_x * 16 + (motion_fx >> 1); - src_y = mb_y * 16 + (motion_fy >> 1); - assert(src_x >=-16 && src_x<=s->h_edge_pos); - assert(src_y >=-16 && src_y<=s->v_edge_pos); + src_x = motion_fx >> 1; + src_y = motion_fy >> 1; - ptr = s->last_picture.data[0] + (src_y * s->linesize) + src_x; - s->dsp.put_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16); + ptr = ref_data[0] + (src_y * stride) + src_x; + s->dsp.put_pixels_tab[size][dxy](dest_y , ptr , stride, h); dxy = ((motion_by & 1) << 1) | (motion_bx & 1); - src_x = mb_x * 16 + (motion_bx >> 1); - src_y = mb_y * 16 + (motion_by >> 1); - assert(src_x >=-16 && 
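/*
 * Sketch of the bidirectional check: the B-frame prediction is the rounded
 * average of the forward and backward motion-compensated blocks, scored
 * against the source.  Integer-pel and SAD only, so this is a
 * simplification of check_bidir_mv(); bidir_sad16 is an assumed name.
 */
#include <stdint.h>
#include <stdlib.h>

static int bidir_sad16(const uint8_t *src,
                       const uint8_t *fwd_ref, int fx, int fy,
                       const uint8_t *bwd_ref, int bx, int by,
                       int stride, int h)
{
    int x, y, sum = 0;
    const uint8_t *f = fwd_ref + fy * stride + fx;   /* forward prediction */
    const uint8_t *b = bwd_ref + by * stride + bx;   /* backward prediction */

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++) {
            int pred = (f[x] + b[x] + 1) >> 1;       /* rounded average */
            sum += abs(src[x] - pred);
        }
        src += stride;
        f   += stride;
        b   += stride;
    }
    return sum;
}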
src_x<=s->h_edge_pos); - assert(src_y >=-16 && src_y<=s->v_edge_pos); + src_x = motion_bx >> 1; + src_y = motion_by >> 1; - ptr = s->next_picture.data[0] + (src_y * s->linesize) + src_x; - s->dsp.avg_pixels_tab[0][dxy](dest_y , ptr , s->linesize, 16); + ptr = ref_data[3] + (src_y * stride) + src_x; + s->dsp.avg_pixels_tab[size][dxy](dest_y , ptr , stride, h); } fbmin = (mv_penalty[motion_fx-pred_fx] + mv_penalty[motion_fy-pred_fy])*s->me.mb_penalty_factor +(mv_penalty[motion_bx-pred_bx] + mv_penalty[motion_by-pred_by])*s->me.mb_penalty_factor - + s->dsp.mb_cmp[0](s, s->new_picture.data[0] + mb_x*16 + mb_y*16*s->linesize, dest_y, s->linesize); + + s->dsp.mb_cmp[size](s, src_data[0], dest_y, stride, h); //FIXME new_pic if(s->avctx->mb_cmp&FF_CMP_CHROMA){ } @@ -1373,7 +1512,8 @@ static inline int check_bidir_mv(MpegEncContext * s, } /* refine the bidir vectors in hq mode and return the score in both lq & hq mode*/ -static inline int bidir_refine(MpegEncContext * s, +static inline int bidir_refine(MpegEncContext * s, uint8_t *src_data[3], uint8_t *ref_data[6], + int stride, int uvstride, int mb_x, int mb_y) { const int mot_stride = s->mb_stride; @@ -1390,16 +1530,18 @@ static inline int bidir_refine(MpegEncContext * s, //FIXME do refinement and add flag - fbmin= check_bidir_mv(s, mb_x, mb_y, + fbmin= check_bidir_mv(s, src_data, ref_data, stride, uvstride, motion_fx, motion_fy, motion_bx, motion_by, pred_fx, pred_fy, - pred_bx, pred_by); + pred_bx, pred_by, + 0, 16); return fbmin; } -static inline int direct_search(MpegEncContext * s, +static inline int direct_search(MpegEncContext * s, uint8_t *src_data[3], uint8_t *ref_data[6], + int stride, int uvstride, int mb_x, int mb_y) { int P[10][2]; @@ -1426,8 +1568,8 @@ static inline int direct_search(MpegEncContext * s, int index= s->block_index[i]; int min, max; - s->me.co_located_mv[i][0]= s->motion_val[index][0]; - s->me.co_located_mv[i][1]= s->motion_val[index][1]; + s->me.co_located_mv[i][0]= s->next_picture.motion_val[0][index][0]; + s->me.co_located_mv[i][1]= s->next_picture.motion_val[0][index][1]; s->me.direct_basis_mv[i][0]= s->me.co_located_mv[i][0]*time_pb/time_pp + ((i& 1)<<(shift+3)); s->me.direct_basis_mv[i][1]= s->me.co_located_mv[i][1]*time_pb/time_pp + ((i>>1)<<(shift+3)); // s->me.direct_basis_mv[1][i][0]= s->me.co_located_mv[i][0]*(time_pb - time_pp)/time_pp + ((i &1)<<(shift+3); @@ -1435,15 +1577,15 @@ static inline int direct_search(MpegEncContext * s, max= FFMAX(s->me.direct_basis_mv[i][0], s->me.direct_basis_mv[i][0] - s->me.co_located_mv[i][0])>>shift; min= FFMIN(s->me.direct_basis_mv[i][0], s->me.direct_basis_mv[i][0] - s->me.co_located_mv[i][0])>>shift; - max+= (2*mb_x + (i& 1))*8 + 1; // +-1 is for the simpler rounding - min+= (2*mb_x + (i& 1))*8 - 1; + max+= 16*mb_x + 1; // +-1 is for the simpler rounding + min+= 16*mb_x - 1; xmax= FFMIN(xmax, s->width - max); xmin= FFMAX(xmin, - 16 - min); max= FFMAX(s->me.direct_basis_mv[i][1], s->me.direct_basis_mv[i][1] - s->me.co_located_mv[i][1])>>shift; min= FFMIN(s->me.direct_basis_mv[i][1], s->me.direct_basis_mv[i][1] - s->me.co_located_mv[i][1])>>shift; - max+= (2*mb_y + (i>>1))*8 + 1; // +-1 is for the simpler rounding - min+= (2*mb_y + (i>>1))*8 - 1; + max+= 16*mb_y + 1; // +-1 is for the simpler rounding + min+= 16*mb_y - 1; ymax= FFMIN(ymax, s->height - max); ymin= FFMAX(ymin, - 16 - min); @@ -1458,6 +1600,11 @@ static inline int direct_search(MpegEncContext * s, return 256*256*256*64; } + + s->me.xmin= xmin; + s->me.ymin= ymin; + s->me.xmax= xmax; + s->me.ymax= ymax; 
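/*
 * Sketch of the direct-mode basis vectors computed above: the co-located
 * MV from the future reference is scaled by the temporal distances
 * time_pb/time_pp to give the forward vector; the backward vector is the
 * (time_pb - time_pp) counterpart.  Plain integer division, no rounding
 * control, purely illustrative.
 */
static void direct_basis(int co_mx, int co_my, int time_pp, int time_pb,
                         int *fwd_mx, int *fwd_my, int *bwd_mx, int *bwd_my)
{
    *fwd_mx = co_mx * time_pb / time_pp;
    *fwd_my = co_my * time_pb / time_pp;
    *bwd_mx = co_mx * (time_pb - time_pp) / time_pp;
    *bwd_my = co_my * (time_pb - time_pp) / time_pp;
}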
P_LEFT[0] = clip(mv_table[mot_xy - 1][0], xmin<<shift, xmax<<shift); P_LEFT[1] = clip(mv_table[mot_xy - 1][1], ymin<<shift, ymax<<shift); @@ -1475,22 +1622,24 @@ static inline int direct_search(MpegEncContext * s, //FIXME direct_search ptr in context!!! (needed for chroma anyway or this will get messy) if(s->flags&CODEC_FLAG_QPEL){ - dmin = simple_direct_qpel_epzs_motion_search(s, 0, &mx, &my, P, 0, 0, xmin, ymin, xmax, ymax, - &s->last_picture, mv_table, 1<<14, mv_penalty); - dmin = simple_direct_qpel_qpel_motion_search(s, &mx, &my, dmin, xmin, ymin, xmax, ymax, - 0, 0, &s->last_picture, 0, 0, mv_penalty); + dmin = simple_direct_qpel_epzs_motion_search(s, &mx, &my, P, 0, 0, + src_data, ref_data, stride, uvstride, mv_table, 1<<14, mv_penalty); + dmin = simple_direct_qpel_qpel_motion_search(s, &mx, &my, dmin, + 0, 0, src_data, ref_data, stride, uvstride, 0, 16, mv_penalty); if(s->avctx->me_sub_cmp != s->avctx->mb_cmp && !s->me.skip) - dmin= simple_direct_qpel_qpel_get_mb_score(s, mx, my, 0, 0, &s->last_picture, mv_penalty); + dmin= simple_direct_qpel_qpel_get_mb_score(s, mx, my, 0, 0, src_data, ref_data, stride, uvstride, mv_penalty); }else{ - dmin = simple_direct_hpel_epzs_motion_search(s, 0, &mx, &my, P, 0, 0, xmin, ymin, xmax, ymax, - &s->last_picture, mv_table, 1<<15, mv_penalty); - dmin = simple_direct_hpel_hpel_motion_search(s, &mx, &my, dmin, xmin, ymin, xmax, ymax, - 0, 0, &s->last_picture, 0, 0, mv_penalty); + dmin = simple_direct_hpel_epzs_motion_search(s, &mx, &my, P, 0, 0, + src_data, ref_data, stride, uvstride, mv_table, 1<<15, mv_penalty); + dmin = simple_direct_hpel_hpel_motion_search(s, &mx, &my, dmin, + 0, 0, src_data, ref_data, stride, uvstride, 0, 16, mv_penalty); if(s->avctx->me_sub_cmp != s->avctx->mb_cmp && !s->me.skip) - dmin= simple_direct_hpel_hpel_get_mb_score(s, mx, my, 0, 0, &s->last_picture, mv_penalty); + dmin= simple_direct_hpel_hpel_get_mb_score(s, mx, my, 0, 0, src_data, ref_data, stride, uvstride, mv_penalty); } + + get_limits(s, 16*mb_x, 16*mb_y); //restore s->me.?min/max, maybe not needed s->b_direct_mv_table[mot_xy][0]= mx; s->b_direct_mv_table[mot_xy][1]= my; @@ -1501,40 +1650,80 @@ void ff_estimate_b_frame_motion(MpegEncContext * s, int mb_x, int mb_y) { const int penalty_factor= s->me.mb_penalty_factor; - int fmin, bmin, dmin, fbmin; + int fmin, bmin, dmin, fbmin, bimin, fimin; int type=0; + const int stride= s->linesize; + const int uvstride= s->uvlinesize; + uint8_t *src_data[3]= { + s->new_picture.data[0] + 16*(s->mb_x + stride*s->mb_y), + s->new_picture.data[1] + 8*(s->mb_x + uvstride*s->mb_y), + s->new_picture.data[2] + 8*(s->mb_x + uvstride*s->mb_y) + }; + uint8_t *ref_data[6]= { + s->last_picture.data[0] + 16*(s->mb_x + stride*s->mb_y), + s->last_picture.data[1] + 8*(s->mb_x + uvstride*s->mb_y), + s->last_picture.data[2] + 8*(s->mb_x + uvstride*s->mb_y), + s->next_picture.data[0] + 16*(s->mb_x + stride*s->mb_y), + s->next_picture.data[1] + 8*(s->mb_x + uvstride*s->mb_y), + s->next_picture.data[2] + 8*(s->mb_x + uvstride*s->mb_y) + }; s->me.skip=0; if (s->codec_id == CODEC_ID_MPEG4) - dmin= direct_search(s, mb_x, mb_y); + dmin= direct_search(s, src_data, ref_data, stride, uvstride, mb_x, mb_y); else dmin= INT_MAX; - +//FIXME penalty stuff for non mpeg4 s->me.skip=0; - fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, &s->last_picture, s->f_code) + 3*penalty_factor; + fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, src_data, + ref_data, stride, uvstride, s->f_code) + 3*penalty_factor; s->me.skip=0; - bmin= 
ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, &s->next_picture, s->b_code) + 2*penalty_factor; + bmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, src_data, + ref_data+3, stride, uvstride, s->b_code) + 2*penalty_factor; //printf(" %d %d ", s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]); s->me.skip=0; - fbmin= bidir_refine(s, mb_x, mb_y) + penalty_factor; + fbmin= bidir_refine(s, src_data, ref_data, stride, uvstride, mb_x, mb_y) + penalty_factor; //printf("%d %d %d %d\n", dmin, fmin, bmin, fbmin); + + if(s->flags & CODEC_FLAG_INTERLACED_ME){ + const int xy = mb_y*s->mb_stride + mb_x; + +//FIXME mb type penalty + s->me.skip=0; + fimin= interlaced_search(s, src_data, ref_data , + s->b_field_mv_table[0], s->b_field_select_table[0], s->f_code, + s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]); + bimin= interlaced_search(s, src_data, ref_data+3, + s->b_field_mv_table[1], s->b_field_select_table[1], s->b_code, + s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1]); + }else + fimin= bimin= INT_MAX; + { int score= fmin; - type = MB_TYPE_FORWARD; + type = CANDIDATE_MB_TYPE_FORWARD; if (dmin <= score){ score = dmin; - type = MB_TYPE_DIRECT; + type = CANDIDATE_MB_TYPE_DIRECT; } if(bmin<score){ score=bmin; - type= MB_TYPE_BACKWARD; + type= CANDIDATE_MB_TYPE_BACKWARD; } if(fbmin<score){ score=fbmin; - type= MB_TYPE_BIDIR; + type= CANDIDATE_MB_TYPE_BIDIR; + } + if(fimin<score){ + score=fimin; + type= CANDIDATE_MB_TYPE_FORWARD_I; + } + if(bimin<score){ + score=bimin; + type= CANDIDATE_MB_TYPE_BACKWARD_I; } score= ((unsigned)(score*score + 128*256))>>16; @@ -1543,8 +1732,20 @@ void ff_estimate_b_frame_motion(MpegEncContext * s, } if(s->avctx->mb_decision > FF_MB_DECISION_SIMPLE){ - type= MB_TYPE_FORWARD | MB_TYPE_BACKWARD | MB_TYPE_BIDIR | MB_TYPE_DIRECT; //FIXME something smarter - if(dmin>256*256*16) type&= ~MB_TYPE_DIRECT; //dont try direct mode if its invalid for this MB + type= CANDIDATE_MB_TYPE_FORWARD | CANDIDATE_MB_TYPE_BACKWARD | CANDIDATE_MB_TYPE_BIDIR | CANDIDATE_MB_TYPE_DIRECT; + if(fimin < INT_MAX) + type |= CANDIDATE_MB_TYPE_FORWARD_I; + if(bimin < INT_MAX) + type |= CANDIDATE_MB_TYPE_BACKWARD_I; + if(fimin < INT_MAX && bimin < INT_MAX){ + type |= CANDIDATE_MB_TYPE_BIDIR_I; + } + //FIXME something smarter + if(dmin>256*256*16) type&= ~CANDIDATE_MB_TYPE_DIRECT; //dont try direct mode if its invalid for this MB +#if 0 + if(s->out_format == FMT_MPEG1) + type |= CANDIDATE_MB_TYPE_INTRA; +#endif } s->mb_type[mb_y*s->mb_stride + mb_x]= type; @@ -1611,23 +1812,6 @@ void ff_fix_long_p_mvs(MpegEncContext * s) if(s->avctx->me_range && range > s->avctx->me_range) range= s->avctx->me_range; - /* clip / convert to intra 16x16 type MVs */ - for(y=0; y<s->mb_height; y++){ - int x; - int xy= y*s->mb_stride; - for(x=0; x<s->mb_width; x++){ - if(s->mb_type[xy]&MB_TYPE_INTER){ - if( s->p_mv_table[xy][0] >=range || s->p_mv_table[xy][0] <-range - || s->p_mv_table[xy][1] >=range || s->p_mv_table[xy][1] <-range){ - s->mb_type[xy] &= ~MB_TYPE_INTER; - s->mb_type[xy] |= MB_TYPE_INTRA; - s->p_mv_table[xy][0] = 0; - s->p_mv_table[xy][1] = 0; - } - } - xy++; - } - } //printf("%d no:%d %d//\n", clip, noclip, f_code); if(s->flags&CODEC_FLAG_4MV){ const int wrap= 2+ s->mb_width*2; @@ -1639,17 +1823,18 @@ void ff_fix_long_p_mvs(MpegEncContext * s) int x; for(x=0; x<s->mb_width; x++){ - if(s->mb_type[i]&MB_TYPE_INTER4V){ + if(s->mb_type[i]&CANDIDATE_MB_TYPE_INTER4V){ int block; for(block=0; block<4; block++){ int off= (block& 1) + (block>>1)*wrap; - int mx= s->motion_val[ xy + off ][0]; - 
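/*
 * Sketch of the candidate-type decision above: each estimated mode
 * returns a cost (INT_MAX when it was not tried) and the cheapest wins.
 * The enum values are illustrative stand-ins for CANDIDATE_MB_TYPE_*;
 * the real code's tie-breaking (direct preferred on equal score) is not
 * reproduced.
 */
enum { MB_FWD, MB_BAK, MB_BIDIR, MB_DIRECT, MB_FWD_I, MB_BAK_I, MB_NB };

static int pick_b_type(const int cost[MB_NB], int *best_cost)
{
    int i, type = MB_FWD;

    *best_cost = cost[MB_FWD];
    for (i = 1; i < MB_NB; i++) {
        if (cost[i] < *best_cost) {        /* strictly cheaper candidate */
            *best_cost = cost[i];
            type = i;
        }
    }
    return type;
}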
int my= s->motion_val[ xy + off ][1]; + int mx= s->current_picture.motion_val[0][ xy + off ][0]; + int my= s->current_picture.motion_val[0][ xy + off ][1]; if( mx >=range || mx <-range || my >=range || my <-range){ - s->mb_type[i] &= ~MB_TYPE_INTER4V; - s->mb_type[i] |= MB_TYPE_INTRA; + s->mb_type[i] &= ~CANDIDATE_MB_TYPE_INTER4V; + s->mb_type[i] |= CANDIDATE_MB_TYPE_INTRA; + s->current_picture.mb_type[i]= CANDIDATE_MB_TYPE_INTRA; } } } @@ -1660,30 +1845,45 @@ void ff_fix_long_p_mvs(MpegEncContext * s) } } -void ff_fix_long_b_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, int type) +/** + * + * @param truncate 1 for truncation, 0 for using intra + */ +void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select, + int16_t (*mv_table)[2], int f_code, int type, int truncate) { - int y; + int y, h_range, v_range; // RAL: 8 in MPEG-1, 16 in MPEG-4 int range = (((s->out_format == FMT_MPEG1) ? 8 : 16) << f_code); - + + if(s->msmpeg4_version) range= 16; if(s->avctx->me_range && range > s->avctx->me_range) range= s->avctx->me_range; + h_range= range; + v_range= field_select_table ? range>>1 : range; + /* clip / convert to intra 16x16 type MVs */ for(y=0; y<s->mb_height; y++){ int x; int xy= y*s->mb_stride; for(x=0; x<s->mb_width; x++){ if (s->mb_type[xy] & type){ // RAL: "type" test added... - if( mv_table[xy][0] >=range || mv_table[xy][0] <-range - || mv_table[xy][1] >=range || mv_table[xy][1] <-range){ - - if(s->codec_id == CODEC_ID_MPEG1VIDEO && 0){ - }else{ - if (mv_table[xy][0] > range-1) mv_table[xy][0]= range-1; - else if(mv_table[xy][0] < -range ) mv_table[xy][0]= -range; - if (mv_table[xy][1] > range-1) mv_table[xy][1]= range-1; - else if(mv_table[xy][1] < -range ) mv_table[xy][1]= -range; + if(field_select_table==NULL || field_select_table[xy] == field_select){ + if( mv_table[xy][0] >=h_range || mv_table[xy][0] <-h_range + || mv_table[xy][1] >=v_range || mv_table[xy][1] <-v_range){ + + if(truncate){ + if (mv_table[xy][0] > h_range-1) mv_table[xy][0]= h_range-1; + else if(mv_table[xy][0] < -h_range ) mv_table[xy][0]= -h_range; + if (mv_table[xy][1] > v_range-1) mv_table[xy][1]= v_range-1; + else if(mv_table[xy][1] < -v_range ) mv_table[xy][1]= -v_range; + }else{ + s->mb_type[xy] &= ~type; + s->mb_type[xy] |= CANDIDATE_MB_TYPE_INTRA; + mv_table[xy][0]= + mv_table[xy][1]= 0; + } } } } diff --git a/src/libffmpeg/libavcodec/motion_est_template.c b/src/libffmpeg/libavcodec/motion_est_template.c index db51d676d..18203ec06 100644 --- a/src/libffmpeg/libavcodec/motion_est_template.c +++ b/src/libffmpeg/libavcodec/motion_est_template.c @@ -1,6 +1,6 @@ /* * Motion estimation - * Copyright (c) 2002 Michael Niedermayer + * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -22,29 +22,31 @@ * @file motion_est_template.c * Motion estimation template. */ - +//FIXME ref2_y next_pic? //lets hope gcc will remove the unused vars ...(gcc 3.2.2 seems to do it ...) 
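/*
 * Sketch of the two strategies ff_fix_long_mvs() chooses between for an
 * out-of-range vector: clamp it into [-range, range-1] when truncation is
 * requested, otherwise zero it and tell the caller to fall back to intra.
 * fix_long_mv and its return convention are illustrative assumptions.
 */
static int fix_long_mv(int *mx, int *my, int h_range, int v_range, int truncate)
{
    if (*mx >= h_range || *mx < -h_range || *my >= v_range || *my < -v_range) {
        if (truncate) {
            if (*mx >  h_range - 1) *mx =  h_range - 1;
            if (*mx < -h_range    ) *mx = -h_range;
            if (*my >  v_range - 1) *my =  v_range - 1;
            if (*my < -v_range    ) *my = -v_range;
        } else {
            *mx = *my = 0;
            return 1;                /* caller should mark this MB intra */
        }
    }
    return 0;
}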
//Note, the last line is there to kill these ugly unused var warnings -#define LOAD_COMMON(x, y)\ +#define LOAD_COMMON\ uint32_t * const score_map= s->me.score_map;\ - const int stride= s->linesize;\ - const int uvstride= s->uvlinesize;\ const int time_pp= s->pp_time;\ const int time_pb= s->pb_time;\ - uint8_t * const src_y= s->new_picture.data[0] + ((y) * stride) + (x);\ - uint8_t * const src_u= s->new_picture.data[1] + (((y)>>1) * uvstride) + ((x)>>1);\ - uint8_t * const src_v= s->new_picture.data[2] + (((y)>>1) * uvstride) + ((x)>>1);\ - uint8_t * const ref_y= ref_picture->data[0] + ((y) * stride) + (x);\ - uint8_t * const ref_u= ref_picture->data[1] + (((y)>>1) * uvstride) + ((x)>>1);\ - uint8_t * const ref_v= ref_picture->data[2] + (((y)>>1) * uvstride) + ((x)>>1);\ - uint8_t * const ref2_y= s->next_picture.data[0] + ((y) * stride) + (x);\ + const int xmin= s->me.xmin;\ + const int ymin= s->me.ymin;\ + const int xmax= s->me.xmax;\ + const int ymax= s->me.ymax;\ + uint8_t * const src_y= src_data[0];\ + uint8_t * const src_u= src_data[1];\ + uint8_t * const src_v= src_data[2];\ + uint8_t * const ref_y= ref_data[0];\ + uint8_t * const ref_u= ref_data[1];\ + uint8_t * const ref_v= ref_data[2];\ op_pixels_func (*hpel_put)[4];\ op_pixels_func (*hpel_avg)[4]= &s->dsp.avg_pixels_tab[size];\ op_pixels_func (*chroma_hpel_put)[4];\ qpel_mc_func (*qpel_put)[16];\ qpel_mc_func (*qpel_avg)[16]= &s->dsp.avg_qpel_pixels_tab[size];\ const __attribute__((unused)) int unu= time_pp + time_pb + (size_t)src_u + (size_t)src_v + (size_t)ref_u + (size_t)ref_v\ - + (size_t)ref2_y + (size_t)hpel_avg + (size_t)qpel_avg + (size_t)score_map;\ + + (size_t)hpel_avg + (size_t)qpel_avg + (size_t)score_map\ + + xmin + xmax + ymin + ymax;\ if(s->no_rounding /*FIXME b_type*/){\ hpel_put= &s->dsp.put_no_rnd_pixels_tab[size];\ chroma_hpel_put= &s->dsp.put_no_rnd_pixels_tab[size+1];\ @@ -70,9 +72,8 @@ #if 0 static int RENAME(hpel_motion_search)(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, - int xmin, int ymin, int xmax, int ymax, - int pred_x, int pred_y, Picture *ref_picture, - int n, int size, uint8_t * const mv_penalty) + int pred_x, int pred_y, uint8_t *ref_data[3], + int size, uint8_t * const mv_penalty) { const int xx = 16 * s->mb_x + 8*(n&1); const int yy = 16 * s->mb_y + 8*(n>>1); @@ -80,7 +81,7 @@ static int RENAME(hpel_motion_search)(MpegEncContext * s, const int my = *my_ptr; const int penalty_factor= s->me.sub_penalty_factor; - LOAD_COMMON(xx, yy); + LOAD_COMMON // INIT; //FIXME factorize @@ -139,19 +140,17 @@ static int RENAME(hpel_motion_search)(MpegEncContext * s, #else static int RENAME(hpel_motion_search)(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, - int xmin, int ymin, int xmax, int ymax, - int pred_x, int pred_y, Picture *ref_picture, - int n, int size, uint8_t * const mv_penalty) + int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, + int size, int h, uint8_t * const mv_penalty) { - const int xx = 16 * s->mb_x + 8*(n&1); - const int yy = 16 * s->mb_y + 8*(n>>1); const int mx = *mx_ptr; const int my = *my_ptr; const int penalty_factor= s->me.sub_penalty_factor; me_cmp_func cmp_sub, chroma_cmp_sub; int bx=2*mx, by=2*my; - LOAD_COMMON(xx, yy); + LOAD_COMMON //FIXME factorize @@ -247,20 +246,18 @@ static int RENAME(hpel_motion_search)(MpegEncContext * s, } #endif -static int RENAME(hpel_get_mb_score)(MpegEncContext * s, int mx, int my, int pred_x, int pred_y, Picture *ref_picture, +static int RENAME(hpel_get_mb_score)(MpegEncContext * s, int 
mx, int my, int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, uint8_t * const mv_penalty) { // const int check_luma= s->dsp.me_sub_cmp != s->dsp.mb_cmp; const int size= 0; - const int xx = 16 * s->mb_x; - const int yy = 16 * s->mb_y; + const int h= 16; const int penalty_factor= s->me.mb_penalty_factor; - const int xmin= -256*256, ymin= -256*256, xmax= 256*256, ymax= 256*256; //assume that the caller checked these - const __attribute__((unused)) int unu2= xmin + xmax +ymin + ymax; //no unused warning shit me_cmp_func cmp_sub, chroma_cmp_sub; int d; - LOAD_COMMON(xx, yy); + LOAD_COMMON //FIXME factorize @@ -295,12 +292,10 @@ static int RENAME(hpel_get_mb_score)(MpegEncContext * s, int mx, int my, int pre static int RENAME(qpel_motion_search)(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, - int xmin, int ymin, int xmax, int ymax, - int pred_x, int pred_y, Picture *ref_picture, - int n, int size, uint8_t * const mv_penalty) + int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, + int size, int h, uint8_t * const mv_penalty) { - const int xx = 16 * s->mb_x + 8*(n&1); - const int yy = 16 * s->mb_y + 8*(n>>1); const int mx = *mx_ptr; const int my = *my_ptr; const int penalty_factor= s->me.sub_penalty_factor; @@ -310,7 +305,7 @@ static int RENAME(qpel_motion_search)(MpegEncContext * s, me_cmp_func cmp, chroma_cmp; me_cmp_func cmp_sub, chroma_cmp_sub; - LOAD_COMMON(xx, yy); + LOAD_COMMON cmp= s->dsp.me_cmp[size]; chroma_cmp= s->dsp.me_cmp[size+1]; //factorize FIXME @@ -514,19 +509,17 @@ static int RENAME(qpel_motion_search)(MpegEncContext * s, return dmin; } -static int RENAME(qpel_get_mb_score)(MpegEncContext * s, int mx, int my, int pred_x, int pred_y, Picture *ref_picture, +static int RENAME(qpel_get_mb_score)(MpegEncContext * s, int mx, int my, int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, uint8_t * const mv_penalty) { const int size= 0; - const int xx = 16 * s->mb_x; - const int yy = 16 * s->mb_y; + const int h= 16; const int penalty_factor= s->me.mb_penalty_factor; - const int xmin= -256*256, ymin= -256*256, xmax= 256*256, ymax= 256*256; //assume that the caller checked these - const __attribute__((unused)) int unu2= xmin + xmax +ymin + ymax; //no unused warning shit me_cmp_func cmp_sub, chroma_cmp_sub; int d; - LOAD_COMMON(xx, yy); + LOAD_COMMON //FIXME factorize @@ -597,15 +590,16 @@ if( (y)>(ymax<<(S)) ) printf("%d %d %d %d %d ymax" #v, ymax, (x), (y), s->mb_x, static inline int RENAME(small_diamond_search)(MpegEncContext * s, int *best, int dmin, - Picture *ref_picture, + uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, int const pred_x, int const pred_y, int const penalty_factor, - int const xmin, int const ymin, int const xmax, int const ymax, int const shift, - uint32_t *map, int map_generation, int size, uint8_t * const mv_penalty + int const shift, + uint32_t *map, int map_generation, int size, int h, uint8_t * const mv_penalty ) { me_cmp_func cmp, chroma_cmp; int next_dir=-1; - LOAD_COMMON(s->mb_x*16, s->mb_y*16); + LOAD_COMMON cmp= s->dsp.me_cmp[size]; chroma_cmp= s->dsp.me_cmp[size+1]; @@ -639,15 +633,16 @@ static inline int RENAME(small_diamond_search)(MpegEncContext * s, int *best, in } static inline int RENAME(funny_diamond_search)(MpegEncContext * s, int *best, int dmin, - Picture *ref_picture, + uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, int const pred_x, int const pred_y, int 
const penalty_factor, - int const xmin, int const ymin, int const xmax, int const ymax, int const shift, - uint32_t *map, int map_generation, int size, uint8_t * const mv_penalty + int const shift, + uint32_t *map, int map_generation, int size, int h, uint8_t * const mv_penalty ) { me_cmp_func cmp, chroma_cmp; int dia_size; - LOAD_COMMON(s->mb_x*16, s->mb_y*16); + LOAD_COMMON cmp= s->dsp.me_cmp[size]; chroma_cmp= s->dsp.me_cmp[size+1]; @@ -730,17 +725,18 @@ if(256*256*256*64 % (stats[0]+1)==0){ #define MAX_SAB_SIZE 16 static inline int RENAME(sab_diamond_search)(MpegEncContext * s, int *best, int dmin, - Picture *ref_picture, + uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, int const pred_x, int const pred_y, int const penalty_factor, - int const xmin, int const ymin, int const xmax, int const ymax, int const shift, - uint32_t *map, int map_generation, int size, uint8_t * const mv_penalty + int const shift, + uint32_t *map, int map_generation, int size, int h, uint8_t * const mv_penalty ) { me_cmp_func cmp, chroma_cmp; Minima minima[MAX_SAB_SIZE]; const int minima_count= ABS(s->me.dia_size); int i, j; - LOAD_COMMON(s->mb_x*16, s->mb_y*16); + LOAD_COMMON cmp= s->dsp.me_cmp[size]; chroma_cmp= s->dsp.me_cmp[size+1]; @@ -810,15 +806,16 @@ static inline int RENAME(sab_diamond_search)(MpegEncContext * s, int *best, int } static inline int RENAME(var_diamond_search)(MpegEncContext * s, int *best, int dmin, - Picture *ref_picture, + uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, int const pred_x, int const pred_y, int const penalty_factor, - int const xmin, int const ymin, int const xmax, int const ymax, int const shift, - uint32_t *map, int map_generation, int size, uint8_t * const mv_penalty + int const shift, + uint32_t *map, int map_generation, int size, int h, uint8_t * const mv_penalty ) { me_cmp_func cmp, chroma_cmp; int dia_size; - LOAD_COMMON(s->mb_x*16, s->mb_y*16); + LOAD_COMMON cmp= s->dsp.me_cmp[size]; chroma_cmp= s->dsp.me_cmp[size+1]; @@ -886,10 +883,10 @@ if(256*256*256*64 % (stats[0]+1)==0){ return dmin; } -static int RENAME(epzs_motion_search)(MpegEncContext * s, int block, +static int RENAME(epzs_motion_search)(MpegEncContext * s, int *mx_ptr, int *my_ptr, - int P[10][2], int pred_x, int pred_y, - int xmin, int ymin, int xmax, int ymax, Picture *ref_picture, int16_t (*last_mv)[2], + int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, int16_t (*last_mv)[2], int ref_mv_scale, uint8_t * const mv_penalty) { int best[2]={0, 0}; @@ -899,10 +896,11 @@ static int RENAME(epzs_motion_search)(MpegEncContext * s, int block, int map_generation; const int penalty_factor= s->me.penalty_factor; const int size=0; - const int ref_mv_stride= s->mb_stride; - const int ref_mv_xy= s->mb_x + s->mb_y*ref_mv_stride; + const int h=16; + const int ref_mv_stride= s->mb_stride; //pass as arg FIXME + const int ref_mv_xy= s->mb_x + s->mb_y*ref_mv_stride; //add to last_mv beforepassing FIXME me_cmp_func cmp, chroma_cmp; - LOAD_COMMON(s->mb_x*16, s->mb_y*16); + LOAD_COMMON cmp= s->dsp.me_cmp[size]; chroma_cmp= s->dsp.me_cmp[size+1]; @@ -973,21 +971,21 @@ static int RENAME(epzs_motion_search)(MpegEncContext * s, int block, //check(best[0],best[1],0, b0) if(s->me.dia_size==-1) - dmin= RENAME(funny_diamond_search)(s, best, dmin, ref_picture, - pred_x, pred_y, penalty_factor, xmin, ymin, xmax, ymax, - shift, map, map_generation, size, mv_penalty); + dmin= RENAME(funny_diamond_search)(s, best, dmin, src_data, 
ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); else if(s->me.dia_size<-1) - dmin= RENAME(sab_diamond_search)(s, best, dmin, ref_picture, - pred_x, pred_y, penalty_factor, xmin, ymin, xmax, ymax, - shift, map, map_generation, size, mv_penalty); + dmin= RENAME(sab_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); else if(s->me.dia_size<2) - dmin= RENAME(small_diamond_search)(s, best, dmin, ref_picture, - pred_x, pred_y, penalty_factor, xmin, ymin, xmax, ymax, - shift, map, map_generation, size, mv_penalty); + dmin= RENAME(small_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); else - dmin= RENAME(var_diamond_search)(s, best, dmin, ref_picture, - pred_x, pred_y, penalty_factor, xmin, ymin, xmax, ymax, - shift, map, map_generation, size, mv_penalty); + dmin= RENAME(var_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); //check(best[0],best[1],0, b1) *mx_ptr= best[0]; @@ -998,10 +996,11 @@ static int RENAME(epzs_motion_search)(MpegEncContext * s, int block, } #ifndef CMP_DIRECT /* no 4mv search needed in direct mode */ -static int RENAME(epzs_motion_search4)(MpegEncContext * s, int block, +static int RENAME(epzs_motion_search4)(MpegEncContext * s, int *mx_ptr, int *my_ptr, int P[10][2], int pred_x, int pred_y, - int xmin, int ymin, int xmax, int ymax, Picture *ref_picture, int16_t (*last_mv)[2], + uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, int16_t (*last_mv)[2], int ref_mv_scale, uint8_t * const mv_penalty) { int best[2]={0, 0}; @@ -1011,10 +1010,11 @@ static int RENAME(epzs_motion_search4)(MpegEncContext * s, int block, int map_generation; const int penalty_factor= s->me.penalty_factor; const int size=1; + const int h=8; const int ref_mv_stride= s->mb_stride; const int ref_mv_xy= s->mb_x + s->mb_y *ref_mv_stride; me_cmp_func cmp, chroma_cmp; - LOAD_COMMON((s->mb_x*2 + (block&1))*8, (s->mb_y*2 + (block>>1))*8); + LOAD_COMMON cmp= s->dsp.me_cmp[size]; chroma_cmp= s->dsp.me_cmp[size+1]; @@ -1024,7 +1024,7 @@ static int RENAME(epzs_motion_search4)(MpegEncContext * s, int block, dmin = 1000000; //printf("%d %d %d %d //",xmin, ymin, xmax, ymax); /* first line */ - if (s->mb_y == 0 && block<2) { + if (s->mb_y == 0/* && block<2*/) { CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift) CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) @@ -1049,21 +1049,100 @@ static int RENAME(epzs_motion_search4)(MpegEncContext * s, int block, } if(s->me.dia_size==-1) - dmin= RENAME(funny_diamond_search)(s, best, dmin, ref_picture, - pred_x, pred_y, penalty_factor, xmin, ymin, xmax, ymax, - shift, map, map_generation, size, mv_penalty); + dmin= RENAME(funny_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); else if(s->me.dia_size<-1) - dmin= RENAME(sab_diamond_search)(s, best, dmin, ref_picture, - pred_x, pred_y, penalty_factor, xmin, ymin, xmax, ymax, - shift, map, map_generation, size, mv_penalty); + dmin= RENAME(sab_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, 
size, h, mv_penalty); else if(s->me.dia_size<2) - dmin= RENAME(small_diamond_search)(s, best, dmin, ref_picture, - pred_x, pred_y, penalty_factor, xmin, ymin, xmax, ymax, - shift, map, map_generation, size, mv_penalty); + dmin= RENAME(small_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); else - dmin= RENAME(var_diamond_search)(s, best, dmin, ref_picture, - pred_x, pred_y, penalty_factor, xmin, ymin, xmax, ymax, - shift, map, map_generation, size, mv_penalty); + dmin= RENAME(var_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); + + + *mx_ptr= best[0]; + *my_ptr= best[1]; + +// printf("%d %d %d \n", best[0], best[1], dmin); + return dmin; +} + +//try to merge with above FIXME (needs PSNR test) +static int RENAME(epzs_motion_search2)(MpegEncContext * s, + int *mx_ptr, int *my_ptr, + int P[10][2], int pred_x, int pred_y, + uint8_t *src_data[3], + uint8_t *ref_data[3], int stride, int uvstride, int16_t (*last_mv)[2], + int ref_mv_scale, uint8_t * const mv_penalty) +{ + int best[2]={0, 0}; + int d, dmin; + const int shift= 1+s->quarter_sample; + uint32_t *map= s->me.map; + int map_generation; + const int penalty_factor= s->me.penalty_factor; + const int size=0; //FIXME pass as arg + const int h=8; + const int ref_mv_stride= s->mb_stride; + const int ref_mv_xy= s->mb_x + s->mb_y *ref_mv_stride; + me_cmp_func cmp, chroma_cmp; + LOAD_COMMON + + cmp= s->dsp.me_cmp[size]; + chroma_cmp= s->dsp.me_cmp[size+1]; + + map_generation= update_map_generation(s); + + dmin = 1000000; +//printf("%d %d %d %d //",xmin, ymin, xmax, ymax); + /* first line */ + if (s->mb_y == 0) { + CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift) + CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, + (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) + CHECK_MV(P_MV1[0]>>shift, P_MV1[1]>>shift) + }else{ + CHECK_MV(P_MV1[0]>>shift, P_MV1[1]>>shift) + //FIXME try some early stop + if(dmin>64*2){ + CHECK_MV(P_MEDIAN[0]>>shift, P_MEDIAN[1]>>shift) + CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift) + CHECK_MV(P_TOP[0]>>shift, P_TOP[1]>>shift) + CHECK_MV(P_TOPRIGHT[0]>>shift, P_TOPRIGHT[1]>>shift) + CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, + (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) + } + } + if(dmin>64*4){ + CHECK_CLIPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16, + (last_mv[ref_mv_xy+1][1]*ref_mv_scale + (1<<15))>>16) + CHECK_CLIPED_MV((last_mv[ref_mv_xy+ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16, + (last_mv[ref_mv_xy+ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16) + } + + if(s->me.dia_size==-1) + dmin= RENAME(funny_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); + else if(s->me.dia_size<-1) + dmin= RENAME(sab_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); + else if(s->me.dia_size<2) + dmin= RENAME(small_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); + else + dmin= RENAME(var_diamond_search)(s, best, dmin, src_data, ref_data, stride, uvstride, + pred_x, pred_y, penalty_factor, + shift, map, map_generation, size, h, mv_penalty); + *mx_ptr= best[0]; 
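The LOAD_COMMON rework above drops the (x, y) arguments: the motion-estimation template no longer derives the luma/chroma pointers or the search bounds itself, but reads src_data[]/ref_data[], stride/uvstride, the block height h and s->me.xmin..ymax as supplied by the caller. A minimal sketch of the plane-pointer arithmetic the old macro performed, which a caller would now be expected to do before handing the arrays down; the helper name and signature are illustrative, not part of the patch, and 4:2:0 chroma subsampling is assumed as in the removed lines:

    #include <stdint.h>

    /* Illustrative only: rebuild the pointers the old LOAD_COMMON(x, y)
     * computed inline.  dst[] is what the refactored *_motion_search() and
     * *_diamond_search() helpers receive as src_data/ref_data. */
    static void fill_plane_ptrs(uint8_t *dst[3], uint8_t *const data[3],
                                int x, int y, int stride, int uvstride)
    {
        dst[0] = data[0] +  y        * stride   +  x;        /* luma            */
        dst[1] = data[1] + (y >> 1)  * uvstride + (x >> 1);  /* chroma U, 4:2:0 */
        dst[2] = data[2] + (y >> 1)  * uvstride + (x >> 1);  /* chroma V, 4:2:0 */
    }
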
*my_ptr= best[1]; diff --git a/src/libffmpeg/libavcodec/mpeg12.c b/src/libffmpeg/libavcodec/mpeg12.c index 7cf6a21df..aecfd35f1 100644 --- a/src/libffmpeg/libavcodec/mpeg12.c +++ b/src/libffmpeg/libavcodec/mpeg12.c @@ -1,6 +1,7 @@ /* * MPEG1 codec / MPEG2 decoder * Copyright (c) 2000,2001 Fabrice Bellard. + * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -29,6 +30,9 @@ #include "mpeg12data.h" +//#undef NDEBUG +//#include <assert.h> + /* Start codes. */ #define SEQ_END_CODE 0x000001b7 @@ -54,7 +58,6 @@ static void mpeg1_encode_block(MpegEncContext *s, int component); static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code); // RAL: f_code parameter added #endif //CONFIG_ENCODERS -static void mpeg1_skip_picture(MpegEncContext *s, int pict_num); static inline int mpeg1_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n); @@ -68,10 +71,13 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n); static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred); +static void exchange_uv(MpegEncContext *s); #ifdef HAVE_XVMC extern int XVMC_field_start(MpegEncContext *s, AVCodecContext *avctx); extern int XVMC_field_end(MpegEncContext *s); +extern void XVMC_pack_pblocks(MpegEncContext *s,int cbp); +extern void XVMC_init_block(MpegEncContext *s);//set s->block #endif #if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) @@ -175,6 +181,46 @@ static void init_uni_ac_vlc(RLTable *rl, uint32_t *uni_ac_vlc_bits, uint8_t *uni } } + +static int find_frame_rate_index(MpegEncContext *s){ + int i; + int64_t dmin= INT64_MAX; + int64_t d; + + for(i=1;i<14;i++) { + if(s->avctx->strict_std_compliance >= 0 && i>=9) break; + + d = ABS(MPEG1_FRAME_RATE_BASE*(int64_t)s->avctx->frame_rate - frame_rate_tab[i]*(int64_t)s->avctx->frame_rate_base); + if(d < dmin){ + dmin=d; + s->frame_rate_index= i; + } + } + if(dmin) + return -1; + else + return 0; +} + +static int encode_init(AVCodecContext *avctx) +{ + MpegEncContext *s = avctx->priv_data; + + if(MPV_encode_init(avctx) < 0) + return -1; + + if(find_frame_rate_index(s) < 0){ + if(s->strict_std_compliance >=0){ + av_log(avctx, AV_LOG_ERROR, "MPEG1/2 doesnt support %d/%d fps\n", avctx->frame_rate, avctx->frame_rate_base); + return -1; + }else{ + av_log(avctx, AV_LOG_INFO, "MPEG1/2 doesnt support %d/%d fps, there may be AV sync issues\n", avctx->frame_rate, avctx->frame_rate_base); + } + } + + return 0; +} + static void put_header(MpegEncContext *s, int header) { align_put_bits(&s->pb); @@ -198,22 +244,6 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) if (s->current_picture.key_frame) { /* mpeg1 header repeated every gop */ put_header(s, SEQ_START_CODE); - - /* search closest frame rate */ - { - int i, dmin, d; - s->frame_rate_index = 0; - dmin = 0x7fffffff; - for(i=1;i<14;i++) { - if(s->avctx->strict_std_compliance >= 0 && i>=9) break; - - d = abs(MPEG1_FRAME_RATE_BASE*(int64_t)s->avctx->frame_rate/s->avctx->frame_rate_base - frame_rate_tab[i]); - if (d < dmin) { - dmin = d; - s->frame_rate_index = i; - } - } - } put_bits(&s->pb, 12, s->width); put_bits(&s->pb, 12, s->height); @@ -221,7 +251,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) for(i=1; i<15; i++){ float error= aspect_ratio; if(s->codec_id == CODEC_ID_MPEG1VIDEO || i <=1) - error-= mpeg1_aspect[i]; + error-= 1.0/mpeg1_aspect[i]; else error-= 
av_q2d(mpeg2_aspect[i])*s->height/s->width; @@ -275,7 +305,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) put_bits(&s->pb, 1, 0); //esc put_bits(&s->pb, 3, 4); //profile put_bits(&s->pb, 4, 8); //level - put_bits(&s->pb, 1, s->progressive_sequence=1); + put_bits(&s->pb, 1, s->progressive_sequence); put_bits(&s->pb, 2, 1); //chroma format 4:2:0 put_bits(&s->pb, 2, 0); //horizontal size ext put_bits(&s->pb, 2, 0); //vertical size ext @@ -291,30 +321,18 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) put_bits(&s->pb, 1, 0); /* do drop frame */ /* time code : we must convert from the real frame rate to a fake mpeg frame rate in case of low frame rate */ - fps = frame_rate_tab[s->frame_rate_index]; - time_code = (int64_t)s->fake_picture_number * MPEG1_FRAME_RATE_BASE; - s->gop_picture_number = s->fake_picture_number; + fps = (frame_rate_tab[s->frame_rate_index] + MPEG1_FRAME_RATE_BASE/2)/ MPEG1_FRAME_RATE_BASE; + time_code = s->current_picture_ptr->coded_picture_number; + + s->gop_picture_number = time_code; put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24)); put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60)); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60)); - put_bits(&s->pb, 6, (uint32_t)((time_code % fps) / MPEG1_FRAME_RATE_BASE)); - put_bits(&s->pb, 1, 0); /* closed gop */ + put_bits(&s->pb, 6, (uint32_t)((time_code % fps))); + put_bits(&s->pb, 1, !!(s->flags & CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); /* broken link */ } - - if (s->avctx->frame_rate < (24 * s->avctx->frame_rate_base) && s->picture_number > 0) { - /* insert empty P pictures to slow down to the desired - frame rate. Each fake pictures takes about 20 bytes */ - fps = frame_rate_tab[s->frame_rate_index]; - n = av_rescale((int64_t)s->picture_number * s->avctx->frame_rate_base, fps, s->avctx->frame_rate) / MPEG1_FRAME_RATE_BASE - 1; - while (s->fake_picture_number < n) { - mpeg1_skip_picture(s, s->fake_picture_number - - s->gop_picture_number); - s->fake_picture_number++; - } - - } } static inline void encode_mb_skip_run(MpegEncContext *s, int run){ @@ -325,49 +343,6 @@ static inline void encode_mb_skip_run(MpegEncContext *s, int run){ put_bits(&s->pb, mbAddrIncrTable[run][1], mbAddrIncrTable[run][0]); } - -/* insert a fake P picture */ -static void mpeg1_skip_picture(MpegEncContext *s, int pict_num) -{ - assert(s->codec_id == CODEC_ID_MPEG1VIDEO); // mpeg2 can do these repeat things - - /* mpeg1 picture header */ - put_header(s, PICTURE_START_CODE); - /* temporal reference */ - put_bits(&s->pb, 10, pict_num & 0x3ff); - - put_bits(&s->pb, 3, P_TYPE); - put_bits(&s->pb, 16, 0xffff); /* non constant bit rate */ - - put_bits(&s->pb, 1, 1); /* integer coordinates */ - put_bits(&s->pb, 3, 1); /* forward_f_code */ - - put_bits(&s->pb, 1, 0); /* extra bit picture */ - - /* only one slice */ - put_header(s, SLICE_MIN_START_CODE); - put_bits(&s->pb, 5, 1); /* quantizer scale */ - put_bits(&s->pb, 1, 0); /* slice extra information */ - - encode_mb_skip_run(s, 0); - - /* empty macroblock */ - put_bits(&s->pb, 3, 1); /* motion only */ - - /* zero motion x & y */ - put_bits(&s->pb, 1, 1); - put_bits(&s->pb, 1, 1); - - /* output a number of empty slice */ - encode_mb_skip_run(s, s->mb_width * s->mb_height - 2); - - /* empty macroblock */ - put_bits(&s->pb, 3, 1); /* motion only */ - - /* zero motion x & y */ - put_bits(&s->pb, 1, 1); - put_bits(&s->pb, 1, 1); -} #endif //CONFIG_ENCODERS static void common_init(MpegEncContext *s) @@ -402,10 
+377,10 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) // RAL: s->picture_number instead of s->fake_picture_number put_bits(&s->pb, 10, (s->picture_number - s->gop_picture_number) & 0x3ff); - s->fake_picture_number++; - put_bits(&s->pb, 3, s->pict_type); - put_bits(&s->pb, 16, 0xffff); /* non constant bit rate */ + + s->vbv_delay_ptr= s->pb.buf + get_bit_count(&s->pb)/8; + put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */ // RAL: Forward f_code also needed for B frames if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) { @@ -426,7 +401,8 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) } put_bits(&s->pb, 1, 0); /* extra bit picture */ - + + s->frame_pred_frame_dct = 1; if(s->codec_id == CODEC_ID_MPEG2VIDEO){ put_header(s, EXT_START_CODE); put_bits(&s->pb, 4, 8); //pic ext @@ -444,22 +420,50 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) } put_bits(&s->pb, 2, s->intra_dc_precision); put_bits(&s->pb, 2, s->picture_structure= PICT_FRAME); - put_bits(&s->pb, 1, s->top_field_first); - put_bits(&s->pb, 1, s->frame_pred_frame_dct= 1); + if (s->progressive_sequence) { + put_bits(&s->pb, 1, 0); /* no repeat */ + } else { + put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first); + } + /* XXX: optimize the generation of this flag with entropy + measures */ + s->frame_pred_frame_dct = s->progressive_sequence; + + put_bits(&s->pb, 1, s->frame_pred_frame_dct); put_bits(&s->pb, 1, s->concealment_motion_vectors); put_bits(&s->pb, 1, s->q_scale_type); put_bits(&s->pb, 1, s->intra_vlc_format); put_bits(&s->pb, 1, s->alternate_scan); put_bits(&s->pb, 1, s->repeat_first_field); put_bits(&s->pb, 1, s->chroma_420_type=1); - put_bits(&s->pb, 1, s->progressive_frame=1); + s->progressive_frame = s->progressive_sequence; + put_bits(&s->pb, 1, s->progressive_frame); put_bits(&s->pb, 1, 0); //composite_display_flag } + if(s->flags & CODEC_FLAG_SVCD_SCAN_OFFSET){ + int i; + + put_header(s, USER_START_CODE); + for(i=0; i<sizeof(svcd_scan_offset_placeholder); i++){ + put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]); + } + } s->mb_y=0; ff_mpeg1_encode_slice_header(s); } +static inline void put_mb_modes(MpegEncContext *s, int n, int bits, + int has_mv, int field_motion) +{ + put_bits(&s->pb, n, bits); + if (!s->frame_pred_frame_dct) { + if (has_mv) + put_bits(&s->pb, 2, 2 - field_motion); /* motion_type: frame/field */ + put_bits(&s->pb, 1, s->interlaced_dct); + } +} + void mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y) @@ -475,9 +479,9 @@ void mpeg1_encode_mb(MpegEncContext *s, if (s->block_last_index[i] >= 0) cbp |= 1 << (5 - i); } - + if (cbp == 0 && !first_mb && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) && - ((s->pict_type == P_TYPE && (motion_x | motion_y) == 0) || + ((s->pict_type == P_TYPE && s->mv_type == MV_TYPE_16X16 && (motion_x | motion_y) == 0) || (s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) | ((s->mv_dir & MV_DIR_BACKWARD) ? 
((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { s->mb_skip_run++; @@ -485,6 +489,10 @@ void mpeg1_encode_mb(MpegEncContext *s, s->skip_count++; s->misc_bits++; s->last_bits++; + if(s->pict_type == P_TYPE){ + s->last_mv[0][1][0]= s->last_mv[0][0][0]= + s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0; + } } else { if(first_mb){ assert(s->mb_skip_run == 0); @@ -495,145 +503,167 @@ void mpeg1_encode_mb(MpegEncContext *s, if (s->pict_type == I_TYPE) { if(s->dquant && cbp){ - put_bits(&s->pb, 2, 1); /* macroblock_type : macroblock_quant = 1 */ + put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */ put_bits(&s->pb, 5, s->qscale); }else{ - put_bits(&s->pb, 1, 1); /* macroblock_type : macroblock_quant = 0 */ + put_mb_modes(s, 1, 1, 0, 0); /* macroblock_type : macroblock_quant = 0 */ s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); s->i_count++; } else if (s->mb_intra) { if(s->dquant && cbp){ - put_bits(&s->pb, 6, 0x01); + put_mb_modes(s, 6, 0x01, 0, 0); put_bits(&s->pb, 5, s->qscale); }else{ - put_bits(&s->pb, 5, 0x03); + put_mb_modes(s, 5, 0x03, 0, 0); s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); s->i_count++; - s->last_mv[0][0][0] = - s->last_mv[0][0][1] = 0; + memset(s->last_mv, 0, sizeof(s->last_mv)); } else if (s->pict_type == P_TYPE) { + if(s->mv_type == MV_TYPE_16X16){ if (cbp != 0) { - if (motion_x == 0 && motion_y == 0) { + if ((motion_x|motion_y) == 0) { if(s->dquant){ - put_bits(&s->pb, 5, 1); /* macroblock_pattern & quant */ + put_mb_modes(s, 5, 1, 0, 0); /* macroblock_pattern & quant */ put_bits(&s->pb, 5, s->qscale); }else{ - put_bits(&s->pb, 2, 1); /* macroblock_pattern only */ + put_mb_modes(s, 2, 1, 0, 0); /* macroblock_pattern only */ } s->misc_bits+= get_bits_diff(s); - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); } else { if(s->dquant){ - put_bits(&s->pb, 5, 2); /* motion + cbp */ + put_mb_modes(s, 5, 2, 1, 0); /* motion + cbp */ put_bits(&s->pb, 5, s->qscale); }else{ - put_bits(&s->pb, 1, 1); /* motion + cbp */ + put_mb_modes(s, 1, 1, 1, 0); /* motion + cbp */ } s->misc_bits+= get_bits_diff(s); mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added s->mv_bits+= get_bits_diff(s); - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); } } else { put_bits(&s->pb, 3, 1); /* motion only */ + if (!s->frame_pred_frame_dct) + put_bits(&s->pb, 2, 2); /* motion_type: frame */ + s->misc_bits+= get_bits_diff(s); mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added s->qscale -= s->dquant; s->mv_bits+= get_bits_diff(s); } - s->f_count++; - } else - { // RAL: All the following bloc added for B frames: - if (cbp != 0) - { // With coded bloc pattern - if (s->mv_dir == (MV_DIR_FORWARD | MV_DIR_BACKWARD)) - { // Bi-directional motion - if (s->dquant) - { // With QScale - put_bits(&s->pb, 5, 2); - put_bits(&s->pb, 5, s->qscale); - } - else // Without QScale - put_bits(&s->pb, 2, 3); - s->misc_bits += get_bits_diff(s); - mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); - mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); - mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); - mpeg1_encode_motion(s, s->mv[1][0][1] - 
s->last_mv[1][0][1], s->b_code); - s->b_count++; - s->f_count++; - s->mv_bits += get_bits_diff(s); - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); - } - else if (s->mv_dir == MV_DIR_BACKWARD) - { // Backward motion - if (s->dquant) - { // With QScale - put_bits(&s->pb, 6, 2); - put_bits(&s->pb, 5, s->qscale); - } - else // Without QScale - put_bits(&s->pb, 3, 3); - s->misc_bits += get_bits_diff(s); - mpeg1_encode_motion(s, motion_x - s->last_mv[1][0][0], s->b_code); - mpeg1_encode_motion(s, motion_y - s->last_mv[1][0][1], s->b_code); - s->b_count++; - s->mv_bits += get_bits_diff(s); - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); - } - else if (s->mv_dir == MV_DIR_FORWARD) - { // Forward motion - if (s->dquant) - { // With QScale - put_bits(&s->pb, 6, 3); - put_bits(&s->pb, 5, s->qscale); - } - else // Without QScale - put_bits(&s->pb, 4, 3); - s->misc_bits += get_bits_diff(s); - mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); - mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); - s->f_count++; - s->mv_bits += get_bits_diff(s); - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); - } + s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x; + s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y; + }else{ + assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD); + + if (cbp) { + if(s->dquant){ + put_mb_modes(s, 5, 2, 1, 1); /* motion + cbp */ + put_bits(&s->pb, 5, s->qscale); + }else{ + put_mb_modes(s, 1, 1, 1, 1); /* motion + cbp */ } - else - { // No coded bloc pattern - if (s->mv_dir == (MV_DIR_FORWARD | MV_DIR_BACKWARD)) - { // Bi-directional motion - put_bits(&s->pb, 2, 2); /* backward & forward motion */ - mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); - mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); - mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); - mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); - s->b_count++; - s->f_count++; - } - else if (s->mv_dir == MV_DIR_BACKWARD) - { // Backward motion - put_bits(&s->pb, 3, 2); /* backward motion only */ - mpeg1_encode_motion(s, motion_x - s->last_mv[1][0][0], s->b_code); - mpeg1_encode_motion(s, motion_y - s->last_mv[1][0][1], s->b_code); - s->b_count++; - } - else if (s->mv_dir == MV_DIR_FORWARD) - { // Forward motion - put_bits(&s->pb, 4, 2); /* forward motion only */ - mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); - mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); - s->f_count++; - } + } else { + put_bits(&s->pb, 3, 1); /* motion only */ + put_bits(&s->pb, 2, 1); /* motion_type: field */ s->qscale -= s->dquant; - s->mv_bits += get_bits_diff(s); + } + s->misc_bits+= get_bits_diff(s); + for(i=0; i<2; i++){ + put_bits(&s->pb, 1, s->field_select[0][i]); + mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); + mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); + s->last_mv[0][i][0]= s->mv[0][i][0]; + s->last_mv[0][i][1]= 2*s->mv[0][i][1]; + } + s->mv_bits+= get_bits_diff(s); + } + if(cbp) + put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); + s->f_count++; + } else{ + static const int mb_type_len[4]={0,3,4,2}; //bak,for,bi + + if(s->mv_type == MV_TYPE_16X16){ + if (cbp){ // With coded bloc pattern + if (s->dquant) { + if(s->mv_dir == MV_DIR_FORWARD) + put_mb_modes(s, 6, 3, 1, 0); + else + put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 0); + 
put_bits(&s->pb, 5, s->qscale); + } else { + put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 0); + } + }else{ // No coded bloc pattern + put_bits(&s->pb, mb_type_len[s->mv_dir], 2); + if (!s->frame_pred_frame_dct) + put_bits(&s->pb, 2, 2); /* motion_type: frame */ + s->qscale -= s->dquant; + } + s->misc_bits += get_bits_diff(s); + if (s->mv_dir&MV_DIR_FORWARD){ + mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); + mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); + s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0]; + s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1]; + s->f_count++; + } + if (s->mv_dir&MV_DIR_BACKWARD){ + mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); + mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); + s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0]; + s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1]; + s->b_count++; + } + }else{ + assert(s->mv_type == MV_TYPE_FIELD); + assert(!s->frame_pred_frame_dct); + if (cbp){ // With coded bloc pattern + if (s->dquant) { + if(s->mv_dir == MV_DIR_FORWARD) + put_mb_modes(s, 6, 3, 1, 1); + else + put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 1); + put_bits(&s->pb, 5, s->qscale); + } else { + put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 1); } - // End of bloc from RAL + }else{ // No coded bloc pattern + put_bits(&s->pb, mb_type_len[s->mv_dir], 2); + put_bits(&s->pb, 2, 1); /* motion_type: field */ + s->qscale -= s->dquant; + } + s->misc_bits += get_bits_diff(s); + if (s->mv_dir&MV_DIR_FORWARD){ + for(i=0; i<2; i++){ + put_bits(&s->pb, 1, s->field_select[0][i]); + mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code); + mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code); + s->last_mv[0][i][0]= s->mv[0][i][0]; + s->last_mv[0][i][1]= 2*s->mv[0][i][1]; + } + s->f_count++; + } + if (s->mv_dir&MV_DIR_BACKWARD){ + for(i=0; i<2; i++){ + put_bits(&s->pb, 1, s->field_select[1][i]); + mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code); + mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1]>>1), s->b_code); + s->last_mv[1][i][0]= s->mv[1][i][0]; + s->last_mv[1][i][1]= 2*s->mv[1][i][1]; + } + s->b_count++; + } } + s->mv_bits += get_bits_diff(s); + if(cbp) + put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); + } for(i=0;i<6;i++) { if (cbp & (1 << (5 - i))) { mpeg1_encode_block(s, block[i], i); @@ -645,18 +675,6 @@ void mpeg1_encode_mb(MpegEncContext *s, else s->p_tex_bits+= get_bits_diff(s); } - - // RAL: By this: - if (s->mv_dir & MV_DIR_FORWARD) - { - s->last_mv[0][0][0]= s->mv[0][0][0]; - s->last_mv[0][0][1]= s->mv[0][0][1]; - } - if (s->mv_dir & MV_DIR_BACKWARD) - { - s->last_mv[1][0][0]= s->mv[1][0][0]; - s->last_mv[1][0][1]= s->mv[1][0][1]; - } } // RAL: Parameter added: f_or_b_code @@ -965,14 +983,11 @@ static inline int get_dmv(MpegEncContext *s) static inline int get_qscale(MpegEncContext *s) { int qscale = get_bits(&s->gb, 5); - if (s->codec_id == CODEC_ID_MPEG2VIDEO) { - if (s->q_scale_type) { - return non_linear_qscale[qscale]; - } else { - return qscale << 1; - } + if (s->q_scale_type) { + return non_linear_qscale[qscale]; + } else { + return qscale << 1; } - return qscale; } /* motion type (for mpeg2) */ @@ -992,7 +1007,7 @@ static int mpeg_decode_mb(MpegEncContext *s, if (s->mb_skip_run-- != 0) { if(s->pict_type == I_TYPE){ - fprintf(stderr, "skiped MB in I frame at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, 
AV_LOG_ERROR, "skiped MB in I frame at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -1032,7 +1047,7 @@ static int mpeg_decode_mb(MpegEncContext *s, case I_TYPE: if (get_bits1(&s->gb) == 0) { if (get_bits1(&s->gb) == 0){ - fprintf(stderr, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y); return -1; } mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA; @@ -1043,7 +1058,7 @@ static int mpeg_decode_mb(MpegEncContext *s, case P_TYPE: mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1); if (mb_type < 0){ - fprintf(stderr, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y); return -1; } mb_type = ptype2mb_type[ mb_type ]; @@ -1051,7 +1066,7 @@ static int mpeg_decode_mb(MpegEncContext *s, case B_TYPE: mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1); if (mb_type < 0){ - fprintf(stderr, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y); return -1; } mb_type = btype2mb_type[ mb_type ]; @@ -1083,21 +1098,30 @@ static int mpeg_decode_mb(MpegEncContext *s, }else memset(s->last_mv, 0, sizeof(s->last_mv)); /* reset mv prediction */ s->mb_intra = 1; +#ifdef HAVE_XVMC + //one 1 we memcpy blocks in xvmcvideo + if(s->avctx->xvmc_acceleration > 1){ + XVMC_pack_pblocks(s,-1);//inter are always full blocks + if(s->swap_uv){ + exchange_uv(s); + } + } +#endif if (s->codec_id == CODEC_ID_MPEG2VIDEO) { for(i=0;i<6;i++) { - if (mpeg2_decode_block_intra(s, block[i], i) < 0) + if (mpeg2_decode_block_intra(s, s->pblocks[i], i) < 0) return -1; } } else { for(i=0;i<6;i++) { - if (mpeg1_decode_block_intra(s, block[i], i) < 0) + if (mpeg1_decode_block_intra(s, s->pblocks[i], i) < 0) return -1; } } } else { if (mb_type & MB_TYPE_ZERO_MV){ - assert(mb_type & MB_TYPE_PAT); + assert(mb_type & MB_TYPE_CBP); /* compute dct type */ if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var? @@ -1128,7 +1152,7 @@ static int mpeg_decode_mb(MpegEncContext *s, /* compute dct type */ if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var? 
- !s->frame_pred_frame_dct && IS_PAT(mb_type)) { + !s->frame_pred_frame_dct && HAS_CBP(mb_type)) { s->interlaced_dct = get_bits1(&s->gb); } @@ -1158,7 +1182,7 @@ static int mpeg_decode_mb(MpegEncContext *s, } } else { /* MT_16X8 */ - mb_type |= MB_TYPE_16x8; + mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED; s->mv_type = MV_TYPE_16X8; for(j=0;j<2;j++) { s->field_select[i][j] = get_bits1(&s->gb); @@ -1189,7 +1213,7 @@ static int mpeg_decode_mb(MpegEncContext *s, dprintf("fmy=%d\n", val); } } else { - mb_type |= MB_TYPE_16x16; + mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED; s->field_select[i][0] = get_bits1(&s->gb); for(k=0;k<2;k++) { val = mpeg_decode_motion(s, s->mpeg_f_code[i][k], @@ -1247,6 +1271,9 @@ static int mpeg_decode_mb(MpegEncContext *s, } } break; + default: + av_log(s->avctx, AV_LOG_ERROR, "00 motion_type at %d %d\n", s->mb_x, s->mb_y); + return -1; } } } @@ -1254,18 +1281,28 @@ static int mpeg_decode_mb(MpegEncContext *s, s->mb_intra = 0; - if (IS_PAT(mb_type)) { + if (HAS_CBP(mb_type)) { cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1); if (cbp < 0){ - fprintf(stderr, "invalid cbp at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y); return -1; } cbp++; +#ifdef HAVE_XVMC + //on 1 we memcpy blocks in xvmcvideo + if(s->avctx->xvmc_acceleration > 1){ + XVMC_pack_pblocks(s,cbp); + if(s->swap_uv){ + exchange_uv(s); + } + } +#endif + if (s->codec_id == CODEC_ID_MPEG2VIDEO) { for(i=0;i<6;i++) { if (cbp & 32) { - if (mpeg2_decode_block_non_intra(s, block[i], i) < 0) + if (mpeg2_decode_block_non_intra(s, s->pblocks[i], i) < 0) return -1; } else { s->block_last_index[i] = -1; @@ -1275,7 +1312,7 @@ static int mpeg_decode_mb(MpegEncContext *s, } else { for(i=0;i<6;i++) { if (cbp & 32) { - if (mpeg1_decode_block_inter(s, block[i], i) < 0) + if (mpeg1_decode_block_inter(s, s->pblocks[i], i) < 0) return -1; } else { s->block_last_index[i] = -1; @@ -1335,7 +1372,7 @@ static inline int decode_dc(GetBitContext *gb, int component) code = get_vlc2(gb, dc_chroma_vlc.table, DC_VLC_BITS, 2); } if (code < 0){ - fprintf(stderr, "invalid dc code at\n"); + av_log(NULL, AV_LOG_ERROR, "invalid dc code at\n"); return 0xffff; } if (code == 0) { @@ -1380,7 +1417,7 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, } else if(level != 0) { i += run; j = scantable[i]; - level= (level*qscale*quant_matrix[j])>>3; + level= (level*qscale*quant_matrix[j])>>4; level= (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); @@ -1398,16 +1435,16 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, j = scantable[i]; if(level<0){ level= -level; - level= (level*qscale*quant_matrix[j])>>3; + level= (level*qscale*quant_matrix[j])>>4; level= (level-1)|1; level= -level; }else{ - level= (level*qscale*quant_matrix[j])>>3; + level= (level*qscale*quant_matrix[j])>>4; level= (level-1)|1; } } if (i > 63){ - fprintf(stderr, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -1438,7 +1475,7 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s, v= SHOW_UBITS(re, &s->gb, 2); if (v & 2) { LAST_SKIP_BITS(re, &s->gb, 2); - level= (3*qscale*quant_matrix[0])>>4; + level= (3*qscale*quant_matrix[0])>>5; level= (level-1)|1; if(v&1) level= -level; @@ -1456,7 +1493,7 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s, } else if(level != 0) { i += run; j = scantable[i]; - level= 
((level*2+1)*qscale*quant_matrix[j])>>4; + level= ((level*2+1)*qscale*quant_matrix[j])>>5; level= (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); @@ -1474,16 +1511,16 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s, j = scantable[i]; if(level<0){ level= -level; - level= ((level*2+1)*qscale*quant_matrix[j])>>4; + level= ((level*2+1)*qscale*quant_matrix[j])>>5; level= (level-1)|1; level= -level; }else{ - level= ((level*2+1)*qscale*quant_matrix[j])>>4; + level= ((level*2+1)*qscale*quant_matrix[j])>>5; level= (level-1)|1; } } if (i > 63){ - fprintf(stderr, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -1561,7 +1598,7 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, } } if (i > 63){ - fprintf(stderr, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -1641,7 +1678,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, } } if (i > 63){ - fprintf(stderr, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } @@ -1667,7 +1704,9 @@ static int mpeg_decode_init(AVCodecContext *avctx) { Mpeg1Context *s = avctx->priv_data; + s->mpeg_enc_ctx.avctx= avctx; s->mpeg_enc_ctx.flags= avctx->flags; + s->mpeg_enc_ctx.flags2= avctx->flags2; common_init(&s->mpeg_enc_ctx); init_vlcs(); @@ -1707,15 +1746,14 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; - int ref, f_code; + int ref, f_code, vbv_delay; init_get_bits(&s->gb, buf, buf_size*8); ref = get_bits(&s->gb, 10); /* temporal ref */ s->pict_type = get_bits(&s->gb, 3); - dprintf("pict_type=%d number=%d\n", s->pict_type, s->picture_number); - skip_bits(&s->gb, 16); + vbv_delay= get_bits(&s->gb, 16); if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) { s->full_pel[0] = get_bits1(&s->gb); f_code = get_bits(&s->gb, 3); @@ -1735,6 +1773,9 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, s->current_picture.pict_type= s->pict_type; s->current_picture.key_frame= s->pict_type == I_TYPE; +// if(avctx->debug & FF_DEBUG_PICT_INFO) +// av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d\n", vbv_delay, ref); + s->y_dc_scale = 8; s->c_dc_scale = 8; s->first_slice = 1; @@ -1744,7 +1785,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, static void mpeg_decode_sequence_extension(MpegEncContext *s) { int horiz_size_ext, vert_size_ext; - int bit_rate_ext, vbv_buf_ext; + int bit_rate_ext; int frame_rate_ext_n, frame_rate_ext_d; int level, profile; @@ -1760,7 +1801,7 @@ static void mpeg_decode_sequence_extension(MpegEncContext *s) bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */ s->bit_rate = ((s->bit_rate / 400) | (bit_rate_ext << 12)) * 400; skip_bits1(&s->gb); /* marker */ - vbv_buf_ext = get_bits(&s->gb, 8); + s->avctx->rc_buffer_size += get_bits(&s->gb, 8)*1024*16<<10; s->low_delay = get_bits1(&s->gb); if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1; @@ -1789,7 +1830,8 @@ static void mpeg_decode_sequence_extension(MpegEncContext *s) } if(s->avctx->debug & FF_DEBUG_PICT_INFO) - printf("profile: %d, level: %d \n", profile, level); + av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n", + profile, level, s->avctx->rc_buffer_size, 
s->bit_rate); } static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) @@ -1820,7 +1862,7 @@ static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) ); if(s->avctx->debug & FF_DEBUG_PICT_INFO) - printf("sde w:%d, h:%d\n", w, h); + av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h); } static void mpeg_decode_picture_display_extension(Mpeg1Context *s1) @@ -1836,7 +1878,7 @@ static void mpeg_decode_picture_display_extension(Mpeg1Context *s1) } if(s->avctx->debug & FF_DEBUG_PICT_INFO) - printf("pde (%d,%d) (%d,%d) (%d,%d)\n", + av_log(s->avctx, AV_LOG_DEBUG, "pde (%d,%d) (%d,%d) (%d,%d)\n", s1->pan_scan.position[0][0], s1->pan_scan.position[0][1], s1->pan_scan.position[1][0], s1->pan_scan.position[1][1], s1->pan_scan.position[2][0], s1->pan_scan.position[2][1] @@ -1899,7 +1941,7 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s) s->repeat_first_field = get_bits1(&s->gb); s->chroma_420_type = get_bits1(&s->gb); s->progressive_frame = get_bits1(&s->gb); - + if(s->picture_structure == PICT_FRAME) s->first_field=0; else{ @@ -1910,13 +1952,9 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s) if(s->alternate_scan){ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan); - ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan); - ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); }else{ ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); - ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); - ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } /* composite display not parsed */ @@ -1960,10 +1998,12 @@ static void mpeg_decode_extension(AVCodecContext *avctx, } } -static void exchange_uv(AVFrame *f){ - uint8_t *t= f->data[1]; - f->data[1]= f->data[2]; - f->data[2]= t; +static void exchange_uv(MpegEncContext *s){ +short * tmp; + + tmp = s->pblocks[4]; + s->pblocks[4] = s->pblocks[5]; + s->pblocks[5] = tmp; } #define DECODE_SLICE_FATAL_ERROR -2 @@ -1991,7 +2031,7 @@ static int mpeg_decode_slice(AVCodecContext *avctx, start_code = (start_code - 1) & 0xff; if (start_code >= s->mb_height){ - fprintf(stderr, "slice below image (%d >= %d)\n", start_code, s->mb_height); + av_log(s->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", start_code, s->mb_height); return -1; } @@ -2020,22 +2060,11 @@ static int mpeg_decode_slice(AVCodecContext *avctx, } *s->current_picture_ptr->pan_scan= s1->pan_scan; - - //printf("%d\n", s->current_picture_ptr->repeat_pict); - - if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("qp:%d fc:%2d%2d%2d%2d %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n", - s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1], - s->pict_type == I_TYPE ? "I" : (s->pict_type == P_TYPE ? "P" : (s->pict_type == B_TYPE ? "B" : "S")), - s->progressive_sequence ? "pro" :"", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"", - s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors, - s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? 
"420" :""); - } }else{ //second field int i; if(!s->current_picture_ptr){ - fprintf(stderr, "first field missing\n"); + av_log(s->avctx, AV_LOG_ERROR, "first field missing\n"); return -1; } @@ -2053,13 +2082,24 @@ static int mpeg_decode_slice(AVCodecContext *avctx, XVMC_field_start(s,avctx); #endif }//fi(s->first_slice) - s->first_slice = 0; init_get_bits(&s->gb, *buf, buf_size*8); s->qscale = get_qscale(s); + if (s->first_slice && (s->first_field || s->picture_structure==PICT_FRAME)) { + if(s->avctx->debug&FF_DEBUG_PICT_INFO){ + av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n", + s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1], + s->pict_type == I_TYPE ? "I" : (s->pict_type == P_TYPE ? "P" : (s->pict_type == B_TYPE ? "B" : "S")), + s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"", + s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors, + s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :""); + } + } + + s->first_slice = 0; if(s->qscale == 0){ - fprintf(stderr, "qscale == 0\n"); + av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n"); return -1; } @@ -2073,7 +2113,7 @@ static int mpeg_decode_slice(AVCodecContext *avctx, for(;;) { int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2); if (code < 0){ - fprintf(stderr, "first mb_incr damaged\n"); + av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n"); return -1; } if (code >= 33) { @@ -2093,39 +2133,71 @@ static int mpeg_decode_slice(AVCodecContext *avctx, ff_init_block_index(s); for(;;) { +#ifdef HAVE_XVMC + //one 1 we memcpy blocks in xvmcvideo + if(s->avctx->xvmc_acceleration > 1) + XVMC_init_block(s);//set s->block +#endif + s->dsp.clear_blocks(s->block[0]); ret = mpeg_decode_mb(s, s->block); + s->chroma_qscale= s->qscale; dprintf("ret=%d\n", ret); if (ret < 0) return -1; - - if(s->motion_val && s->pict_type != B_TYPE){ //note motion_val is normally NULL unless we want to extract the MVs - const int wrap = s->block_wrap[0]; - const int xy = s->mb_x*2 + 1 + (s->mb_y*2 +1)*wrap; - int motion_x, motion_y; + + if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs + const int wrap = field_pic ? 
2*s->block_wrap[0] : s->block_wrap[0]; + int xy = s->mb_x*2 + 1 + (s->mb_y*2 +1)*wrap; + int motion_for_top_x, motion_for_top_y, motion_back_top_x, motion_back_top_y; + int motion_for_bottom_x, motion_for_bottom_y, motion_back_bottom_x, motion_back_bottom_y; + if(field_pic && !s->first_field) + xy += wrap/2; if (s->mb_intra) { - motion_x = motion_y = 0; - }else if (s->mv_type == MV_TYPE_16X16) { - motion_x = s->mv[0][0][0]; - motion_y = s->mv[0][0][1]; - } else /*if (s->mv_type == MV_TYPE_FIELD)*/ { - motion_x = s->mv[0][0][0] + s->mv[0][1][0]; - motion_y = s->mv[0][0][1] + s->mv[0][1][1]; - motion_x = (motion_x>>1) | (motion_x&1); + motion_for_top_x = motion_for_top_y = motion_back_top_x = motion_back_top_y = + motion_for_bottom_x = motion_for_bottom_y = motion_back_bottom_x = motion_back_bottom_y = 0; + }else if (s->mv_type == MV_TYPE_16X16){ + motion_for_top_x = motion_for_bottom_x = s->mv[0][0][0]; + motion_for_top_y = motion_for_bottom_y = s->mv[0][0][1]; + motion_back_top_x = motion_back_bottom_x = s->mv[1][0][0]; + motion_back_top_y = motion_back_bottom_y = s->mv[1][0][1]; + } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ { + motion_for_top_x = s->mv[0][0][0]; + motion_for_top_y = s->mv[0][0][1]; + motion_for_bottom_x = s->mv[0][1][0]; + motion_for_bottom_y = s->mv[0][1][1]; + motion_back_top_x = s->mv[1][0][0]; + motion_back_top_y = s->mv[1][0][1]; + motion_back_bottom_x = s->mv[1][1][0]; + motion_back_bottom_y = s->mv[1][1][1]; + } + + s->current_picture.motion_val[0][xy][0] = motion_for_top_x; + s->current_picture.motion_val[0][xy][1] = motion_for_top_y; + s->current_picture.motion_val[0][xy + 1][0] = motion_for_top_x; + s->current_picture.motion_val[0][xy + 1][1] = motion_for_top_y; + s->current_picture.motion_val[0][xy + wrap][0] = motion_for_bottom_x; + s->current_picture.motion_val[0][xy + wrap][1] = motion_for_bottom_y; + s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_for_bottom_x; + s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_for_bottom_y; + + if(s->pict_type != B_TYPE){ + motion_back_top_x = motion_back_top_y = motion_back_bottom_x = motion_back_bottom_y = 0; } - s->motion_val[xy][0] = motion_x; - s->motion_val[xy][1] = motion_y; - s->motion_val[xy + 1][0] = motion_x; - s->motion_val[xy + 1][1] = motion_y; - s->motion_val[xy + wrap][0] = motion_x; - s->motion_val[xy + wrap][1] = motion_y; - s->motion_val[xy + 1 + wrap][0] = motion_x; - s->motion_val[xy + 1 + wrap][1] = motion_y; + + s->current_picture.motion_val[1][xy][0] = motion_back_top_x; + s->current_picture.motion_val[1][xy][1] = motion_back_top_y; + s->current_picture.motion_val[1][xy + 1][0] = motion_back_top_x; + s->current_picture.motion_val[1][xy + 1][1] = motion_back_top_y; + s->current_picture.motion_val[1][xy + wrap][0] = motion_back_bottom_x; + s->current_picture.motion_val[1][xy + wrap][1] = motion_back_bottom_y; + s->current_picture.motion_val[1][xy + 1 + wrap][0] = motion_back_bottom_x; + s->current_picture.motion_val[1][xy + 1 + wrap][1] = motion_back_bottom_y; } - + s->dest[0] += 16; s->dest[1] += 8; s->dest[2] += 8; @@ -2133,14 +2205,9 @@ static int mpeg_decode_slice(AVCodecContext *avctx, MPV_decode_mb(s, s->block); if (++s->mb_x >= s->mb_width) { - if(s->avctx->codec_tag == ff_get_fourcc("VCR2")) - exchange_uv((AVFrame*)s->current_picture_ptr); ff_draw_horiz_band(s, 16*s->mb_y, 16); - if(s->avctx->codec_tag == ff_get_fourcc("VCR2")) - exchange_uv((AVFrame*)s->current_picture_ptr); - s->mb_x = 0; s->mb_y++; @@ -2149,7 +2216,7 @@ static int 
mpeg_decode_slice(AVCodecContext *avctx, if(left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23))) || (avctx->error_resilience >= FF_ER_AGGRESSIVE && left>8)){ - fprintf(stderr, "end missmatch left=%d\n", left); + av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d\n", left); return -1; }else goto eos; @@ -2165,7 +2232,7 @@ static int mpeg_decode_slice(AVCodecContext *avctx, for(;;) { int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2); if (code < 0){ - fprintf(stderr, "mb incr damaged\n"); + av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n"); return -1; } if (code >= 33) { @@ -2173,7 +2240,7 @@ static int mpeg_decode_slice(AVCodecContext *avctx, s->mb_skip_run += 33; }else if(code == 35){ if(s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0){ - fprintf(stderr, "slice missmatch\n"); + av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n"); return -1; } goto eos; /* end of slice */ @@ -2212,10 +2279,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) { /* end of image */ - if(s->codec_id == CODEC_ID_MPEG2VIDEO){ - s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2; - }else - s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG1; + s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2; ff_er_frame_end(s); @@ -2223,18 +2287,16 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) if (s->pict_type == B_TYPE || s->low_delay) { *pict= *(AVFrame*)s->current_picture_ptr; - ff_print_debug_info(s, s->current_picture_ptr); + ff_print_debug_info(s, pict); } else { s->picture_number++; /* latency of 1 frame for I and P frames */ /* XXX: use another variable than picture_number */ if (s->last_picture_ptr != NULL) { *pict= *(AVFrame*)s->last_picture_ptr; - ff_print_debug_info(s, s->last_picture_ptr); + ff_print_debug_info(s, pict); } } - if(s->avctx->codec_tag == ff_get_fourcc("VCR2")) - exchange_uv(pict); return 1; } else { @@ -2255,10 +2317,10 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, width = get_bits(&s->gb, 12); height = get_bits(&s->gb, 12); s->aspect_ratio_info= get_bits(&s->gb, 4); - if(s->codec_id == CODEC_ID_MPEG1VIDEO){ - aspect= mpeg1_aspect[s->aspect_ratio_info]; - if(aspect!=0.0) avctx->sample_aspect_ratio= av_d2q(aspect, 255); - } + if (s->aspect_ratio_info == 0) + return -1; + aspect= 1.0/mpeg1_aspect[s->aspect_ratio_info]; + avctx->sample_aspect_ratio= av_d2q(aspect, 255); s->frame_rate_index = get_bits(&s->gb, 4); if (s->frame_rate_index == 0) @@ -2279,7 +2341,6 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, s->width = width; s->height = height; avctx->has_b_frames= 1; - s->avctx = avctx; avctx->width = width; avctx->height = height; av_reduce( @@ -2294,14 +2355,16 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, //get_format() or set_video(width,height,aspect,pix_fmt); //until then pix_fmt may be changed right after codec init if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) - avctx->idct_algo = FF_IDCT_SIMPLE; + if( avctx->idct_algo == FF_IDCT_AUTO ) + avctx->idct_algo = FF_IDCT_SIMPLE; if (MPV_common_init(s) < 0) return -1; s1->mpeg_enc_ctx_allocated = 1; + s->swap_uv = 0;//just in case vcr2 and mpeg2 stream have been concatinated } - skip_bits(&s->gb, 10); /* vbv_buffer_size */ + s->avctx->rc_buffer_size= get_bits(&s->gb, 10) * 1024*16; skip_bits(&s->gb, 1); /* get matrix */ @@ -2355,6 +2418,12 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, s->frame_pred_frame_dct = 1; s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG1VIDEO; 
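The VBV buffer size is now carried through to avctx->rc_buffer_size in two pieces: the 10-bit vbv_buffer_size_value read just above in the sequence header, and the 8-bit extension field added in mpeg_decode_sequence_extension() earlier in this patch, both in units of 16 kbit. A minimal sketch of how the two assignments combine; the helper name is hypothetical:

    #include <stdint.h>

    /* Sketch only: low10 = get_bits(gb, 10) from the sequence header,
     * ext8 = get_bits(gb, 8) from the sequence extension; the result is the
     * VBV buffer size in bits, matching the two rc_buffer_size assignments. */
    static int64_t vbv_buffer_size_bits(int low10, int ext8)
    {
        return (((int64_t)ext8 << 10) | low10) * 16 * 1024;
    }

For plain MPEG-1 streams the extension is absent, so ext8 stays 0; e.g. low10 = 20 gives 327680 bits (40 kB) of VBV buffer.
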
avctx->sub_id = 1; /* indicates mpeg1 */ + if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1; + + if(s->avctx->debug & FF_DEBUG_PICT_INFO) + av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n", + s->avctx->rc_buffer_size, s->bit_rate); + return 0; } @@ -2373,15 +2442,17 @@ static int vcr2_init_sequence(AVCodecContext *avctx) s->height = avctx->height; avctx->has_b_frames= 0; //true? s->low_delay= 1; - s->avctx = avctx; //get_format() or set_video(width,height,aspect,pix_fmt); //until then pix_fmt may be changed right after codec init if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) - avctx->idct_algo = FF_IDCT_SIMPLE; + if( avctx->idct_algo == FF_IDCT_AUTO ) + avctx->idct_algo = FF_IDCT_SIMPLE; if (MPV_common_init(s) < 0) return -1; + exchange_uv(s);//common init reset pblocks, so we swap them here + s->swap_uv = 1;// in case of xvmc we need to swap uv for each MB s1->mpeg_enc_ctx_allocated = 1; for(i=0;i<64;i++) { @@ -2527,7 +2598,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, if (start_code < 0){ if(s2->pict_type != B_TYPE || avctx->hurry_up==0){ if (slice_end(avctx, picture)) { - if(s2->last_picture_ptr) //FIXME merge with the stuff in mpeg_decode_slice + if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice *data_size = sizeof(AVPicture); } } @@ -2537,7 +2608,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, input_size = buf_end - buf_ptr; if(avctx->debug & FF_DEBUG_STARTCODE){ - printf("%3X at %d left %d\n", start_code, buf_ptr-buf, input_size); + av_log(avctx, AV_LOG_DEBUG, "%3X at %d left %d\n", start_code, buf_ptr-buf, input_size); } /* prepare data for next start code */ @@ -2628,20 +2699,60 @@ AVCodec mpeg2video_decoder = { .flush= ff_mpeg_flush, }; +//legacy decoder +AVCodec mpegvideo_decoder = { + "mpegvideo", + CODEC_TYPE_VIDEO, + CODEC_ID_MPEG2VIDEO, + sizeof(Mpeg1Context), + mpeg_decode_init, + NULL, + mpeg_decode_end, + mpeg_decode_frame, + CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED, + .flush= ff_mpeg_flush, +}; + +#if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) + +AVCodec mpeg1video_encoder = { + "mpeg1video", + CODEC_TYPE_VIDEO, + CODEC_ID_MPEG1VIDEO, + sizeof(MpegEncContext), + encode_init, + MPV_encode_picture, + MPV_encode_end, +}; + +#ifdef CONFIG_RISKY + +AVCodec mpeg2video_encoder = { + "mpeg2video", + CODEC_TYPE_VIDEO, + CODEC_ID_MPEG2VIDEO, + sizeof(MpegEncContext), + encode_init, + MPV_encode_picture, + MPV_encode_end, +}; +#endif +#endif + #ifdef HAVE_XVMC static int mpeg_mc_decode_init(AVCodecContext *avctx){ Mpeg1Context *s; if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) ) return -1; - if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ) + if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ){ dprintf("mpeg12.c: XvMC decoder will work better if SLICE_FLAG_ALLOW_FIELD is set\n"); - + } mpeg_decode_init(avctx); s = avctx->priv_data; avctx->pix_fmt = PIX_FMT_XVMC_MPEG2_IDCT; - avctx->xvmc_acceleration = 1; + avctx->xvmc_acceleration = 2;//2 - the blocks are packed! 
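/* Editorial aside, not part of the patch: mpeg_mc_decode_init() above refuses
 * to start without SLICE_FLAG_CODED_ORDER and only warns when
 * SLICE_FLAG_ALLOW_FIELD is missing, so an XvMC-using application is expected
 * to set both before opening the decoder.  Hedged caller-side sketch (helper
 * name is illustrative; the flags and the slice_flags field are the ones used
 * in the hunk above):
 */
#include "avcodec.h"

static void request_xvmc_slice_flags(AVCodecContext *avctx)
{
    /* coded-order slices are mandatory for the XvMC path,
     * field slices are optional but preferred */
    avctx->slice_flags = SLICE_FLAG_CODED_ORDER | SLICE_FLAG_ALLOW_FIELD;
}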
return 0; } diff --git a/src/libffmpeg/libavcodec/mpeg12data.h b/src/libffmpeg/libavcodec/mpeg12data.h index dd94bb572..42b3d49a1 100644 --- a/src/libffmpeg/libavcodec/mpeg12data.h +++ b/src/libffmpeg/libavcodec/mpeg12data.h @@ -283,10 +283,8 @@ static const uint8_t mbPatTable[63][2] = { {0xc, 6} }; -#define MB_TYPE_PAT 0x40000000 #define MB_TYPE_ZERO_MV 0x20000000 #define IS_ZERO_MV(a) ((a)&MB_TYPE_ZERO_MV) -#define IS_PAT(a) ((a)&MB_TYPE_PAT) static const uint8_t table_mb_ptype[7][2] = { { 3, 5 }, // 0x01 MB_INTRA @@ -300,12 +298,12 @@ static const uint8_t table_mb_ptype[7][2] = { static const uint32_t ptype2mb_type[7] = { MB_TYPE_INTRA, - MB_TYPE_L0 | MB_TYPE_PAT | MB_TYPE_ZERO_MV | MB_TYPE_16x16, + MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16, MB_TYPE_L0, - MB_TYPE_L0 | MB_TYPE_PAT, + MB_TYPE_L0 | MB_TYPE_CBP, MB_TYPE_QUANT | MB_TYPE_INTRA, - MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT | MB_TYPE_ZERO_MV | MB_TYPE_16x16, - MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT, + MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16, + MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP, }; static const uint8_t table_mb_btype[11][2] = { @@ -325,15 +323,15 @@ static const uint8_t table_mb_btype[11][2] = { static const uint32_t btype2mb_type[11] = { MB_TYPE_INTRA, MB_TYPE_L1, - MB_TYPE_L1 | MB_TYPE_PAT, + MB_TYPE_L1 | MB_TYPE_CBP, MB_TYPE_L0, - MB_TYPE_L0 | MB_TYPE_PAT, + MB_TYPE_L0 | MB_TYPE_CBP, MB_TYPE_L0L1, - MB_TYPE_L0L1 | MB_TYPE_PAT, + MB_TYPE_L0L1 | MB_TYPE_CBP, MB_TYPE_QUANT | MB_TYPE_INTRA, - MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_PAT, - MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT, - MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_PAT, + MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP, + MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP, + MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP, }; static const uint8_t mbMotionVectorTable[17][2] = { @@ -435,3 +433,10 @@ static const AVRational mpeg2_aspect[16]={ {0,1}, }; +static const uint8_t svcd_scan_offset_placeholder[14]={ + 0x10, 0x0E, + 0x00, 0x80, 0x81, + 0x00, 0x80, 0x81, + 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, +}; diff --git a/src/libffmpeg/libavcodec/mpegaudio.h b/src/libffmpeg/libavcodec/mpegaudio.h index 7aba13200..e50e8bd6f 100644 --- a/src/libffmpeg/libavcodec/mpegaudio.h +++ b/src/libffmpeg/libavcodec/mpegaudio.h @@ -19,6 +19,7 @@ #define MPA_MONO 3 int l2_select_table(int bitrate, int nb_channels, int freq, int lsf); +int mpa_decode_header(AVCodecContext *avctx, uint32_t head); extern const uint16_t mpa_bitrate_tab[2][3][15]; extern const uint16_t mpa_freq_tab[3]; diff --git a/src/libffmpeg/libavcodec/mpegaudiodec.c b/src/libffmpeg/libavcodec/mpegaudiodec.c index b08f83757..09e9b8cdb 100644 --- a/src/libffmpeg/libavcodec/mpegaudiodec.c +++ b/src/libffmpeg/libavcodec/mpegaudiodec.c @@ -25,6 +25,7 @@ //#define DEBUG #include "avcodec.h" #include "mpegaudio.h" +#include "dsputil.h" /* * TODO: @@ -66,6 +67,8 @@ typedef int32_t MPA_INT; #define HEADER_SIZE 4 #define BACKSTEP_SIZE 512 +struct GranuleDef; + typedef struct MPADecodeContext { uint8_t inbuf1[2][MPA_MAX_CODED_FRAME_SIZE + BACKSTEP_SIZE]; /* input buffer */ int inbuf_index; @@ -93,6 +96,7 @@ typedef struct MPADecodeContext { #ifdef DEBUG int frame_count; #endif + void (*compute_antialias)(struct MPADecodeContext *s, struct GranuleDef *g); } MPADecodeContext; /* layer 3 "granule" */ @@ -127,6 +131,9 @@ typedef struct HuffTable { #include "mpegaudiodectab.h" +static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g); +static void compute_antialias_float(MPADecodeContext *s, 
GranuleDef *g); + /* vlc structure for decoding layer 3 huffman tables */ static VLC huff_vlc[16]; static uint8_t *huff_code_table[16]; @@ -144,7 +151,8 @@ static uint32_t *table_4_3_value; /* intensity stereo coef table */ static int32_t is_table[2][16]; static int32_t is_table_lsf[2][2][16]; -static int32_t csa_table[8][2]; +static int32_t csa_table[8][4]; +static float csa_table_float[8][4]; static int32_t mdct_win[8][36]; /* lower 2 bits: modulo 3, higher bits: shift */ @@ -310,6 +318,11 @@ static int decode_init(AVCodecContext * avctx) static int init=0; int i, j, k; + if(avctx->antialias_algo == FF_AA_INT) + s->compute_antialias= compute_antialias_integer; + else + s->compute_antialias= compute_antialias_float; + if (!init && !avctx->parse_only) { /* scale factors table for layer 1/2 */ for(i=0;i<64;i++) { @@ -462,6 +475,13 @@ static int decode_init(AVCodecContext * avctx) ca = cs * ci; csa_table[i][0] = FIX(cs); csa_table[i][1] = FIX(ca); + csa_table[i][2] = FIX(ca) + FIX(cs); + csa_table[i][3] = FIX(ca) - FIX(cs); + csa_table_float[i][0] = cs; + csa_table_float[i][1] = ca; + csa_table_float[i][2] = ca + cs; + csa_table_float[i][3] = ca - cs; +// printf("%d %d %d %d\n", FIX(cs), FIX(cs-1), FIX(ca), FIX(cs)-FIX(ca)); } /* compute mdct windows */ @@ -1206,17 +1226,11 @@ static int decode_header(MPADecodeContext *s, uint32_t header) return 0; } -#if 0 /* useful helper to get mpeg audio stream infos. Return -1 if error in - header */ -int mp_decode_header(int *sample_rate_ptr, - int *nb_channels_ptr, - int *coded_frame_size_ptr, - int *decoded_frame_size_ptr, - uint32_t head) + header, otherwise the coded frame size in bytes */ +int mpa_decode_header(AVCodecContext *avctx, uint32_t head) { MPADecodeContext s1, *s = &s1; - int decoded_frame_size; if (check_header(head) != 0) return -1; @@ -1227,27 +1241,26 @@ int mp_decode_header(int *sample_rate_ptr, switch(s->layer) { case 1: - decoded_frame_size = 384; + avctx->frame_size = 384; break; case 2: - decoded_frame_size = 1152; + avctx->frame_size = 1152; break; default: case 3: if (s->lsf) - decoded_frame_size = 576; + avctx->frame_size = 576; else - decoded_frame_size = 1152; + avctx->frame_size = 1152; break; } - *sample_rate_ptr = s->sample_rate; - *nb_channels_ptr = s->nb_channels; - *coded_frame_size_ptr = s->frame_size; - *decoded_frame_size_ptr = decoded_frame_size * 2 * s->nb_channels; - return 0; + avctx->sample_rate = s->sample_rate; + avctx->channels = s->nb_channels; + avctx->bit_rate = s->bit_rate; + avctx->sub_id = s->layer; + return s->frame_size; } -#endif /* return the number of decoded frames */ static int mp_decode_layer1(MPADecodeContext *s) @@ -1899,11 +1912,11 @@ static void compute_stereo(MPADecodeContext *s, } } -static void compute_antialias(MPADecodeContext *s, +static void compute_antialias_integer(MPADecodeContext *s, GranuleDef *g) { int32_t *ptr, *p0, *p1, *csa; - int n, tmp0, tmp1, i, j; + int n, i, j; /* we antialias only "long" bands */ if (g->block_type == 2) { @@ -1919,17 +1932,85 @@ static void compute_antialias(MPADecodeContext *s, for(i = n;i > 0;i--) { p0 = ptr - 1; p1 = ptr; - csa = &csa_table[0][0]; - for(j=0;j<8;j++) { + csa = &csa_table[0][0]; + for(j=0;j<4;j++) { + int tmp0 = *p0; + int tmp1 = *p1; +#if 0 + *p0 = FRAC_RND(MUL64(tmp0, csa[0]) - MUL64(tmp1, csa[1])); + *p1 = FRAC_RND(MUL64(tmp0, csa[1]) + MUL64(tmp1, csa[0])); +#else + int64_t tmp2= MUL64(tmp0 + tmp1, csa[0]); + *p0 = FRAC_RND(tmp2 - MUL64(tmp1, csa[2])); + *p1 = FRAC_RND(tmp2 + MUL64(tmp0, csa[3])); +#endif + p0--; p1++; + csa += 
4; tmp0 = *p0; tmp1 = *p1; +#if 0 *p0 = FRAC_RND(MUL64(tmp0, csa[0]) - MUL64(tmp1, csa[1])); *p1 = FRAC_RND(MUL64(tmp0, csa[1]) + MUL64(tmp1, csa[0])); - p0--; - p1++; - csa += 2; +#else + tmp2= MUL64(tmp0 + tmp1, csa[0]); + *p0 = FRAC_RND(tmp2 - MUL64(tmp1, csa[2])); + *p1 = FRAC_RND(tmp2 + MUL64(tmp0, csa[3])); +#endif + p0--; p1++; + csa += 4; } - ptr += 18; + ptr += 18; + } +} + +static void compute_antialias_float(MPADecodeContext *s, + GranuleDef *g) +{ + int32_t *ptr, *p0, *p1; + int n, i, j; + + /* we antialias only "long" bands */ + if (g->block_type == 2) { + if (!g->switch_point) + return; + /* XXX: check this for 8000Hz case */ + n = 1; + } else { + n = SBLIMIT - 1; + } + + ptr = g->sb_hybrid + 18; + for(i = n;i > 0;i--) { + float *csa = &csa_table_float[0][0]; + p0 = ptr - 1; + p1 = ptr; + for(j=0;j<4;j++) { + float tmp0 = *p0; + float tmp1 = *p1; +#if 1 + *p0 = lrintf(tmp0 * csa[0] - tmp1 * csa[1]); + *p1 = lrintf(tmp0 * csa[1] + tmp1 * csa[0]); +#else + float tmp2= (tmp0 + tmp1) * csa[0]; + *p0 = lrintf(tmp2 - tmp1 * csa[2]); + *p1 = lrintf(tmp2 + tmp0 * csa[3]); +#endif + p0--; p1++; + csa += 4; + tmp0 = *p0; + tmp1 = *p1; +#if 1 + *p0 = lrintf(tmp0 * csa[0] - tmp1 * csa[1]); + *p1 = lrintf(tmp0 * csa[1] + tmp1 * csa[0]); +#else + tmp2= (tmp0 + tmp1) * csa[0]; + *p0 = lrintf(tmp2 - tmp1 * csa[2]); + *p1 = lrintf(tmp2 + tmp0 * csa[3]); +#endif + p0--; p1++; + csa += 4; + } + ptr += 18; } } @@ -2359,7 +2440,7 @@ static int mp_decode_layer3(MPADecodeContext *s) #if defined(DEBUG) sample_dump(0, g->sb_hybrid, 576); #endif - compute_antialias(s, g); + s->compute_antialias(s, g); #if defined(DEBUG) sample_dump(1, g->sb_hybrid, 576); #endif diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c index 0dd59d62e..883c21260 100644 --- a/src/libffmpeg/libavcodec/mpegvideo.c +++ b/src/libffmpeg/libavcodec/mpegvideo.c @@ -1,6 +1,7 @@ /* * The simplest mpeg encoder (well, it was the simplest!) * Copyright (c) 2000,2001 Fabrice Bellard. 
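/* Editorial aside on the mpegaudiodec.c antialias hunks above, not part of the
 * patch: csa_table grows from 2 to 4 entries per coefficient (cs, ca, ca+cs,
 * ca-cs) so each butterfly can be computed with 3 long multiplications
 * instead of 4, using
 *     p0' = tmp0*cs - tmp1*ca = (tmp0+tmp1)*cs - tmp1*(ca+cs)
 *     p1' = tmp0*ca + tmp1*cs = (tmp0+tmp1)*cs + tmp0*(ca-cs)
 * Floating-point sketch of the same identity (names are illustrative):
 */
static void antialias_butterfly_3mul(double cs, double ca,
                                     double tmp0, double tmp1,
                                     double *p0, double *p1)
{
    double tmp2 = (tmp0 + tmp1) * cs;   /* shared product */
    *p0 = tmp2 - tmp1 * (ca + cs);      /* equals tmp0*cs - tmp1*ca */
    *p1 = tmp2 + tmp0 * (ca - cs);      /* equals tmp0*ca + tmp1*cs */
}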
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -28,6 +29,7 @@ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" +#include "faandct.h" #ifdef USE_FASTMEMCPY #include "fastmemcpy.h" @@ -39,23 +41,30 @@ #if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) static void encode_picture(MpegEncContext *s, int picture_number); #endif //CONFIG_ENCODERS -static void dct_unquantize_mpeg1_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); -static void dct_unquantize_mpeg2_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); -static void dct_unquantize_h263_c(MpegEncContext *s, +static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, + DCTELEM *block, int n, int qscale); +static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, + DCTELEM *block, int n, int qscale); +static void dct_unquantize_h263_intra_c(MpegEncContext *s, + DCTELEM *block, int n, int qscale); +static void dct_unquantize_h263_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w); #if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); static int sse_mb(MpegEncContext *s); +static void denoise_dct_c(MpegEncContext *s, DCTELEM *block); #endif //CONFIG_ENCODERS #ifdef HAVE_XVMC extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx); extern void XVMC_field_end(MpegEncContext *s); -extern void XVMC_decode_mb(MpegEncContext *s, DCTELEM block[6][64]); +extern void XVMC_decode_mb(MpegEncContext *s); #endif void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c; @@ -87,22 +96,31 @@ static const uint8_t h263_chroma_roundtab[16] = { 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, }; +static const uint8_t ff_default_chroma_qscale_table[32]={ +// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +}; + #if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL; static uint8_t default_fcode_tab[MAX_MV*2+1]; enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1}; -static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64], +static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax) { int qscale; for(qscale=qmin; qscale<=qmax; qscale++){ int i; - if (s->dsp.fdct == ff_jpeg_fdct_islow) { + if (dsp->fdct == ff_jpeg_fdct_islow +#ifdef FAAN_POSTSCALE + || dsp->fdct == ff_faandct +#endif + ) { for(i=0;i<64;i++) { - const int j= s->dsp.idct_permutation[i]; + const int j= dsp->idct_permutation[i]; /* 16 <= qscale * quant_matrix[i] <= 7905 */ /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ @@ -111,9 +129,13 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t 
(*qmat16 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j])); } - } else if (s->dsp.fdct == fdct_ifast) { + } else if (dsp->fdct == fdct_ifast +#ifndef FAAN_POSTSCALE + || dsp->fdct == ff_faandct +#endif + ) { for(i=0;i<64;i++) { - const int j= s->dsp.idct_permutation[i]; + const int j= dsp->idct_permutation[i]; /* 16 <= qscale * quant_matrix[i] <= 7905 */ /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ @@ -124,7 +146,7 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16 } } else { for(i=0;i<64;i++) { - const int j= s->dsp.idct_permutation[i]; + const int j= dsp->idct_permutation[i]; /* We can safely suppose that 16 <= quant_matrix[i] <= 255 So 16 <= qscale * quant_matrix[i] <= 7905 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905 @@ -132,10 +154,10 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16 */ qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j])); // qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]); - qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]); + qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]); - if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1; - qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]); + if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1; + qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]); } } } @@ -190,12 +212,16 @@ void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){ /* init common dct for both encoder and decoder */ int DCT_common_init(MpegEncContext *s) { - s->dct_unquantize_h263 = dct_unquantize_h263_c; - s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c; - s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c; + s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c; + s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c; + s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; + s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c; + s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c; + s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; #if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) s->dct_quantize= dct_quantize_c; + s->denoise_dct= denoise_dct_c; #endif #ifdef HAVE_MMX @@ -229,8 +255,13 @@ int DCT_common_init(MpegEncContext *s) /* load & permutate scantables note: only wmv uses differnt ones */ - ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); - ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); + if(s->alternate_scan){ + ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); + ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan); + }else{ + ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); + ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); + } ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); @@ -244,6 +275,17 @@ static void copy_picture(Picture *dst, Picture *src){ 
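/* Editorial aside, not part of the patch: convert_matrix() above precomputes
 * fixed-point reciprocals, qmat = (1<<QMAT_SHIFT)/(qscale*quant_matrix[j]),
 * so quantization later becomes a multiply-and-shift instead of a division.
 * Illustrative sketch with an arbitrary shift and a non-negative coefficient
 * for brevity:
 */
static int quantize_one_coeff(int coef, int qscale, int quant_matrix_entry)
{
    const int SHIFT = 16;                                    /* example precision */
    int qmat = (1 << SHIFT) / (qscale * quant_matrix_entry); /* precomputed once */
    return (coef * qmat) >> SHIFT;                           /* ~ coef/(qscale*qm) */
}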
dst->type= FF_BUFFER_TYPE_COPY; } +static void copy_picture_attributes(AVFrame *dst, AVFrame *src){ + dst->pict_type = src->pict_type; + dst->quality = src->quality; + dst->coded_picture_number = src->coded_picture_number; + dst->display_picture_number = src->display_picture_number; +// dst->reference = src->reference; + dst->pts = src->pts; + dst->interlaced_frame = src->interlaced_frame; + dst->top_field_first = src->top_field_first; +} + /** * allocates a Picture * The pixels are allocated/set by calling get_buffer() if shared=0 @@ -251,6 +293,8 @@ static void copy_picture(Picture *dst, Picture *src){ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11 const int mb_array_size= s->mb_stride*s->mb_height; + const int b8_array_size= s->b8_stride*s->mb_height*2; + const int b4_array_size= s->b4_stride*s->mb_height*4; int i; if(shared){ @@ -265,17 +309,17 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic); if(r<0 || !pic->age || !pic->type || !pic->data[0]){ - fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]); + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]); return -1; } if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){ - fprintf(stderr, "get_buffer() failed (stride changed)\n"); + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n"); return -1; } if(pic->linesize[1] != pic->linesize[2]){ - fprintf(stderr, "get_buffer() failed (uv stride missmatch)\n"); + av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n"); return -1; } @@ -288,18 +332,25 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){ CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t)) CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t)) CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t)) - CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t)) } CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t)) - CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(int)) + CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(uint32_t)) pic->mb_type= pic->mb_type_base + s->mb_stride+1; if(s->out_format == FMT_H264){ for(i=0; i<2; i++){ - CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t)) - CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t)) + CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+1) * sizeof(int16_t)) + pic->motion_val[i]= pic->motion_val_base[i]+1; + CHECKED_ALLOCZ(pic->ref_index[i] , b8_array_size * sizeof(uint8_t)) + } + pic->motion_subsample_log2= 2; + }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){ + for(i=0; i<2; i++){ + CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+1) * sizeof(int16_t)*2) //FIXME + pic->motion_val[i]= pic->motion_val_base[i]+1; } + pic->motion_subsample_log2= 3; } pic->qstride= s->mb_stride; CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan)) @@ -329,14 +380,13 @@ static void free_picture(MpegEncContext *s, Picture *pic){ av_freep(&pic->mb_var); av_freep(&pic->mc_mb_var); av_freep(&pic->mb_mean); - 
av_freep(&pic->mb_cmp_score); av_freep(&pic->mbskip_table); av_freep(&pic->qscale_table); av_freep(&pic->mb_type_base); av_freep(&pic->pan_scan); pic->mb_type= NULL; for(i=0; i<2; i++){ - av_freep(&pic->motion_val[i]); + av_freep(&pic->motion_val_base[i]); av_freep(&pic->ref_index[i]); } @@ -352,17 +402,21 @@ static void free_picture(MpegEncContext *s, Picture *pic){ /* init common structure for both encoder and decoder */ int MPV_common_init(MpegEncContext *s) { - int y_size, c_size, yc_size, i, mb_array_size, x, y; + int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y; dsputil_init(&s->dsp, s->avctx); DCT_common_init(s); s->flags= s->avctx->flags; + s->flags2= s->avctx->flags2; s->mb_width = (s->width + 15) / 16; s->mb_height = (s->height + 15) / 16; s->mb_stride = s->mb_width + 1; + s->b8_stride = s->mb_width*2 + 1; + s->b4_stride = s->mb_width*4 + 1; mb_array_size= s->mb_height * s->mb_stride; + mv_table_size= (s->mb_height+2) * s->mb_stride + 1; /* set default edge pos, will be overriden in decode_header if needed */ s->h_edge_pos= s->mb_width*16; @@ -377,6 +431,14 @@ int MPV_common_init(MpegEncContext *s) s->block_wrap[4]= s->block_wrap[5]= s->mb_width + 2; + s->y_dc_scale_table= + s->c_dc_scale_table= ff_mpeg1_dc_scale_table; + s->chroma_qscale_table= ff_default_chroma_qscale_table; + if (!s->encoding) + s->progressive_sequence= 1; + s->progressive_frame= 1; + s->coded_picture_number = 0; + y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2); c_size = (s->mb_width + 2) * (s->mb_height + 2); yc_size = y_size + 2 * c_size; @@ -406,8 +468,6 @@ int MPV_common_init(MpegEncContext *s) s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed? if (s->encoding) { - int mv_table_size= s->mb_stride * (s->mb_height+2) + 1; - /* Allocate MV tables */ CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) @@ -439,25 +499,43 @@ int MPV_common_init(MpegEncContext *s) CHECKED_ALLOCZ(s->avctx->stats_out, 256); /* Allocate MB type table */ - CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint8_t)) //needed for encoding + CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint16_t)) //needed for encoding CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int)) + + CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int)) + CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int)) + CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t)) + CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t)) + CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*)) + CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*)) + + if(s->avctx->noise_reduction){ + CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int)) + CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t)) + } } + CHECKED_ALLOCZ(s->blocks, 64*6*2 * sizeof(DCTELEM)) + CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture)) + CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t)) - if (s->out_format == FMT_H263 || s->encoding) { - int size; - - /* MV prediction */ - size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2); - CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t)); - } - - if(s->codec_id==CODEC_ID_MPEG4){ + if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){ /* interlaced direct mode decoding tables */ - CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t)) 
- CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t)) + for(i=0; i<2; i++){ + int j, k; + for(j=0; j<2; j++){ + for(k=0; k<2; k++){ + CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k] , mv_table_size * 2 * sizeof(int16_t)) + s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1; + } + CHECKED_ALLOCZ(s->b_field_select_table[i][j] , mb_array_size * 2 * sizeof(uint8_t)) + CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j] , mv_table_size * 2 * sizeof(int16_t)) + s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1; + } + CHECKED_ALLOCZ(s->p_field_select_table[i] , mb_array_size * 2 * sizeof(uint8_t)) + } } if (s->out_format == FMT_H263) { /* ac values */ @@ -500,7 +578,16 @@ int MPV_common_init(MpegEncContext *s) s->block= s->blocks[0]; + for(i=0;i<12;i++){ + s->pblocks[i] = (short *)(&s->block[i]); + } + s->parse_context.state= -1; + if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){ + s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH); + s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH); + s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH); + } s->context_initialized = 1; return 0; @@ -515,7 +602,7 @@ int MPV_common_init(MpegEncContext *s) /* init common structure for both encoder and decoder */ void MPV_common_end(MpegEncContext *s) { - int i; + int i, j, k; av_freep(&s->parse_context.buffer); s->parse_context.buffer_size=0; @@ -533,8 +620,19 @@ void MPV_common_end(MpegEncContext *s) s->b_bidir_forw_mv_table= NULL; s->b_bidir_back_mv_table= NULL; s->b_direct_mv_table= NULL; + for(i=0; i<2; i++){ + for(j=0; j<2; j++){ + for(k=0; k<2; k++){ + av_freep(&s->b_field_mv_table_base[i][j][k]); + s->b_field_mv_table[i][j][k]=NULL; + } + av_freep(&s->b_field_select_table[i][j]); + av_freep(&s->p_field_mv_table_base[i][j]); + s->p_field_mv_table[i][j]=NULL; + } + av_freep(&s->p_field_select_table[i]); + } - av_freep(&s->motion_val); av_freep(&s->dc_val[0]); av_freep(&s->ac_val[0]); av_freep(&s->coded_block); @@ -551,22 +649,35 @@ void MPV_common_end(MpegEncContext *s) av_freep(&s->tex_pb_buffer); av_freep(&s->pb2_buffer); av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL; - av_freep(&s->field_mv_table); - av_freep(&s->field_select_table); av_freep(&s->avctx->stats_out); av_freep(&s->ac_stats); av_freep(&s->error_status_table); av_freep(&s->mb_index2xy); av_freep(&s->lambda_table); - - for(i=0; i<MAX_PICTURE_COUNT; i++){ - free_picture(s, &s->picture[i]); + av_freep(&s->q_intra_matrix); + av_freep(&s->q_inter_matrix); + av_freep(&s->q_intra_matrix16); + av_freep(&s->q_inter_matrix16); + av_freep(&s->blocks); + av_freep(&s->input_picture); + av_freep(&s->reordered_input_picture); + av_freep(&s->dct_error_sum); + av_freep(&s->dct_offset); + + if(s->picture){ + for(i=0; i<MAX_PICTURE_COUNT; i++){ + free_picture(s, &s->picture[i]); + } } + av_freep(&s->picture); avcodec_default_free_buffers(s->avctx); s->context_initialized = 0; s->last_picture_ptr= s->next_picture_ptr= s->current_picture_ptr= NULL; + for(i=0; i<3; i++) + if (s->visualization_buffer[i]) + av_free(s->visualization_buffer[i]); } #if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) @@ -581,25 +692,17 @@ int MPV_encode_init(AVCodecContext *avctx) avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME s->bit_rate = avctx->bit_rate; - s->bit_rate_tolerance = avctx->bit_rate_tolerance; 
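/* Editorial aside, not part of the patch: the MPV_encode_init hunk below
 * reduces frame_rate/frame_rate_base by their greatest common divisor via
 * ff_gcd() before encoding.  Plain Euclid sketch of the same reduction, e.g.
 * 50/2 becomes 25/1 while 30000/1001 (NTSC) is already in lowest terms:
 */
static unsigned gcd_u(unsigned a, unsigned b)
{
    while (b) {
        unsigned t = a % b;
        a = b;
        b = t;
    }
    return a;
}
/* usage: g = gcd_u(frame_rate, frame_rate_base); frame_rate /= g; frame_rate_base /= g; */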
s->width = avctx->width; s->height = avctx->height; if(avctx->gop_size > 600){ - fprintf(stderr, "Warning keyframe interval too large! reducing it ...\n"); + av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n"); avctx->gop_size=600; } s->gop_size = avctx->gop_size; - s->rtp_mode = avctx->rtp_mode; - s->rtp_payload_size = avctx->rtp_payload_size; - if (avctx->rtp_callback) - s->rtp_callback = avctx->rtp_callback; - s->max_qdiff= avctx->max_qdiff; - s->qcompress= avctx->qcompress; - s->qblur= avctx->qblur; s->avctx = avctx; s->flags= avctx->flags; + s->flags2= avctx->flags2; s->max_b_frames= avctx->max_b_frames; - s->b_frame_strategy= avctx->b_frame_strategy; s->codec_id= avctx->codec->id; s->luma_elim_threshold = avctx->luma_elim_threshold; s->chroma_elim_threshold= avctx->chroma_elim_threshold; @@ -607,6 +710,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->data_partitioning= avctx->flags & CODEC_FLAG_PART; s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0; s->mpeg_quant= avctx->mpeg_quant; + s->rtp_mode= !!avctx->rtp_payload_size; if (s->gop_size <= 1) { s->intra_only = 1; @@ -618,47 +722,88 @@ int MPV_encode_init(AVCodecContext *avctx) s->me_method = avctx->me_method; /* Fixed QSCALE */ - s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE); + s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE); s->adaptive_quant= ( s->avctx->lumi_masking || s->avctx->dark_masking || s->avctx->temporal_cplx_masking || s->avctx->spatial_cplx_masking - || s->avctx->p_masking) + || s->avctx->p_masking + || (s->flags&CODEC_FLAG_QP_RD)) && !s->fixed_qscale; - s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT); + s->obmc= !!(s->flags & CODEC_FLAG_OBMC); + s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER); + s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN); - if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){ - fprintf(stderr, "4MV not supporetd by codec\n"); + if(avctx->rc_max_rate && !avctx->rc_buffer_size){ + av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n"); + return -1; + } + + if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){ + av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n"); + } + + if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4 + && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){ + av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n"); + return -1; + } + + if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){ + av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decission\n"); + return -1; + } + + if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){ + av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n"); return -1; } if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){ - fprintf(stderr, "qpel not supporetd by codec\n"); + av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n"); return -1; } if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){ - fprintf(stderr, "data partitioning not supporetd by codec\n"); + av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n"); return -1; } if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){ - fprintf(stderr, "b frames not supporetd by codec\n"); + av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n"); return -1; } if(s->mpeg_quant && s->codec_id != 
CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too - fprintf(stderr, "mpeg2 style quantization not supporetd by codec\n"); + av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n"); return -1; } if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){ - fprintf(stderr, "CBP RD needs trellis quant\n"); + av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n"); + return -1; + } + + if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){ + av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n"); + return -1; + } + + if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){ + av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yet\n"); return -1; } + i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base); + if(i > 1){ + av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n"); + avctx->frame_rate /= i; + avctx->frame_rate_base /= i; +// return -1; + } + if(s->codec_id==CODEC_ID_MJPEG){ s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x s->inter_quant_bias= 0; @@ -691,8 +836,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->out_format = FMT_MPEG1; s->low_delay= 0; //s->max_b_frames ? 0 : 1; avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1); - s->rtp_mode= 1; // mpeg2 must have slices - if(s->rtp_payload_size == 0) s->rtp_payload_size= 256*256*256; + s->rtp_mode= 1; break; case CODEC_ID_LJPEG: case CODEC_ID_MJPEG: @@ -714,10 +858,11 @@ int MPV_encode_init(AVCodecContext *avctx) #ifdef CONFIG_RISKY case CODEC_ID_H263: if (h263_get_picture_format(s->width, s->height) == 7) { - printf("Input picture size isn't suitable for h263 codec! try h263+\n"); + av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n"); return -1; } s->out_format = FMT_H263; + s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0; avctx->delay=0; s->low_delay=1; break; @@ -725,11 +870,17 @@ int MPV_encode_init(AVCodecContext *avctx) s->out_format = FMT_H263; s->h263_plus = 1; /* Fx */ - s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0; + s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0; s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0; + s->modified_quant= s->h263_aic; + s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0; + s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0; + s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0; + s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus; + s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 
1:0; + /* /Fx */ /* These are just to be sure */ - s->umvplus = 1; avctx->delay=0; s->low_delay=1; break; @@ -743,7 +894,6 @@ int MPV_encode_init(AVCodecContext *avctx) break; case CODEC_ID_RV10: s->out_format = FMT_H263; - s->h263_rv10 = 1; avctx->delay=0; s->low_delay=1; break; @@ -807,7 +957,7 @@ int MPV_encode_init(AVCodecContext *avctx) default: return -1; } - + { /* set up some save defaults, some codecs might override them later */ static int done=0; if(!done){ @@ -825,8 +975,6 @@ int MPV_encode_init(AVCodecContext *avctx) } s->me.mv_penalty= default_mv_penalty; s->fcode_tab= default_fcode_tab; - s->y_dc_scale_table= - s->c_dc_scale_table= ff_mpeg1_dc_scale_table; /* dont use mv_penalty table for crap MV as it would be confused */ //FIXME remove after fixing / removing old ME @@ -837,6 +985,13 @@ int MPV_encode_init(AVCodecContext *avctx) /* init */ if (MPV_common_init(s) < 0) return -1; + + if(s->modified_quant) + s->chroma_qscale_table= ff_h263_chroma_qscale_table; + s->progressive_frame= + s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)); + + ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp); #if defined(CONFIG_ENCODERS) || !defined(XINE_MPEG_ENCODER) ff_init_me(s); @@ -856,7 +1011,6 @@ int MPV_encode_init(AVCodecContext *avctx) /* init default q matrix */ for(i=0;i<64;i++) { int j= s->dsp.idct_permutation[i]; -#if defined(CONFIG_ENCODERS) || !defined(XINE_MPEG_ENCODER) #ifdef CONFIG_RISKY if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){ s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i]; @@ -866,7 +1020,6 @@ int MPV_encode_init(AVCodecContext *avctx) s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; }else #endif -#endif { /* mpeg1/2 */ s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i]; s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i]; @@ -880,9 +1033,9 @@ int MPV_encode_init(AVCodecContext *avctx) /* precompute matrix */ /* for mjpeg, we do include qscale in the matrix */ if (s->out_format != FMT_MJPEG) { - convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, + convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, s->intra_matrix, s->intra_quant_bias, 1, 31); - convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias, + convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16, s->inter_matrix, s->inter_quant_bias, 1, 31); } @@ -890,8 +1043,8 @@ int MPV_encode_init(AVCodecContext *avctx) return -1; s->picture_number = 0; + s->input_picture_number = 0; s->picture_in_gop_number = 0; - s->fake_picture_number = 0; /* motion detector init */ s->f_code = 1; s->b_code = 1; @@ -989,32 +1142,50 @@ static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w) } } -static int find_unused_picture(MpegEncContext *s, int shared){ +int ff_find_unused_picture(MpegEncContext *s, int shared){ int i; if(shared){ for(i=0; i<MAX_PICTURE_COUNT; i++){ - if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break; + if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i; } }else{ for(i=0; i<MAX_PICTURE_COUNT; i++){ - if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME + if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME } for(i=0; i<MAX_PICTURE_COUNT; i++){ - if(s->picture[i].data[0]==NULL) break; + if(s->picture[i].data[0]==NULL) return i; } } - assert(i<MAX_PICTURE_COUNT); - return i; + assert(0); + return -1; +} + +static void update_noise_reduction(MpegEncContext *s){ + int 
intra, i; + + for(intra=0; intra<2; intra++){ + if(s->dct_count[intra] > (1<<16)){ + for(i=0; i<64; i++){ + s->dct_error_sum[intra][i] >>=1; + } + s->dct_count[intra] >>= 1; + } + + for(i=0; i<64; i++){ + s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1); + } + } } -/* generic function for encode/decode called before a frame is coded/decoded */ +/** + * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded + */ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { int i; AVFrame *pic; - s->mb_skiped = 0; assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3); @@ -1028,7 +1199,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) if(!s->encoding){ for(i=0; i<MAX_PICTURE_COUNT; i++){ if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){ - fprintf(stderr, "releasing zombie picture\n"); + av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n"); avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]); } } @@ -1043,18 +1214,23 @@ alloc: } } - i= find_unused_picture(s, 0); - - pic= (AVFrame*)&s->picture[i]; + if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL) + pic= (AVFrame*)s->current_picture_ptr; //we allready have a unused image (maybe it was set before reading the header) + else{ + i= ff_find_unused_picture(s, 0); + pic= (AVFrame*)&s->picture[i]; + } + pic->reference= s->pict_type != B_TYPE ? 3 : 0; - if(s->current_picture_ptr) - pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1; + pic->coded_picture_number= s->coded_picture_number++; if( alloc_picture(s, (Picture*)pic, 0) < 0) return -1; - s->current_picture_ptr= &s->picture[i]; + s->current_picture_ptr= (Picture*)pic; + s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic + s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence; } s->current_picture_ptr->pict_type= s->pict_type; @@ -1074,7 +1250,7 @@ alloc: if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr); if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){ - fprintf(stderr, "warning: first frame is no keyframe\n"); + av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n"); assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference goto alloc; } @@ -1099,13 +1275,23 @@ alloc: /* set dequantizer, we cant do it during init as it might change for mpeg4 and we cant do it in the header decode as init isnt called for mpeg4 there yet */ - if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) - s->dct_unquantize = s->dct_unquantize_mpeg2; - else if(s->out_format == FMT_H263) - s->dct_unquantize = s->dct_unquantize_h263; - else - s->dct_unquantize = s->dct_unquantize_mpeg1; + if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){ + s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra; + s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter; + }else if(s->out_format == FMT_H263){ + s->dct_unquantize_intra = s->dct_unquantize_h263_intra; + s->dct_unquantize_inter = s->dct_unquantize_h263_inter; + }else{ + s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra; + s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter; + } + + if(s->dct_error_sum){ + assert(s->avctx->noise_reduction && s->encoding); + 
update_noise_reduction(s); + } + #ifdef HAVE_XVMC if(s->avctx->xvmc_acceleration) return XVMC_field_start(s, avctx); @@ -1243,105 +1429,220 @@ static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int /** * prints debuging info for the given picture. */ -void ff_print_debug_info(MpegEncContext *s, Picture *pict){ +void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){ if(!pict || !pict->mb_type) return; if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){ int x,y; - + + av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: "); + switch (pict->pict_type) { + case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break; + case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break; + case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break; + case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break; + case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break; + case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break; + } for(y=0; y<s->mb_height; y++){ for(x=0; x<s->mb_width; x++){ if(s->avctx->debug&FF_DEBUG_SKIP){ int count= s->mbskip_table[x + y*s->mb_stride]; if(count>9) count=9; - printf("%1d", count); + av_log(s->avctx, AV_LOG_DEBUG, "%1d", count); } if(s->avctx->debug&FF_DEBUG_QP){ - printf("%2d", pict->qscale_table[x + y*s->mb_stride]); + av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]); } if(s->avctx->debug&FF_DEBUG_MB_TYPE){ int mb_type= pict->mb_type[x + y*s->mb_stride]; - //Type & MV direction if(IS_PCM(mb_type)) - printf("P"); + av_log(s->avctx, AV_LOG_DEBUG, "P"); else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type)) - printf("A"); + av_log(s->avctx, AV_LOG_DEBUG, "A"); else if(IS_INTRA4x4(mb_type)) - printf("i"); + av_log(s->avctx, AV_LOG_DEBUG, "i"); else if(IS_INTRA16x16(mb_type)) - printf("I"); + av_log(s->avctx, AV_LOG_DEBUG, "I"); else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)) - printf("d"); + av_log(s->avctx, AV_LOG_DEBUG, "d"); else if(IS_DIRECT(mb_type)) - printf("D"); + av_log(s->avctx, AV_LOG_DEBUG, "D"); else if(IS_GMC(mb_type) && IS_SKIP(mb_type)) - printf("g"); + av_log(s->avctx, AV_LOG_DEBUG, "g"); else if(IS_GMC(mb_type)) - printf("G"); + av_log(s->avctx, AV_LOG_DEBUG, "G"); else if(IS_SKIP(mb_type)) - printf("S"); + av_log(s->avctx, AV_LOG_DEBUG, "S"); else if(!USES_LIST(mb_type, 1)) - printf(">"); + av_log(s->avctx, AV_LOG_DEBUG, ">"); else if(!USES_LIST(mb_type, 0)) - printf("<"); + av_log(s->avctx, AV_LOG_DEBUG, "<"); else{ assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1)); - printf("X"); + av_log(s->avctx, AV_LOG_DEBUG, "X"); } //segmentation if(IS_8X8(mb_type)) - printf("+"); + av_log(s->avctx, AV_LOG_DEBUG, "+"); else if(IS_16X8(mb_type)) - printf("-"); + av_log(s->avctx, AV_LOG_DEBUG, "-"); else if(IS_8X16(mb_type)) - printf("¦"); + av_log(s->avctx, AV_LOG_DEBUG, "¦"); else if(IS_INTRA(mb_type) || IS_16X16(mb_type)) - printf(" "); + av_log(s->avctx, AV_LOG_DEBUG, " "); else - printf("?"); + av_log(s->avctx, AV_LOG_DEBUG, "?"); if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264) - printf("="); + av_log(s->avctx, AV_LOG_DEBUG, "="); else - printf(" "); + av_log(s->avctx, AV_LOG_DEBUG, " "); } -// printf(" "); +// av_log(s->avctx, AV_LOG_DEBUG, " "); } - printf("\n"); + av_log(s->avctx, AV_LOG_DEBUG, "\n"); } } - - if((s->avctx->debug&FF_DEBUG_VIS_MV) && s->motion_val){ + + if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){ const int shift= 1 + s->quarter_sample; int mb_y; - uint8_t *ptr= pict->data[0]; + uint8_t *ptr; + int i; + int 
h_chroma_shift, v_chroma_shift; s->low_delay=0; //needed to see the vectors without trashing the buffers + avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); + for(i=0; i<3; i++){ + memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*s->height:pict->linesize[i]*s->height >> v_chroma_shift); + pict->data[i]= s->visualization_buffer[i]; + } + pict->type= FF_BUFFER_TYPE_COPY; + ptr= pict->data[0]; + for(mb_y=0; mb_y<s->mb_height; mb_y++){ int mb_x; for(mb_x=0; mb_x<s->mb_width; mb_x++){ const int mb_index= mb_x + mb_y*s->mb_stride; - if(IS_8X8(s->current_picture.mb_type[mb_index])){ - int i; - for(i=0; i<4; i++){ + if((s->avctx->debug_mv) && pict->motion_val){ + int type; + for(type=0; type<3; type++){ + int direction; + switch (type) { + case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE)) + continue; + direction = 0; + break; + case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE)) + continue; + direction = 0; + break; + case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE)) + continue; + direction = 1; + break; + } + if(!USES_LIST(pict->mb_type[mb_index], direction)) + continue; + + if(IS_8X8(pict->mb_type[mb_index])){ + int i; + for(i=0; i<4; i++){ int sx= mb_x*16 + 4 + 8*(i&1); int sy= mb_y*16 + 4 + 8*(i>>1); int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2); - int mx= (s->motion_val[xy][0]>>shift) + sx; - int my= (s->motion_val[xy][1]>>shift) + sy; + int mx= (pict->motion_val[direction][xy][0]>>shift) + sx; + int my= (pict->motion_val[direction][xy][1]>>shift) + sy; draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100); + } + }else if(IS_16X8(pict->mb_type[mb_index])){ + int i; + for(i=0; i<2; i++){ + int sx=mb_x*16 + 8; + int sy=mb_y*16 + 4 + 8*i; + int xy=1 + mb_x*2 + (mb_y*2 + 1 + i)*(s->mb_width*2 + 2); + int mx=(pict->motion_val[direction][xy][0]>>shift) + sx; + int my=(pict->motion_val[direction][xy][1]>>shift) + sy; + draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100); + } + }else{ + int sx= mb_x*16 + 8; + int sy= mb_y*16 + 8; + int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2); + int mx= (pict->motion_val[direction][xy][0]>>shift) + sx; + int my= (pict->motion_val[direction][xy][1]>>shift) + sy; + draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100); + } + } + } + if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){ + uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL; + int y; + for(y=0; y<8; y++){ + *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c; + *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c; + } + } + if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){ + int mb_type= pict->mb_type[mb_index]; + uint64_t u,v; + int y; +#define COLOR(theta, r)\ +u= (int)(128 + r*cos(theta*3.141592/180));\ +v= (int)(128 + r*sin(theta*3.141592/180)); + + + u=v=128; + if(IS_PCM(mb_type)){ + COLOR(120,48) + }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){ + COLOR(30,48) + }else if(IS_INTRA4x4(mb_type)){ + COLOR(90,48) + }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){ +// COLOR(120,48) + }else if(IS_DIRECT(mb_type)){ + COLOR(150,48) + }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){ + COLOR(170,48) + }else if(IS_GMC(mb_type)){ + COLOR(190,48) + }else if(IS_SKIP(mb_type)){ +// COLOR(180,48) + }else if(!USES_LIST(mb_type, 1)){ + 
COLOR(240,48) + }else if(!USES_LIST(mb_type, 0)){ + COLOR(0,48) + }else{ + assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1)); + COLOR(300,48) + } + + u*= 0x0101010101010101ULL; + v*= 0x0101010101010101ULL; + for(y=0; y<8; y++){ + *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u; + *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v; + } + + //segmentation + if(IS_8X8(mb_type) || IS_16X8(mb_type)){ + *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL; + *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL; + } + if(IS_8X8(mb_type) || IS_8X16(mb_type)){ + for(y=0; y<16; y++) + pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80; + } + + if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){ + // hmm } - }else{ - int sx= mb_x*16 + 8; - int sy= mb_y*16 + 8; - int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2); - int mx= (s->motion_val[xy][0]>>shift) + sx; - int my= (s->motion_val[xy][1]>>shift) + sy; - draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100); } s->mbskip_table[mb_index]=0; } @@ -1374,7 +1675,7 @@ static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int st for(y=0; y<h; y+=16){ for(x=0; x<w; x+=16){ int offset= x + y*stride; - int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride); + int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16); int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8; int sae = get_sae(src + offset, mean, stride); @@ -1397,10 +1698,10 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){ if(pic_arg->linesize[1] != s->uvlinesize) direct=0; if(pic_arg->linesize[2] != s->uvlinesize) direct=0; -// printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize); +// av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize); if(direct){ - i= find_unused_picture(s, 1); + i= ff_find_unused_picture(s, 1); pic= (AVFrame*)&s->picture[i]; pic->reference= 3; @@ -1412,7 +1713,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){ alloc_picture(s, (Picture*)pic, 1); }else{ int offset= 16; - i= find_unused_picture(s, 0); + i= ff_find_unused_picture(s, 0); pic= (AVFrame*)&s->picture[i]; pic->reference= 3; @@ -1449,13 +1750,9 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){ } } } - pic->quality= pic_arg->quality; - pic->pict_type= pic_arg->pict_type; - pic->pts = pic_arg->pts; - - if(s->input_picture[encoding_delay]) - pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1; + copy_picture_attributes(pic, pic_arg); + pic->display_picture_number= s->input_picture_number++; } /* shift buffer entries */ @@ -1469,10 +1766,6 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){ static void select_input_picture(MpegEncContext *s){ int i; - int coded_pic_num=0; - - if(s->reordered_input_picture[0]) - coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1; for(i=1; i<MAX_PICTURE_COUNT; i++) s->reordered_input_picture[i-1]= s->reordered_input_picture[i]; @@ -1483,7 +1776,7 @@ static void select_input_picture(MpegEncContext *s){ if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){ s->reordered_input_picture[0]= s->input_picture[0]; s->reordered_input_picture[0]->pict_type= I_TYPE; - 
s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; + s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; }else{ int b_frames; @@ -1504,13 +1797,13 @@ static void select_input_picture(MpegEncContext *s){ } if(b_frames > s->max_b_frames){ - fprintf(stderr, "warning, too many bframes in a row\n"); + av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n"); b_frames = s->max_b_frames; } - }else if(s->b_frame_strategy==0){ + }else if(s->avctx->b_frame_strategy==0){ b_frames= s->max_b_frames; while(b_frames && !s->input_picture[b_frames]) b_frames--; - }else if(s->b_frame_strategy==1){ + }else if(s->avctx->b_frame_strategy==1){ for(i=1; i<s->max_b_frames+1; i++){ if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){ s->input_picture[i]->b_frame_score= @@ -1529,27 +1822,33 @@ static void select_input_picture(MpegEncContext *s){ s->input_picture[i]->b_frame_score=0; } }else{ - fprintf(stderr, "illegal b frame strategy\n"); + av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n"); b_frames=0; } emms_c(); //static int b_count=0; //b_count+= b_frames; -//printf("b_frames: %d\n", b_count); - +//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count); + if(s->picture_in_gop_number + b_frames >= s->gop_size){ + if(s->flags & CODEC_FLAG_CLOSED_GOP) + b_frames=0; + s->input_picture[b_frames]->pict_type= I_TYPE; + } + + if( (s->flags & CODEC_FLAG_CLOSED_GOP) + && b_frames + && s->input_picture[b_frames]->pict_type== I_TYPE) + b_frames--; + s->reordered_input_picture[0]= s->input_picture[b_frames]; - if( s->picture_in_gop_number + b_frames >= s->gop_size - || s->reordered_input_picture[0]->pict_type== I_TYPE) - s->reordered_input_picture[0]->pict_type= I_TYPE; - else + if(s->reordered_input_picture[0]->pict_type != I_TYPE) s->reordered_input_picture[0]->pict_type= P_TYPE; - s->reordered_input_picture[0]->coded_picture_number= coded_pic_num; + s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; for(i=0; i<b_frames; i++){ - coded_pic_num++; s->reordered_input_picture[i+1]= s->input_picture[i]; s->reordered_input_picture[i+1]->pict_type= B_TYPE; - s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num; + s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++; } } } @@ -1562,7 +1861,7 @@ static void select_input_picture(MpegEncContext *s){ if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){ // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable - int i= find_unused_picture(s, 0); + int i= ff_find_unused_picture(s, 0); Picture *pic= &s->picture[i]; /* mark us unused / free shared pic */ @@ -1570,12 +1869,8 @@ static void select_input_picture(MpegEncContext *s){ s->reordered_input_picture[0]->data[i]= NULL; s->reordered_input_picture[0]->type= 0; - //FIXME bad, copy * except - pic->pict_type = s->reordered_input_picture[0]->pict_type; - pic->quality = s->reordered_input_picture[0]->quality; - pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number; - pic->reference = s->reordered_input_picture[0]->reference; - pic->pts = s->reordered_input_picture[0]->pts; + copy_picture_attributes((AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]); + pic->reference = s->reordered_input_picture[0]->reference; alloc_picture(s, pic, 0); @@ -1605,10 +1900,10 @@ int MPV_encode_picture(AVCodecContext *avctx, { MpegEncContext *s = avctx->priv_data; AVFrame *pic_arg = data; - int i; + int i, 
stuffing_count; if(avctx->pix_fmt != PIX_FMT_YUV420P){ - fprintf(stderr, "this codec supports only YUV420P\n"); + av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n"); return -1; } @@ -1622,7 +1917,6 @@ int MPV_encode_picture(AVCodecContext *avctx, /* output? */ if(s->new_picture.data[0]){ - s->pict_type= s->new_picture.pict_type; //emms_c(); //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale); @@ -1645,25 +1939,66 @@ int MPV_encode_picture(AVCodecContext *avctx, #if defined(CONFIG_ENCODERS) || !defined(XINE_MPEG_ENCODER) if (s->out_format == FMT_MJPEG) mjpeg_picture_trailer(s); +#endif if(s->flags&CODEC_FLAG_PASS1) ff_write_pass1_stats(s); -#endif for(i=0; i<4; i++){ avctx->error[i] += s->current_picture_ptr->error[i]; } - } - s->input_picture_number++; + flush_put_bits(&s->pb); + s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8; + + stuffing_count= ff_vbv_update(s, s->frame_bits); + if(stuffing_count){ + switch(s->codec_id){ + case CODEC_ID_MPEG1VIDEO: + case CODEC_ID_MPEG2VIDEO: + while(stuffing_count--){ + put_bits(&s->pb, 8, 0); + } + break; + case CODEC_ID_MPEG4: + put_bits(&s->pb, 16, 0); + put_bits(&s->pb, 16, 0x1C3); + stuffing_count -= 4; + while(stuffing_count--){ + put_bits(&s->pb, 8, 0xFF); + } + break; + default: + av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n"); + } + flush_put_bits(&s->pb); + s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8; + } - flush_put_bits(&s->pb); - s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8; - - s->total_bits += s->frame_bits; - avctx->frame_bits = s->frame_bits; + /* update mpeg1/2 vbv_delay for CBR */ + if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate){ + int vbv_delay; + + assert(s->repeat_first_field==0); + + vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate); + assert(vbv_delay < 0xFFFF); + + s->vbv_delay_ptr[0] &= 0xF8; + s->vbv_delay_ptr[0] |= vbv_delay>>13; + s->vbv_delay_ptr[1] = vbv_delay>>5; + s->vbv_delay_ptr[2] &= 0x07; + s->vbv_delay_ptr[2] |= vbv_delay<<3; + } + s->total_bits += s->frame_bits; + avctx->frame_bits = s->frame_bits; + }else{ + assert((pbBufPtr(&s->pb) == s->pb.buf)); + s->frame_bits=0; + } + assert((s->frame_bits&7)==0); - return pbBufPtr(&s->pb) - s->pb.buf; + return s->frame_bits/8; } #endif //CONFIG_ENCODERS @@ -1890,6 +2225,42 @@ void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, } } +static inline int hpel_motion(MpegEncContext *s, + uint8_t *dest, uint8_t *src, + int src_x, int src_y, + int width, int height, int stride, + int h_edge_pos, int v_edge_pos, + int w, int h, op_pixels_func *pix_op, + int motion_x, int motion_y) +{ + int dxy; + int emu=0; + + dxy = ((motion_y & 1) << 1) | (motion_x & 1); + src_x += motion_x >> 1; + src_y += motion_y >> 1; + + /* WARNING: do no forget half pels */ + src_x = clip(src_x, -16, width); //FIXME unneeded for emu? 
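/* Editor's illustrative sketch, not part of the patch: for CBR operation
 * (rc_min_rate == rc_max_rate) the hunk above rewrites the 16-bit vbv_delay
 * field of the already-written MPEG-1/2 picture header -- buffer occupancy in
 * 90 kHz ticks, split 3+8+5 bits over three header bytes.  The bit packing
 * below mirrors the patch; buffer_bits and max_rate are example inputs. */
#include <assert.h>
#include <math.h>
#include <stdint.h>

static void write_vbv_delay(uint8_t vbv_delay_ptr[3], double buffer_bits, int max_rate)
{
    int vbv_delay = (int)lrint(90000.0 * buffer_bits / max_rate); /* 90 kHz ticks */

    assert(vbv_delay >= 0 && vbv_delay < 0xFFFF);

    vbv_delay_ptr[0] &= 0xF8;               /* low 3 bits of byte 0 ...      */
    vbv_delay_ptr[0] |= vbv_delay >> 13;    /* ... carry the top 3 bits      */
    vbv_delay_ptr[1]  = vbv_delay >> 5;     /* middle 8 bits                 */
    vbv_delay_ptr[2] &= 0x07;               /* high 5 bits of byte 2 ...     */
    vbv_delay_ptr[2] |= vbv_delay << 3;     /* ... carry the low 5 bits      */
}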
+ if (src_x == width) + dxy &= ~1; + src_y = clip(src_y, -16, height); + if (src_y == height) + dxy &= ~2; + src += src_y * stride + src_x; + + if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){ + if( (unsigned)src_x > h_edge_pos - (motion_x&1) - w + || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){ + ff_emulated_edge_mc(s->edge_emu_buffer, src, stride, w+1, h+1, + src_x, src_y, h_edge_pos, v_edge_pos); + src= s->edge_emu_buffer; + emu=1; + } + } + pix_op[dxy](dest, src, stride, h); + return emu; +} /* apply one mpeg motion vector to the three components */ static inline void mpeg_motion(MpegEncContext *s, @@ -1900,7 +2271,7 @@ static inline void mpeg_motion(MpegEncContext *s, int motion_x, int motion_y, int h) { uint8_t *ptr; - int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize; + int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, uvlinesize; int emu=0; #if 0 if(s->quarter_sample) @@ -1909,34 +2280,19 @@ if(s->quarter_sample) motion_y>>=1; } #endif - dxy = ((motion_y & 1) << 1) | (motion_x & 1); - src_x = s->mb_x * 16 + (motion_x >> 1); - src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1); - - /* WARNING: do no forget half pels */ + height = s->height >> field_based; v_edge_pos = s->v_edge_pos >> field_based; - src_x = clip(src_x, -16, s->width); - if (src_x == s->width) - dxy &= ~1; - src_y = clip(src_y, -16, height); - if (src_y == height) - dxy &= ~2; - linesize = s->current_picture.linesize[0] << field_based; uvlinesize = s->current_picture.linesize[1] << field_based; - ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset; - dest_y += dest_offset; - if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){ - if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16 - || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? 
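/* Editor's illustrative sketch, not part of the patch: hpel_motion above
 * reduces a half-pel motion vector to an integer source offset plus a 2-bit
 * index (dxy) that selects one of the four put/avg pixel routines: copy,
 * horizontal, vertical or diagonal interpolation. */
static void split_hpel_mv(int motion_x, int motion_y, int stride,
                          int *src_offset, int *dxy)
{
    *dxy        = ((motion_y & 1) << 1) | (motion_x & 1);     /* 0..3        */
    *src_offset = (motion_y >> 1) * stride + (motion_x >> 1); /* full pels   */
}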
and uv below - src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos); - ptr= s->edge_emu_buffer + src_offset; - emu=1; - } - } - pix_op[0][dxy](dest_y, ptr, linesize, h); + emu= hpel_motion(s, + dest_y + dest_offset, ref_picture[0] + src_offset, + s->mb_x * 16, s->mb_y * (16 >> field_based), + s->width, height, s->current_picture.linesize[0] << field_based, + s->h_edge_pos, v_edge_pos, + 16, h, pix_op[0], + motion_x, motion_y); + if(s->flags&CODEC_FLAG_GRAY) return; @@ -1981,6 +2337,87 @@ if(s->quarter_sample) } pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1); } +//FIXME move to dsputil, avg variant, 16x16 version +static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){ + int x; + uint8_t * const top = src[1]; + uint8_t * const left = src[2]; + uint8_t * const mid = src[0]; + uint8_t * const right = src[3]; + uint8_t * const bottom= src[4]; +#define OBMC_FILTER(x, t, l, m, r, b)\ + dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3 +#define OBMC_FILTER4(x, t, l, m, r, b)\ + OBMC_FILTER(x , t, l, m, r, b);\ + OBMC_FILTER(x+1 , t, l, m, r, b);\ + OBMC_FILTER(x +stride, t, l, m, r, b);\ + OBMC_FILTER(x+1+stride, t, l, m, r, b); + + x=0; + OBMC_FILTER (x , 2, 2, 4, 0, 0); + OBMC_FILTER (x+1, 2, 1, 5, 0, 0); + OBMC_FILTER4(x+2, 2, 1, 5, 0, 0); + OBMC_FILTER4(x+4, 2, 0, 5, 1, 0); + OBMC_FILTER (x+6, 2, 0, 5, 1, 0); + OBMC_FILTER (x+7, 2, 0, 4, 2, 0); + x+= stride; + OBMC_FILTER (x , 1, 2, 5, 0, 0); + OBMC_FILTER (x+1, 1, 2, 5, 0, 0); + OBMC_FILTER (x+6, 1, 0, 5, 2, 0); + OBMC_FILTER (x+7, 1, 0, 5, 2, 0); + x+= stride; + OBMC_FILTER4(x , 1, 2, 5, 0, 0); + OBMC_FILTER4(x+2, 1, 1, 6, 0, 0); + OBMC_FILTER4(x+4, 1, 0, 6, 1, 0); + OBMC_FILTER4(x+6, 1, 0, 5, 2, 0); + x+= 2*stride; + OBMC_FILTER4(x , 0, 2, 5, 0, 1); + OBMC_FILTER4(x+2, 0, 1, 6, 0, 1); + OBMC_FILTER4(x+4, 0, 0, 6, 1, 1); + OBMC_FILTER4(x+6, 0, 0, 5, 2, 1); + x+= 2*stride; + OBMC_FILTER (x , 0, 2, 5, 0, 1); + OBMC_FILTER (x+1, 0, 2, 5, 0, 1); + OBMC_FILTER4(x+2, 0, 1, 5, 0, 2); + OBMC_FILTER4(x+4, 0, 0, 5, 1, 2); + OBMC_FILTER (x+6, 0, 0, 5, 2, 1); + OBMC_FILTER (x+7, 0, 0, 5, 2, 1); + x+= stride; + OBMC_FILTER (x , 0, 2, 4, 0, 2); + OBMC_FILTER (x+1, 0, 1, 5, 0, 2); + OBMC_FILTER (x+6, 0, 0, 5, 1, 2); + OBMC_FILTER (x+7, 0, 0, 4, 2, 2); +} + +/* obmc for 1 8x8 luma block */ +static inline void obmc_motion(MpegEncContext *s, + uint8_t *dest, uint8_t *src, + int src_x, int src_y, + op_pixels_func *pix_op, + int16_t mv[5][2]/* mid top left right bottom*/) +#define MID 0 +{ + int i; + uint8_t *ptr[5]; + + assert(s->quarter_sample==0); + + for(i=0; i<5; i++){ + if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){ + ptr[i]= ptr[MID]; + }else{ + ptr[i]= s->edge_emu_buffer + 16 + 8*(i&1) + s->linesize*8*(i>>1); + hpel_motion(s, ptr[i], src, + src_x, src_y, + s->width, s->height, s->linesize, + s->h_edge_pos, s->v_edge_pos, + 8, 8, pix_op, + mv[i][0], mv[i][1]); + } + } + + put_obmc(dest, ptr, s->linesize); +} static inline void qpel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, @@ -2090,6 +2527,55 @@ inline int ff_h263_round_chroma(int x){ } /** + * h263 chorma 4mv motion compensation. 
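/* Editor's illustrative sketch, not part of the patch: put_obmc above blends
 * the prediction made from the current motion vector with predictions made
 * from the neighbouring blocks' vectors; every pixel is a weighted sum whose
 * weights add up to 8, so "+4, >>3" rounds and normalises it.  Per-pixel
 * version below; the weight values are left to the caller here, they are not
 * the table from the patch. */
#include <stdint.h>

static uint8_t obmc_blend(uint8_t top, uint8_t left, uint8_t mid,
                          uint8_t right, uint8_t bottom,
                          int wt, int wl, int wm, int wr, int wb)
{
    /* wt+wl+wm+wr+wb must be 8 for the >>3 normalisation to be exact */
    return (uint8_t)((wt*top + wl*left + wm*mid + wr*right + wb*bottom + 4) >> 3);
}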
+ */ +static inline void chroma_4mv_motion(MpegEncContext *s, + uint8_t *dest_cb, uint8_t *dest_cr, + uint8_t **ref_picture, + op_pixels_func *pix_op, + int mx, int my){ + int dxy, emu=0, src_x, src_y, offset; + uint8_t *ptr; + + /* In case of 8X8, we construct a single chroma motion vector + with a special rounding */ + mx= ff_h263_round_chroma(mx); + my= ff_h263_round_chroma(my); + + dxy = ((my & 1) << 1) | (mx & 1); + mx >>= 1; + my >>= 1; + + src_x = s->mb_x * 8 + mx; + src_y = s->mb_y * 8 + my; + src_x = clip(src_x, -8, s->width/2); + if (src_x == s->width/2) + dxy &= ~1; + src_y = clip(src_y, -8, s->height/2); + if (src_y == s->height/2) + dxy &= ~2; + + offset = (src_y * (s->uvlinesize)) + src_x; + ptr = ref_picture[1] + offset; + if(s->flags&CODEC_FLAG_EMU_EDGE){ + if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8 + || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer; + emu=1; + } + } + pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8); + + ptr = ref_picture[2] + offset; + if(emu){ + ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); + ptr= s->edge_emu_buffer; + } + pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8); +} + +/** * motion compesation of a single macroblock * @param s context * @param dest_y luma destination pointer @@ -2106,14 +2592,74 @@ static inline void MPV_motion(MpegEncContext *s, int dir, uint8_t **ref_picture, op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16]) { - int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y; + int dxy, mx, my, src_x, src_y, motion_x, motion_y; int mb_x, mb_y, i; uint8_t *ptr, *dest; - int emu=0; mb_x = s->mb_x; mb_y = s->mb_y; + if(s->obmc && s->pict_type != B_TYPE){ + int16_t mv_cache[4][4][2]; + const int xy= s->mb_x + s->mb_y*s->mb_stride; + const int mot_stride= s->mb_width*2 + 2; + const int mot_xy= 1 + mb_x*2 + (mb_y*2 + 1)*mot_stride; + + assert(!s->mb_skiped); + + memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4); + memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); + memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4); + + if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){ + memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4); + }else{ + memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4); + } + + if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){ + *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1]; + *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1]; + }else{ + *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1]; + *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride]; + } + + if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){ + *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2]; + *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2]; + }else{ + *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2]; + *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride]; + } + + mx = 0; + my = 0; + for(i=0;i<4;i++) { + const int x= (i&1)+1; + const int y= (i>>1)+1; + int16_t mv[5][2]= { + {mv_cache[y][x ][0], mv_cache[y][x ][1]}, + {mv_cache[y-1][x][0], 
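/* Editor's illustrative sketch, not part of the patch: with four 8x8 luma
 * vectors, chroma_4mv_motion above is handed their sum and derives a single
 * chroma vector from it via ff_h263_round_chroma() (whose rounding table is
 * not shown in this hunk).  The plain round-to-nearest below is a stand-in
 * for that rounding, an assumption rather than the H.263 table. */
static void luma4_to_chroma_mv(const int mv[4][2], int *cmx, int *cmy)
{
    int sx = mv[0][0] + mv[1][0] + mv[2][0] + mv[3][0]; /* sum of 4 luma MVs */
    int sy = mv[0][1] + mv[1][1] + mv[2][1] + mv[3][1];

    /* four half-pel luma components -> one half-pel chroma component (~/8) */
    *cmx = sx >= 0 ? (sx + 4) / 8 : -((-sx + 4) / 8);
    *cmy = sy >= 0 ? (sy + 4) / 8 : -((-sy + 4) / 8);
}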
mv_cache[y-1][x][1]}, + {mv_cache[y][x-1][0], mv_cache[y][x-1][1]}, + {mv_cache[y][x+1][0], mv_cache[y][x+1][1]}, + {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}}; + //FIXME cleanup + obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize, + ref_picture[0], + mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8, + pix_op[1], + mv); + + mx += mv[0][0]; + my += mv[0][1]; + } + if(!(s->flags&CODEC_FLAG_GRAY)) + chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my); + + return; + } + switch(s->mv_type) { case MV_TYPE_16X16: #ifdef CONFIG_RISKY @@ -2179,73 +2725,21 @@ static inline void MPV_motion(MpegEncContext *s, } }else{ for(i=0;i<4;i++) { - motion_x = s->mv[dir][i][0]; - motion_y = s->mv[dir][i][1]; - - dxy = ((motion_y & 1) << 1) | (motion_x & 1); - src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8; - src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8; - - /* WARNING: do no forget half pels */ - src_x = clip(src_x, -16, s->width); - if (src_x == s->width) - dxy &= ~1; - src_y = clip(src_y, -16, s->height); - if (src_y == s->height) - dxy &= ~2; - - ptr = ref_picture[0] + (src_y * s->linesize) + (src_x); - if(s->flags&CODEC_FLAG_EMU_EDGE){ - if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 8 - || (unsigned)src_y > s->v_edge_pos - (motion_y&1) - 8){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos); - ptr= s->edge_emu_buffer; - } - } - dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize; - pix_op[1][dxy](dest, ptr, s->linesize, 8); + hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize, + ref_picture[0], + mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8, + s->width, s->height, s->linesize, + s->h_edge_pos, s->v_edge_pos, + 8, 8, pix_op[1], + s->mv[dir][i][0], s->mv[dir][i][1]); mx += s->mv[dir][i][0]; my += s->mv[dir][i][1]; } } - if(s->flags&CODEC_FLAG_GRAY) break; - /* In case of 8X8, we construct a single chroma motion vector - with a special rounding */ - mx= ff_h263_round_chroma(mx); - my= ff_h263_round_chroma(my); - dxy = ((my & 1) << 1) | (mx & 1); - mx >>= 1; - my >>= 1; - - src_x = mb_x * 8 + mx; - src_y = mb_y * 8 + my; - src_x = clip(src_x, -8, s->width/2); - if (src_x == s->width/2) - dxy &= ~1; - src_y = clip(src_y, -8, s->height/2); - if (src_y == s->height/2) - dxy &= ~2; - - offset = (src_y * (s->uvlinesize)) + src_x; - ptr = ref_picture[1] + offset; - if(s->flags&CODEC_FLAG_EMU_EDGE){ - if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8 - || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); - ptr= s->edge_emu_buffer; - emu=1; - } - } - pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8); - - ptr = ref_picture[2] + offset; - if(emu){ - ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1); - ptr= s->edge_emu_buffer; - } - pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8); + if(!(s->flags&CODEC_FLAG_GRAY)) + chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my); break; case MV_TYPE_FIELD: if (s->picture_structure == PICT_FRAME) { @@ -2385,16 +2879,16 @@ static inline void MPV_motion(MpegEncContext *s, } } break; - + default: assert(0); } } /* put block[] to dest[] */ static inline void put_dct(MpegEncContext *s, - DCTELEM *block, int i, uint8_t *dest, int line_size) + DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale) { - s->dct_unquantize(s, block, i, 
s->qscale); + s->dct_unquantize_intra(s, block, i, qscale); s->dsp.idct_put (dest, line_size, block); } @@ -2408,10 +2902,10 @@ static inline void add_dct(MpegEncContext *s, } static inline void add_dequant_dct(MpegEncContext *s, - DCTELEM *block, int i, uint8_t *dest, int line_size) + DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale) { if (s->block_last_index[i] >= 0) { - s->dct_unquantize(s, block, i, s->qscale); + s->dct_unquantize_inter(s, block, i, qscale); s->dsp.idct_add (dest, line_size, block); } @@ -2466,7 +2960,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; #ifdef HAVE_XVMC if(s->avctx->xvmc_acceleration){ - XVMC_decode_mb(s,block); + XVMC_decode_mb(s);//xvmc uses pblocks return; } #endif @@ -2497,6 +2991,7 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) qpel_mc_func (*op_qpix)[16]; const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics const int uvlinesize= s->current_picture.linesize[1]; + const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band; /* avoid copy if macroblock skipped in last frame too */ /* skip only during decoding as we might trash the buffers during encoding a bit */ @@ -2532,11 +3027,15 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) dct_linesize = linesize; dct_offset = linesize * 8; } - - dest_y= s->dest[0]; - dest_cb= s->dest[1]; - dest_cr= s->dest[2]; - + if(readable){ + dest_y= s->dest[0]; + dest_cb= s->dest[1]; + dest_cr= s->dest[2]; + }else{ + dest_y = s->edge_emu_buffer+32; //FIXME cleanup scratchpad pointers + dest_cb= s->edge_emu_buffer+48; + dest_cr= s->edge_emu_buffer+56; + } if (!s->mb_intra) { /* motion handling */ /* decoding or more than one mb_type (MC was allready done otherwise) */ @@ -2565,14 +3064,14 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) /* add dct residue */ if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){ - add_dequant_dct(s, block[0], 0, dest_y, dct_linesize); - add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize); - add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize); - add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize); + add_dequant_dct(s, block[0], 0, dest_y, dct_linesize, s->qscale); + add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize, s->qscale); + add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize, s->qscale); + add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize, s->qscale); if(!(s->flags&CODEC_FLAG_GRAY)){ - add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize); - add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize); + add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); + add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); } } else if(s->codec_id != CODEC_ID_WMV2){ add_dct(s, block[0], 0, dest_y, dct_linesize); @@ -2593,14 +3092,14 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) } else { /* dct only in intra block */ if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){ - put_dct(s, block[0], 0, dest_y, dct_linesize); - put_dct(s, block[1], 1, dest_y + 8, dct_linesize); - put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize); - put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize); + put_dct(s, block[0], 0, 
dest_y, dct_linesize, s->qscale); + put_dct(s, block[1], 1, dest_y + 8, dct_linesize, s->qscale); + put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize, s->qscale); + put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize, s->qscale); if(!(s->flags&CODEC_FLAG_GRAY)){ - put_dct(s, block[4], 4, dest_cb, uvlinesize); - put_dct(s, block[5], 5, dest_cr, uvlinesize); + put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); + put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); } }else{ s->dsp.idct_put(dest_y , dct_linesize, block[0]); @@ -2614,6 +3113,11 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) } } } + if(!readable){ + s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16); + s->dsp.put_pixels_tab[1][0](s->dest[1], dest_cb, uvlinesize, 8); + s->dsp.put_pixels_tab[1][0](s->dest[2], dest_cr, uvlinesize, 8); + } } } @@ -2673,6 +3177,7 @@ static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index int i; const int maxlevel= s->max_qcoeff; const int minlevel= s->min_qcoeff; + int overflow=0; if(s->mb_intra){ i=1; //skip clipping of intra dc @@ -2683,78 +3188,21 @@ static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index const int j= s->intra_scantable.permutated[i]; int level = block[j]; - if (level>maxlevel) level=maxlevel; - else if(level<minlevel) level=minlevel; - - block[j]= level; - } -} - -#if 0 -static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize - int score=0; - int x,y; - - for(y=0; y<7; y++){ - for(x=0; x<16; x+=4){ - score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride]) - +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]); - } - s+= stride; - } - - return score; -} - -static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize - int score=0; - int x,y; - - for(y=0; y<7; y++){ - for(x=0; x<16; x++){ - score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]); + if (level>maxlevel){ + level=maxlevel; + overflow++; + }else if(level<minlevel){ + level=minlevel; + overflow++; } - s1+= stride; - s2+= stride; - } - - return score; -} -#else -#define SQ(a) ((a)*(a)) - -static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize - int score=0; - int x,y; - - for(y=0; y<7; y++){ - for(x=0; x<16; x+=4){ - score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride]) - +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]); - } - s+= stride; - } - - return score; -} - -static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize - int score=0; - int x,y; - - for(y=0; y<7; y++){ - for(x=0; x<16; x++){ - score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]); - } - s1+= stride; - s2+= stride; + + block[j]= level; } - return score; + if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE) + av_log(s->avctx, AV_LOG_INFO, "warning, cliping %d dct coefficents to %d..%d\n", overflow, minlevel, maxlevel); } -#endif - #endif //CONFIG_ENCODERS /** @@ -2840,20 +3288,21 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) s->lambda= s->lambda_table[mb_xy]; update_qscale(s); - s->dquant= s->qscale - last_qp; + + if(!(s->flags&CODEC_FLAG_QP_RD)){ + s->dquant= s->qscale - last_qp; - if(s->out_format==FMT_H263) - s->dquant= clip(s->dquant, -2, 2); //FIXME RD + if(s->out_format==FMT_H263) + s->dquant= clip(s->dquant, -2, 2); //FIXME RD - if(s->codec_id==CODEC_ID_MPEG4){ - if(!s->mb_intra){ - if((s->mv_dir&MV_DIRECT) || 
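/* Editor's illustrative sketch, not part of the patch: clip_coeffs above now
 * counts how many quantised coefficients had to be clamped into
 * [min_qcoeff, max_qcoeff] so that a warning can be printed in the cheap
 * FF_MB_DECISION_SIMPLE mode.  Minimal stand-alone clamp below. */
static int clamp_coeffs(short *coeffs, int count, int minlevel, int maxlevel)
{
    int i, overflow = 0;

    for (i = 0; i < count; i++) {
        if      (coeffs[i] > maxlevel) { coeffs[i] = maxlevel; overflow++; }
        else if (coeffs[i] < minlevel) { coeffs[i] = minlevel; overflow++; }
    }
    return overflow; /* the caller may warn when this is non-zero */
}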
s->mv_type==MV_TYPE_8X8) - s->dquant=0; + if(s->codec_id==CODEC_ID_MPEG4){ + if(!s->mb_intra){ + if((s->mv_dir&MV_DIRECT) || s->mv_type==MV_TYPE_8X8) + s->dquant=0; + } } } - s->qscale= last_qp + s->dquant; - s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; - s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; + ff_set_qscale(s, last_qp + s->dquant); } if (s->mb_intra) { @@ -2872,17 +3321,21 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) if(s->flags&CODEC_FLAG_INTERLACED_DCT){ int progressive_score, interlaced_score; + + s->interlaced_dct=0; + progressive_score= s->dsp.ildct_cmp[4](s, ptr , NULL, wrap_y, 8) + +s->dsp.ildct_cmp[4](s, ptr + wrap_y*8, NULL, wrap_y, 8) - 400; + + if(progressive_score > 0){ + interlaced_score = s->dsp.ildct_cmp[4](s, ptr , NULL, wrap_y*2, 8) + +s->dsp.ildct_cmp[4](s, ptr + wrap_y , NULL, wrap_y*2, 8); + if(progressive_score > interlaced_score){ + s->interlaced_dct=1; - progressive_score= pix_vcmp16x8(ptr, wrap_y ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y ); - interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y , wrap_y*2); - - if(progressive_score > interlaced_score + 100){ - s->interlaced_dct=1; - - dct_offset= wrap_y; - wrap_y<<=1; - }else - s->interlaced_dct=0; + dct_offset= wrap_y; + wrap_y<<=1; + } + } } s->dsp.get_pixels(s->block[0], ptr , wrap_y); @@ -2951,19 +3404,24 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) if(s->flags&CODEC_FLAG_INTERLACED_DCT){ int progressive_score, interlaced_score; + + s->interlaced_dct=0; + progressive_score= s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y, 8) + +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400; - progressive_score= pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y ) - + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y ); - interlaced_score = pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y*2) - + pix_diff_vcmp16x8(ptr_y + wrap_y , dest_y + wrap_y , wrap_y*2); + if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400; + + if(progressive_score>0){ + interlaced_score = s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y*2, 8) + +s->dsp.ildct_cmp[0](s, dest_y + wrap_y , ptr_y + wrap_y , wrap_y*2, 8); - if(progressive_score > interlaced_score + 600){ - s->interlaced_dct=1; + if(progressive_score > interlaced_score){ + s->interlaced_dct=1; - dct_offset= wrap_y; - wrap_y<<=1; - }else - s->interlaced_dct=0; + dct_offset= wrap_y; + wrap_y<<=1; + } + } } s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y); @@ -2989,12 +3447,12 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) /* pre quantization */ if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){ //FIXME optimize - if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1; - if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1; - if(s->dsp.pix_abs8x8(ptr_y +dct_offset , dest_y +dct_offset , wrap_y) < 20*s->qscale) skip_dct[2]= 1; - if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1; - if(s->dsp.pix_abs8x8(ptr_cb , dest_cb , wrap_c) < 20*s->qscale) skip_dct[4]= 1; - if(s->dsp.pix_abs8x8(ptr_cr , dest_cr , wrap_c) < 20*s->qscale) skip_dct[5]= 1; + if(s->dsp.sad[1](NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1; + if(s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1; + if(s->dsp.sad[1](NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) 
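/* Editor's illustrative sketch, not part of the patch: the frame/field DCT
 * decision above now uses the configurable ildct_cmp functions -- the cost of
 * reading the block with the frame stride is compared against the cost with a
 * doubled (field) stride, with a fixed bias of 400 favouring the frame DCT.
 * The SAD-of-adjacent-lines metric below is only a stand-in for what the cmp
 * function measures. */
#include <stdint.h>
#include <stdlib.h>

static int line_sad(const uint8_t *p, int stride, int lines, int width)
{
    int score = 0, x, y;

    for (y = 0; y < lines - 1; y++)
        for (x = 0; x < width; x++)
            score += abs(p[x + y*stride] - p[x + (y+1)*stride]);
    return score;
}

static int use_field_dct(const uint8_t *mb, int stride)
{
    int progressive = line_sad(mb, stride, 16, 16) - 400;       /* frame bias */
    int interlaced  = line_sad(mb,          stride*2, 8, 16)    /* top field  */
                    + line_sad(mb + stride, stride*2, 8, 16);   /* bottom     */

    return progressive > 0 && progressive > interlaced;
}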
skip_dct[2]= 1; + if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1; + if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1; + if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1; #if 0 { static int stat[7]; @@ -3056,6 +3514,19 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y) s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale; } + //non c quantize code returns incorrect block_last_index FIXME + if(s->alternate_scan && s->dct_quantize != dct_quantize_c){ + for(i=0; i<6; i++){ + int j; + if(s->block_last_index[i]>0){ + for(j=63; j>0; j--){ + if(s->block[i][ s->intra_scantable.permutated[j] ]) break; + } + s->block_last_index[i]= j; + } + } + } + /* huffman encode */ switch(s->codec_id){ //FIXME funct ptr could be slightly faster case CODEC_ID_MPEG1VIDEO: @@ -3149,12 +3620,15 @@ void ff_mpeg_flush(AVCodecContext *avctx){ int i; MpegEncContext *s = avctx->priv_data; + if(s==NULL || s->picture==NULL) + return; + for(i=0; i<MAX_PICTURE_COUNT; i++){ if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL || s->picture[i].type == FF_BUFFER_TYPE_USER)) avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]); } - s->last_picture_ptr = s->next_picture_ptr = NULL; + s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL; s->parse_context.state= -1; s->parse_context.frame_start_found= 0; @@ -3200,6 +3674,7 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext d->mb_skiped= 0; d->qscale= s->qscale; + d->dquant= s->dquant; } static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){ @@ -3297,9 +3772,9 @@ static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, in int x,y; if(w==16 && h==16) - return s->dsp.sse[0](NULL, src1, src2, stride); + return s->dsp.sse[0](NULL, src1, src2, stride, 16); else if(w==8 && h==8) - return s->dsp.sse[1](NULL, src1, src2, stride); + return s->dsp.sse[1](NULL, src1, src2, stride, 8); for(y=0; y<h; y++){ for(x=0; x<w; x++){ @@ -3320,9 +3795,9 @@ static int sse_mb(MpegEncContext *s){ if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; if(w==16 && h==16) - return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize) - +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize) - +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize); + return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) + +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) + +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); else return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize) +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize) @@ -3332,7 +3807,7 @@ static int sse_mb(MpegEncContext *s){ static void encode_picture(MpegEncContext *s, int picture_number) { int mb_x, mb_y, pdif = 0; - int i; + int i, j; int bits; MpegEncContext best_s, backup_s; uint8_t bit_buf[2][3000]; @@ -3383,9 +3858,9 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->me.dia_size= 
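/* Editor's illustrative sketch, not part of the patch: the alternate_scan
 * workaround above recomputes block_last_index by walking the scan order
 * backwards to the first non-zero coefficient.  Stand-alone version: */
static int last_nonzero_index(const short block[64], const unsigned char scantable[64])
{
    int j;

    for (j = 63; j > 0; j--)
        if (block[scantable[j]])
            break;
    return j; /* 0 when at most the DC coefficient is non-zero */
}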
s->avctx->pre_dia_size; for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) { + s->mb_y = mb_y; for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) { s->mb_x = mb_x; - s->mb_y = mb_y; ff_pre_estimate_p_frame_motion(s, mb_x, mb_y); } } @@ -3395,13 +3870,13 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->me.dia_size= s->avctx->dia_size; for(mb_y=0; mb_y < s->mb_height; mb_y++) { + s->mb_y = mb_y; s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1; s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1); s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1; s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2); for(mb_x=0; mb_x < s->mb_width; mb_x++) { s->mb_x = mb_x; - s->mb_y = mb_y; s->block_index[0]+=2; s->block_index[1]+=2; s->block_index[2]+=2; @@ -3417,10 +3892,8 @@ static void encode_picture(MpegEncContext *s, int picture_number) #endif }else /* if(s->pict_type == I_TYPE) */{ /* I-Frame */ - //FIXME do we need to zero them? - memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2); - memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2); - memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height); + for(i=0; i<s->mb_stride*s->mb_height; i++) + s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; if(!s->fixed_qscale){ /* finding spatial complexity for I-frame rate control */ @@ -3445,37 +3918,66 @@ static void encode_picture(MpegEncContext *s, int picture_number) if(s->scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){ s->pict_type= I_TYPE; - memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height); + for(i=0; i<s->mb_stride*s->mb_height; i++) + s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum); } #if defined(CONFIG_ENCODERS) || !defined(XINE_MPEG_ENCODER) if(!s->umvplus){ if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) { - s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER); - + s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER); + + if(s->flags & CODEC_FLAG_INTERLACED_ME){ + int a,b; + a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select + b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I); + s->f_code= FFMAX(s->f_code, FFMAX(a,b)); + } + ff_fix_long_p_mvs(s); + ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0); + if(s->flags & CODEC_FLAG_INTERLACED_ME){ + for(i=0; i<2; i++){ + for(j=0; j<2; j++) + ff_fix_long_mvs(s, s->p_field_select_table[i], j, + s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0); + } + } } if(s->pict_type==B_TYPE){ int a, b; - a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD); - b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR); + a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD); + b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR); s->f_code = FFMAX(a, b); - a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD); - b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR); + a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD); + b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR); s->b_code = FFMAX(a, b); - ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD); - ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD); - 
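/* Editor's illustrative sketch, not part of the patch: ff_get_best_fcode
 * above picks the smallest f_code whose motion-vector range still covers
 * every vector of the frame.  Assuming the usual MPEG-4/H.263+ convention
 * that f_code N covers roughly +/-32*2^(N-1) half-pel units, the smallest
 * usable value can be found like this: */
static int smallest_fcode(int max_abs_mv_halfpel)
{
    int fcode;

    for (fcode = 1; fcode < 7; fcode++)
        if (max_abs_mv_halfpel < (32 << (fcode - 1)))
            break;
    return fcode;
}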
ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR); - ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR); + ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1); + ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1); + ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1); + ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1); + if(s->flags & CODEC_FLAG_INTERLACED_ME){ + int dir; + for(dir=0; dir<2; dir++){ + for(i=0; i<2; i++){ + for(j=0; j<2; j++){ + int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I) + : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I); + ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j, + s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1); + } + } + } + } } } #endif - + if (!s->fixed_qscale) s->current_picture.quality = ff_rate_estimate_qscale(s); @@ -3513,8 +4015,8 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3); } - convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, - s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8); + convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, + s->intra_matrix, s->intra_quant_bias, 8, 8); } //FIXME var duplication @@ -3538,7 +4040,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) msmpeg4_encode_picture_header(s, picture_number); else if (s->h263_pred) mpeg4_encode_picture_header(s, picture_number); - else if (s->h263_rv10) + else if (s->codec_id == CODEC_ID_RV10) rv10_encode_picture_header(s, picture_number); else if (s->codec_id == CODEC_ID_FLV1) ff_flv_encode_picture_header(s, picture_number); @@ -3560,6 +4062,8 @@ static void encode_picture(MpegEncContext *s, int picture_number) break; case FMT_H264: break; + default: + assert(0); } bits= get_bit_count(&s->pb); s->header_bits= bits - s->last_bits; @@ -3581,10 +4085,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->current_picture_ptr->error[i] = 0; } s->mb_skip_run = 0; - s->last_mv[0][0][0] = 0; - s->last_mv[0][0][1] = 0; - s->last_mv[1][0][0] = 0; - s->last_mv[1][0][1] = 0; + memset(s->last_mv, 0, sizeof(s->last_mv)); s->last_mv_dir = 0; @@ -3612,8 +4113,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mb_x=0; s->mb_y= mb_y; - s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; - s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; + ff_set_qscale(s, s->qscale); ff_init_block_index(s); for(mb_x=0; mb_x < s->mb_width; mb_x++) { @@ -3621,6 +4121,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) int mb_type= s->mb_type[xy]; // int d; int dmin= INT_MAX; + int dir; s->mb_x = mb_x; ff_update_block_index(s); @@ -3628,54 +4129,79 @@ static void encode_picture(MpegEncContext *s, int picture_number) /* write gob / video packet header */ #if defined(CONFIG_ENCODERS) || !defined(XINE_MPEG_ENCODER) #ifdef CONFIG_RISKY - if(s->rtp_mode && mb_y + mb_x>0){ + if(s->rtp_mode){ int current_packet_size, is_gob_start; current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob; - is_gob_start=0; - if(s->codec_id==CODEC_ID_MPEG4){ - if(current_packet_size >= s->rtp_payload_size){ + is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0; + + switch(s->codec_id){ + case 
CODEC_ID_H263: + case CODEC_ID_H263P: + if(!s->h263_slice_structured) + if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0; + break; + case CODEC_ID_MPEG2VIDEO: + if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1; + case CODEC_ID_MPEG1VIDEO: + if(s->mb_skip_run) is_gob_start=0; + break; + } + + if(is_gob_start){ + if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){ + ff_mpeg4_merge_partitions(s); + ff_mpeg4_init_partitions(s); + } + + if(s->codec_id==CODEC_ID_MPEG4) + ff_mpeg4_stuffing(&s->pb); - if(s->partitioned_frame){ - ff_mpeg4_merge_partitions(s); - ff_mpeg4_init_partitions(s); - } - ff_mpeg4_encode_video_packet_header(s); + align_put_bits(&s->pb); + flush_put_bits(&s->pb); - if(s->flags&CODEC_FLAG_PASS1){ - int bits= get_bit_count(&s->pb); - s->misc_bits+= bits - s->last_bits; - s->last_bits= bits; + assert((get_bit_count(&s->pb)&7) == 0); + current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob; + + if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){ + int r= get_bit_count(&s->pb)/8 + s->picture_number + s->codec_id + s->mb_x + s->mb_y; + int d= 100 / s->avctx->error_rate; + if(r % d == 0){ + current_packet_size=0; +#ifndef ALT_BITSTREAM_WRITER + s->pb.buf_ptr= s->ptr_lastgob; +#endif + assert(pbBufPtr(&s->pb) == s->ptr_lastgob); } - ff_mpeg4_clean_buffers(s); - is_gob_start=1; } - }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){ - if( current_packet_size >= s->rtp_payload_size - && s->mb_skip_run==0){ - ff_mpeg1_encode_slice_header(s); - ff_mpeg1_clean_buffers(s); - is_gob_start=1; - } - }else if(s->codec_id==CODEC_ID_MPEG2VIDEO){ - if( ( current_packet_size >= s->rtp_payload_size || mb_x==0) - && s->mb_skip_run==0){ + + if (s->avctx->rtp_callback) + s->avctx->rtp_callback(s->ptr_lastgob, current_packet_size, 0); + + switch(s->codec_id){ + case CODEC_ID_MPEG4: + ff_mpeg4_encode_video_packet_header(s); + ff_mpeg4_clean_buffers(s); + break; + case CODEC_ID_MPEG1VIDEO: + case CODEC_ID_MPEG2VIDEO: ff_mpeg1_encode_slice_header(s); ff_mpeg1_clean_buffers(s); - is_gob_start=1; - } - }else{ - if(current_packet_size >= s->rtp_payload_size - && s->mb_x==0 && s->mb_y%s->gob_index==0){ - + break; + case CODEC_ID_H263: + case CODEC_ID_H263P: h263_encode_gob_header(s, mb_y); - is_gob_start=1; + break; } - } - if(is_gob_start){ - s->ptr_lastgob = pbBufPtr(&s->pb); + if(s->flags&CODEC_FLAG_PASS1){ + int bits= get_bit_count(&s->pb); + s->misc_bits+= bits - s->last_bits; + s->last_bits= bits; + } + + s->ptr_lastgob += current_packet_size; s->first_slice_line=1; s->resync_mb_x=mb_x; s->resync_mb_y=mb_y; @@ -3690,8 +4216,9 @@ static void encode_picture(MpegEncContext *s, int picture_number) } s->mb_skiped=0; + s->dquant=0; //only for QP_RD - if(mb_type & (mb_type-1)){ // more than 1 MB type possible + if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible int next_block=0; int pb_bits_count, pb2_bits_count, tex_pb_bits_count; @@ -3704,54 +4231,66 @@ static void encode_picture(MpegEncContext *s, int picture_number) backup_s.tex_pb= s->tex_pb; } - if(mb_type&MB_TYPE_INTER){ + if(mb_type&CANDIDATE_MB_TYPE_INTER){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = s->p_mv_table[xy][0]; s->mv[0][0][1] = s->p_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } - if(mb_type&MB_TYPE_SKIPED){ + if(mb_type&CANDIDATE_MB_TYPE_INTER_I){ + s->mv_dir = MV_DIR_FORWARD; 
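/* Editor's illustrative sketch, not part of the patch: mb_type here is a
 * bitmask of candidate macroblock types collected during motion estimation;
 * "mb_type & (mb_type-1)" clears the lowest set bit and is therefore non-zero
 * exactly when more than one candidate has to be tried with encode_mb_hq
 * (and CODEC_FLAG_QP_RD forces that trial path even for a single candidate). */
static int more_than_one_candidate(unsigned mb_type_mask)
{
    return (mb_type_mask & (mb_type_mask - 1)) != 0;
}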
+ s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[0][i] = s->p_field_select_table[i][xy]; + s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0]; + s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1]; + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_SKIPED){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_SKIPED, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPED, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } - if(mb_type&MB_TYPE_INTER4V){ + if(mb_type&CANDIDATE_MB_TYPE_INTER4V){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; for(i=0; i<4; i++){ - s->mv[0][i][0] = s->motion_val[s->block_index[i]][0]; - s->mv[0][i][1] = s->motion_val[s->block_index[i]][1]; + s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; + s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; } - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } - if(mb_type&MB_TYPE_FORWARD){ + if(mb_type&CANDIDATE_MB_TYPE_FORWARD){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } - if(mb_type&MB_TYPE_BACKWARD){ + if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){ s->mv_dir = MV_DIR_BACKWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[1][0][0] = s->b_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_back_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb, &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]); } - if(mb_type&MB_TYPE_BIDIR){ + if(mb_type&CANDIDATE_MB_TYPE_BIDIR){ s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; @@ -3759,10 +4298,10 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } - if(mb_type&MB_TYPE_DIRECT){ + if(mb_type&CANDIDATE_MB_TYPE_DIRECT){ int mx= s->b_direct_mv_table[xy][0]; int my= s->b_direct_mv_table[xy][1]; @@ -3773,16 +4312,54 @@ static void encode_picture(MpegEncContext *s, int picture_number) ff_mpeg4_set_direct_mv(s, mx, my); #endif #endif - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb, &dmin, &next_block, mx, my); } - if(mb_type&MB_TYPE_INTRA){ + if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){ + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[0][i] = s->b_field_select_table[0][i][xy]; + s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0]; + 
s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1]; + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){ + s->mv_dir = MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[1][i] = s->b_field_select_table[1][i][xy]; + s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0]; + s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1]; + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){ + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(dir=0; dir<2; dir++){ + for(i=0; i<2; i++){ + j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy]; + s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0]; + s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1]; + } + } + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb, + &dmin, &next_block, 0, 0); + } + if(mb_type&CANDIDATE_MB_TYPE_INTRA){ s->mv_dir = 0; s->mv_type = MV_TYPE_16X16; s->mb_intra= 1; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; - encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); if(s->h263_pred || s->h263_aic){ if(best_s.mb_intra) @@ -3791,6 +4368,60 @@ static void encode_picture(MpegEncContext *s, int picture_number) ff_clean_intra_table_entries(s); //old mode? } } + + if(s->flags & CODEC_FLAG_QP_RD){ + if(best_s.mv_type==MV_TYPE_16X16 && !(best_s.mv_dir&MV_DIRECT)){ + const int last_qp= backup_s.qscale; + int dquant, dir, qp, dc[6]; + DCTELEM ac[6][16]; + const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0; + + assert(backup_s.dquant == 0); + + //FIXME intra + s->mv_dir= best_s.mv_dir; + s->mv_type = MV_TYPE_16X16; + s->mb_intra= best_s.mb_intra; + s->mv[0][0][0] = best_s.mv[0][0][0]; + s->mv[0][0][1] = best_s.mv[0][0][1]; + s->mv[1][0][0] = best_s.mv[1][0][0]; + s->mv[1][0][1] = best_s.mv[1][0][1]; + + dir= s->pict_type == B_TYPE ? 
2 : 1; + if(last_qp + dir > s->avctx->qmax) dir= -dir; + for(dquant= dir; dquant<=2 && dquant>=-2; dquant += dir){ + qp= last_qp + dquant; + if(qp < s->avctx->qmin || qp > s->avctx->qmax) + break; + backup_s.dquant= dquant; + if(s->mb_intra){ + for(i=0; i<6; i++){ + dc[i]= s->dc_val[0][ s->block_index[i] ]; + memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16); + } + } + + encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb, + &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]); + if(best_s.qscale != qp){ + if(s->mb_intra){ + for(i=0; i<6; i++){ + s->dc_val[0][ s->block_index[i] ]= dc[i]; + memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16); + } + } + if(dir > 0 && dquant==dir){ + dquant= 0; + dir= -dir; + }else + break; + } + } + qp= best_s.qscale; + s->current_picture.qscale_table[xy]= qp; + } + } + copy_context_after_encode(s, &best_s, -1); pb_bits_count= get_bit_count(&s->pb); @@ -3826,117 +4457,44 @@ static void encode_picture(MpegEncContext *s, int picture_number) MPV_decode_mb(s, s->block); } else { int motion_x, motion_y; - int intra_score; - int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride]; - - if(s->avctx->mb_decision==FF_MB_DECISION_SIMPLE && s->pict_type==P_TYPE){ //FIXME check if the mess is usefull at all - /* get luma score */ - if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){ - intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it - }else{ - uint8_t *dest_y; - - int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME - mean*= 0x01010101; - - dest_y = s->new_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16; - - for(i=0; i<16; i++){ - *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean; - *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean; - *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean; - *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean; - } - - s->mb_intra=1; - intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize); - -/* printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8, - s->current_picture.mb_var[mb_x + mb_y*s->mb_stride], - s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/ - } - - /* get chroma score */ - if(s->avctx->mb_cmp&FF_CMP_CHROMA){ - int i; - - s->mb_intra=1; - for(i=1; i<3; i++){ - uint8_t *dest_c; - int mean; - - if(s->out_format == FMT_H263){ - mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;) - }else{ - mean= (s->last_dc[i] + 4)>>3; - } - dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; - - mean*= 0x01010101; - for(i=0; i<8; i++){ - *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean; - *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean; - } - - intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize); - } - } - - /* bias */ - switch(s->avctx->mb_cmp&0xFF){ - default: - case FF_CMP_SAD: - intra_score+= 32*s->qscale; - break; - case FF_CMP_SSE: - intra_score+= 24*s->qscale*s->qscale; - break; - case FF_CMP_SATD: - intra_score+= 96*s->qscale; - break; - case FF_CMP_DCT: - intra_score+= 48*s->qscale; - break; - case FF_CMP_BIT: - intra_score+= 16; - break; - case FF_CMP_PSNR: - case FF_CMP_RD: - intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7; - break; - } - - if(intra_score < inter_score) - mb_type= MB_TYPE_INTRA; - } - s->mv_type=MV_TYPE_16X16; // only one MB-Type 
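/* Editor's illustrative sketch, not part of the patch: the CODEC_FLAG_QP_RD
 * path above re-encodes the macroblock at quantisers around last_qp and keeps
 * the qscale with the lowest rate-distortion score, stopping once a step no
 * longer improves.  Heavily simplified search below; rd_cost() is a
 * hypothetical stand-in for re-running encode_mb_hq at the given QP. */
static int pick_qp_rd(int last_qp, int qmin, int qmax,
                      int (*rd_cost)(int qp, void *ctx), void *ctx)
{
    int best_qp    = last_qp;
    int best_score = rd_cost(last_qp, ctx);
    int dir;

    for (dir = -1; dir <= 1; dir += 2) {            /* walk down, then up    */
        int qp;
        for (qp = last_qp + dir; qp >= qmin && qp <= qmax; qp += dir) {
            int score = rd_cost(qp, ctx);
            if (score >= best_score)
                break;                              /* stop when it worsens  */
            best_score = score;
            best_qp    = qp;
        }
    }
    return best_qp;
}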
possible switch(mb_type){ - case MB_TYPE_INTRA: + case CANDIDATE_MB_TYPE_INTRA: s->mv_dir = 0; s->mb_intra= 1; motion_x= s->mv[0][0][0] = 0; motion_y= s->mv[0][0][1] = 0; break; - case MB_TYPE_INTER: + case CANDIDATE_MB_TYPE_INTER: s->mv_dir = MV_DIR_FORWARD; s->mb_intra= 0; motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0]; motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1]; break; - case MB_TYPE_INTER4V: + case CANDIDATE_MB_TYPE_INTER_I: + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[0][i] = s->p_field_select_table[i][xy]; + s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0]; + s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1]; + } + motion_x = motion_y = 0; + break; + case CANDIDATE_MB_TYPE_INTER4V: s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; for(i=0; i<4; i++){ - s->mv[0][i][0] = s->motion_val[s->block_index[i]][0]; - s->mv[0][i][1] = s->motion_val[s->block_index[i]][1]; + s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; + s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; } motion_x= motion_y= 0; break; - case MB_TYPE_DIRECT: + case CANDIDATE_MB_TYPE_DIRECT: s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; s->mb_intra= 0; motion_x=s->b_direct_mv_table[xy][0]; @@ -3947,7 +4505,7 @@ static void encode_picture(MpegEncContext *s, int picture_number) #endif #endif break; - case MB_TYPE_BIDIR: + case CANDIDATE_MB_TYPE_BIDIR: s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mb_intra= 0; motion_x=0; @@ -3957,22 +4515,57 @@ static void encode_picture(MpegEncContext *s, int picture_number) s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; break; - case MB_TYPE_BACKWARD: + case CANDIDATE_MB_TYPE_BACKWARD: s->mv_dir = MV_DIR_BACKWARD; s->mb_intra= 0; motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0]; motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1]; break; - case MB_TYPE_FORWARD: + case CANDIDATE_MB_TYPE_FORWARD: s->mv_dir = MV_DIR_FORWARD; s->mb_intra= 0; motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; // printf(" %d %d ", motion_x, motion_y); break; + case CANDIDATE_MB_TYPE_FORWARD_I: + s->mv_dir = MV_DIR_FORWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[0][i] = s->b_field_select_table[0][i][xy]; + s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0]; + s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1]; + } + motion_x=motion_y=0; + break; + case CANDIDATE_MB_TYPE_BACKWARD_I: + s->mv_dir = MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(i=0; i<2; i++){ + j= s->field_select[1][i] = s->b_field_select_table[1][i][xy]; + s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0]; + s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1]; + } + motion_x=motion_y=0; + break; + case CANDIDATE_MB_TYPE_BIDIR_I: + s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; + s->mv_type = MV_TYPE_FIELD; + s->mb_intra= 0; + for(dir=0; dir<2; dir++){ + for(i=0; i<2; i++){ + j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy]; + s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0]; + s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1]; + } + } + motion_x=motion_y=0; + break; default: motion_x=motion_y=0; //gcc warning fix - printf("illegal MB type\n"); + av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n"); } encode_mb(s, motion_x, motion_y); @@ -4011,6 +4604,8 @@ static void 
encode_picture(MpegEncContext *s, int picture_number) s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, s->dest[2], w>>1, h>>1, s->uvlinesize); } + if(s->loop_filter) + ff_h263_loop_filter(s); //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb)); } } @@ -4029,50 +4624,73 @@ static void encode_picture(MpegEncContext *s, int picture_number) #endif #endif - //if (s->gob_number) - // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number); - /* Send the last GOB if RTP */ - if (s->rtp_mode) { + if (s->avctx->rtp_callback) { flush_put_bits(&s->pb); pdif = pbBufPtr(&s->pb) - s->ptr_lastgob; /* Call the RTP callback to send the last GOB */ - if (s->rtp_callback) - s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number); - s->ptr_lastgob = pbBufPtr(&s->pb); - //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif); + s->avctx->rtp_callback(s->ptr_lastgob, pdif, 0); } } +#endif //CONFIG_ENCODERS + +static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){ + const int intra= s->mb_intra; + int i; + + s->dct_count[intra]++; + + for(i=0; i<64; i++){ + int level= block[i]; + + if(level){ + if(level>0){ + s->dct_error_sum[intra][i] += level; + level -= s->dct_offset[intra][i]; + if(level<0) level=0; + }else{ + s->dct_error_sum[intra][i] -= level; + level += s->dct_offset[intra][i]; + if(level>0) level=0; + } + block[i]= level; + } + } +} + +#if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) + static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow){ const int *qmat; const uint8_t *scantable= s->intra_scantable.scantable; + const uint8_t *perm_scantable= s->intra_scantable.permutated; int max=0; unsigned int threshold1, threshold2; int bias=0; int run_tab[65]; int level_tab[65]; int score_tab[65]; + int survivor[65]; + int survivor_count; int last_run=0; int last_level=0; int last_score= 0; - int last_i= 0; - int not_coded_score= 0; - int coeff[3][64]; + int last_i; + int coeff[2][64]; int coeff_count[64]; int qmul, qadd, start_i, last_non_zero, i, dc; const int esc_length= s->ac_esc_length; uint8_t * length; uint8_t * last_length; - int score_limit=0; - int left_limit= 0; const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6); - const int patch_table= s->out_format == FMT_MPEG1 && !s->mb_intra; s->dsp.fdct (block); - + + if(s->dct_error_sum) + s->denoise_dct(s, block); qmul= qscale*16; qadd= ((qscale-1)|1)*8; @@ -4106,37 +4724,45 @@ static int dct_quantize_trellis_c(MpegEncContext *s, length = s->inter_ac_vlc_length; last_length= s->inter_ac_vlc_last_length; } + last_i= start_i; threshold1= (1<<QMAT_SHIFT) - bias - 1; threshold2= (threshold1<<1); - for(i=start_i; i<64; i++) { + for(i=63; i>=start_i; i--) { const int j = scantable[i]; - const int k= i-start_i; - int level = block[j]; - level = level * qmat[j]; + int level = block[j] * qmat[j]; + + if(((unsigned)(level+threshold1))>threshold2){ + last_non_zero = i; + break; + } + } + + for(i=start_i; i<=last_non_zero; i++) { + const int j = scantable[i]; + int level = block[j] * qmat[j]; // if( bias+level >= (1<<(QMAT_SHIFT - 3)) // || bias-level >= (1<<(QMAT_SHIFT - 3))){ if(((unsigned)(level+threshold1))>threshold2){ if(level>0){ level= (bias + level)>>QMAT_SHIFT; - coeff[0][k]= level; - coeff[1][k]= level-1; + coeff[0][i]= level; + coeff[1][i]= level-1; // coeff[2][k]= level-2; }else{ level= (bias - level)>>QMAT_SHIFT; - coeff[0][k]= -level; - coeff[1][k]= -level+1; + coeff[0][i]= -level; + coeff[1][i]= -level+1; // coeff[2][k]= -level+2; } - 
coeff_count[k]= FFMIN(level, 2); - assert(coeff_count[k]); + coeff_count[i]= FFMIN(level, 2); + assert(coeff_count[i]); max |=level; - last_non_zero = i; }else{ - coeff[0][k]= (level>>31)|1; - coeff_count[k]= 1; + coeff[0][i]= (level>>31)|1; + coeff_count[i]= 1; } } @@ -4147,73 +4773,55 @@ static int dct_quantize_trellis_c(MpegEncContext *s, return last_non_zero; } - score_tab[0]= 0; + score_tab[start_i]= 0; + survivor[0]= start_i; + survivor_count= 1; - if(patch_table){ -// length[UNI_AC_ENC_INDEX(0, 63)]= -// length[UNI_AC_ENC_INDEX(0, 65)]= 2; - } - - for(i=0; i<=last_non_zero - start_i; i++){ - int level_index, run, j; - const int dct_coeff= block[ scantable[i + start_i] ]; + for(i=start_i; i<=last_non_zero; i++){ + int level_index, j; + const int dct_coeff= ABS(block[ scantable[i] ]); const int zero_distoration= dct_coeff*dct_coeff; int best_score=256*256*256*120; - - last_score += zero_distoration; - not_coded_score += zero_distoration; for(level_index=0; level_index < coeff_count[i]; level_index++){ int distoration; int level= coeff[level_index][i]; + const int alevel= ABS(level); int unquant_coeff; assert(level); if(s->out_format == FMT_H263){ - if(level>0){ - unquant_coeff= level*qmul + qadd; - }else{ - unquant_coeff= level*qmul - qadd; - } + unquant_coeff= alevel*qmul + qadd; }else{ //MPEG1 - j= s->dsp.idct_permutation[ scantable[i + start_i] ]; //FIXME optimize + j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize if(s->mb_intra){ - if (level < 0) { - unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3; - unquant_coeff = -((unquant_coeff - 1) | 1); - } else { - unquant_coeff = (int)( level * qscale * s->intra_matrix[j]) >> 3; + unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3; unquant_coeff = (unquant_coeff - 1) | 1; - } }else{ - if (level < 0) { - unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4; - unquant_coeff = -((unquant_coeff - 1) | 1); - } else { - unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4; + unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4; unquant_coeff = (unquant_coeff - 1) | 1; - } } unquant_coeff<<= 3; } - distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff); + distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distoration; level+=64; if((level&(~127)) == 0){ - for(run=0; run<=i - left_limit; run++){ + for(j=survivor_count-1; j>=0; j--){ + int run= i - survivor[j]; int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda; score += score_tab[i-run]; if(score < best_score){ - best_score= - score_tab[i+1]= score; + best_score= score; run_tab[i+1]= run; level_tab[i+1]= level-64; } } if(s->out_format == FMT_H263){ - for(run=0; run<=i - left_limit; run++){ + for(j=survivor_count-1; j>=0; j--){ + int run= i - survivor[j]; int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda; score += score_tab[i-run]; if(score < last_score){ @@ -4226,19 +4834,20 @@ static int dct_quantize_trellis_c(MpegEncContext *s, } }else{ distoration += esc_length*lambda; - for(run=0; run<=i - left_limit; run++){ + for(j=survivor_count-1; j>=0; j--){ + int run= i - survivor[j]; int score= distoration + score_tab[i-run]; if(score < best_score){ - best_score= - score_tab[i+1]= score; + best_score= score; run_tab[i+1]= run; level_tab[i+1]= level-64; } } if(s->out_format == FMT_H263){ - for(run=0; run<=i - left_limit; run++){ + for(j=survivor_count-1; j>=0; j--){ + int run= i - 
survivor[j]; int score= distoration + score_tab[i-run]; if(score < last_score){ last_score= score; @@ -4250,26 +4859,28 @@ static int dct_quantize_trellis_c(MpegEncContext *s, } } } - - for(j=left_limit; j<=i; j++){ - score_tab[j] += zero_distoration; - } - score_limit+= zero_distoration; - if(score_tab[i+1] < score_limit) - score_limit= score_tab[i+1]; + score_tab[i+1]= best_score; + //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level - while(score_tab[ left_limit ] > score_limit + lambda) left_limit++; - - if(patch_table){ -// length[UNI_AC_ENC_INDEX(0, 63)]= -// length[UNI_AC_ENC_INDEX(0, 65)]= 3; + if(last_non_zero <= 27){ + for(; survivor_count; survivor_count--){ + if(score_tab[ survivor[survivor_count-1] ] <= best_score) + break; + } + }else{ + for(; survivor_count; survivor_count--){ + if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda) + break; + } } + + survivor[ survivor_count++ ]= i+1; } if(s->out_format != FMT_H263){ last_score= 256*256*256*120; - for(i= left_limit; i<=last_non_zero - start_i + 1; i++){ + for(i= survivor[0]; i<=last_non_zero + 1; i++){ int score= score_tab[i]; if(i) score += lambda*2; //FIXME exacter? @@ -4282,10 +4893,10 @@ static int dct_quantize_trellis_c(MpegEncContext *s, } } - s->coded_score[n] = last_score - not_coded_score; + s->coded_score[n] = last_score; - dc= block[0]; - last_non_zero= last_i - 1 + start_i; + dc= ABS(block[0]); + last_non_zero= last_i - 1; memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM)); if(last_non_zero < start_i) @@ -4297,32 +4908,22 @@ static int dct_quantize_trellis_c(MpegEncContext *s, for(i=0; i<coeff_count[0]; i++){ int level= coeff[i][0]; - int unquant_coeff, score, distoration; + int alevel= ABS(level); + int unquant_coeff, score, distortion; if(s->out_format == FMT_H263){ - if(level>0){ - unquant_coeff= (level*qmul + qadd)>>3; - }else{ - unquant_coeff= (level*qmul - qadd)>>3; - } + unquant_coeff= (alevel*qmul + qadd)>>3; }else{ //MPEG1 - if (level < 0) { - unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4; - unquant_coeff = -((unquant_coeff - 1) | 1); - } else { - unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4; - unquant_coeff = (unquant_coeff - 1) | 1; - } + unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4; + unquant_coeff = (unquant_coeff - 1) | 1; } unquant_coeff = (unquant_coeff + 4) >> 3; unquant_coeff<<= 3 + 3; - distoration= (unquant_coeff - dc) * (unquant_coeff - dc); + distortion= (unquant_coeff - dc) * (unquant_coeff - dc); level+=64; - if((level&(~127)) == 0) - score= distoration + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda; - else - score= distoration + esc_length*lambda; + if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda; + else score= distortion + esc_length*lambda; if(score < best_score){ best_score= score; @@ -4337,15 +4938,12 @@ static int dct_quantize_trellis_c(MpegEncContext *s, i= last_i; assert(last_level); -//FIXME use permutated scantable - block[ s->dsp.idct_permutation[ scantable[last_non_zero] ] ]= last_level; + + block[ perm_scantable[last_non_zero] ]= last_level; i -= last_run + 1; - for(;i>0 ; i -= run_tab[i] + 1){ - const int j= s->dsp.idct_permutation[ scantable[i - 1 + start_i] ]; - - block[j]= level_tab[i]; - assert(block[j]); + for(; i>start_i; i -= run_tab[i] + 1){ + block[ perm_scantable[i-1] ]= level_tab[i]; } return last_non_zero; @@ -4355,7 +4953,7 @@ 
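
Instead of re-testing every run length back to a moving left_limit, the rewritten loop keeps a small survivor[] list of candidate start positions and, after each coefficient, drops the ones whose accumulated score can no longer win (allowing a lambda slack for blocks with many coefficients). A sketch of that pruning step; the helper name is illustrative.

    /* Trim the survivor list after position i has been scored, then append
     * i+1 as a new candidate start.  score_tab[] and best_score are as in
     * the hunk above; long_block mirrors the last_non_zero > 27 case. */
    static int prune_survivors(int survivor[65], int survivor_count,
                               const int score_tab[65], int best_score,
                               int lambda, int i, int long_block)
    {
        int slack = long_block ? lambda : 0;
        while (survivor_count > 0 &&
               score_tab[survivor[survivor_count - 1]] > best_score + slack)
            survivor_count--;
        survivor[survivor_count++] = i + 1;
        return survivor_count;
    }
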
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow) { - int i, j, level, last_non_zero, q; + int i, j, level, last_non_zero, q, start_i; const int *qmat; const uint8_t *scantable= s->intra_scantable.scantable; int bias; @@ -4364,6 +4962,9 @@ static int dct_quantize_c(MpegEncContext *s, s->dsp.fdct (block); + if(s->dct_error_sum) + s->denoise_dct(s, block); + if (s->mb_intra) { if (!s->h263_aic) { if (n < 4) @@ -4377,23 +4978,32 @@ static int dct_quantize_c(MpegEncContext *s, /* note: block[0] is assumed to be positive */ block[0] = (block[0] + (q >> 1)) / q; - i = 1; + start_i = 1; last_non_zero = 0; qmat = s->q_intra_matrix[qscale]; bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT); } else { - i = 0; + start_i = 0; last_non_zero = -1; qmat = s->q_inter_matrix[qscale]; bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT); } threshold1= (1<<QMAT_SHIFT) - bias - 1; threshold2= (threshold1<<1); + for(i=63;i>=start_i;i--) { + j = scantable[i]; + level = block[j] * qmat[j]; - for(;i<64;i++) { + if(((unsigned)(level+threshold1))>threshold2){ + last_non_zero = i; + break; + }else{ + block[j]=0; + } + } + for(i=start_i; i<=last_non_zero; i++) { j = scantable[i]; - level = block[j]; - level = level * qmat[j]; + level = block[j] * qmat[j]; // if( bias+level >= (1<<QMAT_SHIFT) // || bias-level >= (1<<QMAT_SHIFT)){ @@ -4406,7 +5016,6 @@ static int dct_quantize_c(MpegEncContext *s, block[j]= -level; } max |=level; - last_non_zero = i; }else{ block[j]=0; } @@ -4422,7 +5031,7 @@ static int dct_quantize_c(MpegEncContext *s, #endif //CONFIG_ENCODERS -static void dct_unquantize_mpeg1_c(MpegEncContext *s, +static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; @@ -4430,62 +5039,60 @@ static void dct_unquantize_mpeg1_c(MpegEncContext *s, nCoeffs= s->block_last_index[n]; - if (s->mb_intra) { - if (n < 4) - block[0] = block[0] * s->y_dc_scale; - else - block[0] = block[0] * s->c_dc_scale; - /* XXX: only mpeg1 */ - quant_matrix = s->intra_matrix; - for(i=1;i<=nCoeffs;i++) { - int j= s->intra_scantable.permutated[i]; - level = block[j]; - if (level) { - if (level < 0) { - level = -level; - level = (int)(level * qscale * quant_matrix[j]) >> 3; - level = (level - 1) | 1; - level = -level; - } else { - level = (int)(level * qscale * quant_matrix[j]) >> 3; - level = (level - 1) | 1; - } -#ifdef PARANOID - if (level < -2048 || level > 2047) - fprintf(stderr, "unquant error %d %d\n", i, level); -#endif - block[j] = level; + if (n < 4) + block[0] = block[0] * s->y_dc_scale; + else + block[0] = block[0] * s->c_dc_scale; + /* XXX: only mpeg1 */ + quant_matrix = s->intra_matrix; + for(i=1;i<=nCoeffs;i++) { + int j= s->intra_scantable.permutated[i]; + level = block[j]; + if (level) { + if (level < 0) { + level = -level; + level = (int)(level * qscale * quant_matrix[j]) >> 3; + level = (level - 1) | 1; + level = -level; + } else { + level = (int)(level * qscale * quant_matrix[j]) >> 3; + level = (level - 1) | 1; } + block[j] = level; } - } else { - i = 0; - quant_matrix = s->inter_matrix; - for(;i<=nCoeffs;i++) { - int j= s->intra_scantable.permutated[i]; - level = block[j]; - if (level) { - if (level < 0) { - level = -level; - level = (((level << 1) + 1) * qscale * - ((int) (quant_matrix[j]))) >> 4; - level = (level - 1) | 1; - level = -level; - } else { - level = (((level << 1) + 1) * qscale * - ((int) (quant_matrix[j]))) >> 4; - level = (level - 1) | 1; - } -#ifdef PARANOID - if (level < -2048 || 
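
dct_quantize_c() now makes two passes: a backwards scan in zig-zag order that finds the last coefficient above the threshold (zeroing everything behind it), followed by a forward pass that only quantizes up to that index, so the old per-coefficient last_non_zero update disappears. The sketch below condenses both passes; it keeps the (unsigned)(level+threshold1) > threshold2 trick, which tests |level| > threshold1 with a single compare. DC handling and overflow clipping are omitted, and the function name is illustrative.

    #include <stdint.h>

    /* Two-pass quantization skeleton.  scantable, qmat, bias, start_i and
     * qmat_shift correspond to the variables used in the patch. */
    static int quantize_block(int16_t block[64], const uint8_t scantable[64],
                              const int qmat[64], int bias,
                              int start_i, int qmat_shift)
    {
        unsigned threshold1 = (1 << qmat_shift) - bias - 1;
        unsigned threshold2 = threshold1 << 1;
        int i, last_non_zero = start_i - 1;

        for (i = 63; i >= start_i; i--) {            /* backwards: find last significant coeff */
            int j = scantable[i];
            int level = block[j] * qmat[j];
            if ((unsigned)(level + threshold1) > threshold2) {
                last_non_zero = i;
                break;
            }
            block[j] = 0;
        }
        for (i = start_i; i <= last_non_zero; i++) { /* forwards: quantize up to it */
            int j = scantable[i];
            int level = block[j] * qmat[j];
            if ((unsigned)(level + threshold1) > threshold2)
                block[j] = level > 0 ?  (int)((bias + level) >> qmat_shift)
                                     : -(int)((bias - level) >> qmat_shift);
            else
                block[j] = 0;
        }
        return last_non_zero;
    }
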
level > 2047) - fprintf(stderr, "unquant error %d %d\n", i, level); -#endif - block[j] = level; + } +} + +static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, + DCTELEM *block, int n, int qscale) +{ + int i, level, nCoeffs; + const uint16_t *quant_matrix; + + nCoeffs= s->block_last_index[n]; + + quant_matrix = s->inter_matrix; + for(i=0; i<=nCoeffs; i++) { + int j= s->intra_scantable.permutated[i]; + level = block[j]; + if (level) { + if (level < 0) { + level = -level; + level = (((level << 1) + 1) * qscale * + ((int) (quant_matrix[j]))) >> 4; + level = (level - 1) | 1; + level = -level; + } else { + level = (((level << 1) + 1) * qscale * + ((int) (quant_matrix[j]))) >> 4; + level = (level - 1) | 1; } + block[j] = level; } } } -static void dct_unquantize_mpeg2_c(MpegEncContext *s, +static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; @@ -4494,61 +5101,96 @@ static void dct_unquantize_mpeg2_c(MpegEncContext *s, if(s->alternate_scan) nCoeffs= 63; else nCoeffs= s->block_last_index[n]; - if (s->mb_intra) { + if (n < 4) + block[0] = block[0] * s->y_dc_scale; + else + block[0] = block[0] * s->c_dc_scale; + quant_matrix = s->intra_matrix; + for(i=1;i<=nCoeffs;i++) { + int j= s->intra_scantable.permutated[i]; + level = block[j]; + if (level) { + if (level < 0) { + level = -level; + level = (int)(level * qscale * quant_matrix[j]) >> 3; + level = -level; + } else { + level = (int)(level * qscale * quant_matrix[j]) >> 3; + } + block[j] = level; + } + } +} + +static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, + DCTELEM *block, int n, int qscale) +{ + int i, level, nCoeffs; + const uint16_t *quant_matrix; + int sum=-1; + + if(s->alternate_scan) nCoeffs= 63; + else nCoeffs= s->block_last_index[n]; + + quant_matrix = s->inter_matrix; + for(i=0; i<=nCoeffs; i++) { + int j= s->intra_scantable.permutated[i]; + level = block[j]; + if (level) { + if (level < 0) { + level = -level; + level = (((level << 1) + 1) * qscale * + ((int) (quant_matrix[j]))) >> 4; + level = -level; + } else { + level = (((level << 1) + 1) * qscale * + ((int) (quant_matrix[j]))) >> 4; + } + block[j] = level; + sum+=level; + } + } + block[63]^=sum&1; +} + +static void dct_unquantize_h263_intra_c(MpegEncContext *s, + DCTELEM *block, int n, int qscale) +{ + int i, level, qmul, qadd; + int nCoeffs; + + assert(s->block_last_index[n]>=0); + + qmul = qscale << 1; + + if (!s->h263_aic) { if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; - quant_matrix = s->intra_matrix; - for(i=1;i<=nCoeffs;i++) { - int j= s->intra_scantable.permutated[i]; - level = block[j]; - if (level) { - if (level < 0) { - level = -level; - level = (int)(level * qscale * quant_matrix[j]) >> 3; - level = -level; - } else { - level = (int)(level * qscale * quant_matrix[j]) >> 3; - } -#ifdef PARANOID - if (level < -2048 || level > 2047) - fprintf(stderr, "unquant error %d %d\n", i, level); -#endif - block[j] = level; - } - } - } else { - int sum=-1; - i = 0; - quant_matrix = s->inter_matrix; - for(;i<=nCoeffs;i++) { - int j= s->intra_scantable.permutated[i]; - level = block[j]; - if (level) { - if (level < 0) { - level = -level; - level = (((level << 1) + 1) * qscale * - ((int) (quant_matrix[j]))) >> 4; - level = -level; - } else { - level = (((level << 1) + 1) * qscale * - ((int) (quant_matrix[j]))) >> 4; - } -#ifdef PARANOID - if (level < -2048 || level > 2047) - fprintf(stderr, "unquant error %d %d\n", i, level); -#endif - block[j] = level; - 
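
The dct_unquantize_mpeg2_inter_c() added above ends with MPEG-2 mismatch control: the sum of all reconstructed coefficients must be odd, and if it comes out even the least significant bit of coefficient 63 is toggled (that is what the sum=-1 initialisation and the final block[63]^=sum&1 achieve). The same rule in its plainest form, as a standalone sketch:

    #include <stdint.h>

    /* MPEG-2 mismatch control: force the coefficient sum to be odd by
     * toggling the LSB of the last coefficient when the sum is even. */
    static void mismatch_control(int16_t block[64])
    {
        int i, sum = 0;
        for (i = 0; i < 64; i++)
            sum += block[i];
        if (!(sum & 1))
            block[63] ^= 1;
    }
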
sum+=level; + qadd = (qscale - 1) | 1; + }else{ + qadd = 0; + } + if(s->ac_pred) + nCoeffs=63; + else + nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; + + for(i=1; i<=nCoeffs; i++) { + level = block[i]; + if (level) { + if (level < 0) { + level = level * qmul - qadd; + } else { + level = level * qmul + qadd; } + block[i] = level; } - block[63]^=sum&1; } } - -static void dct_unquantize_h263_c(MpegEncContext *s, +static void dct_unquantize_h263_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, qmul, qadd; @@ -4559,22 +5201,9 @@ static void dct_unquantize_h263_c(MpegEncContext *s, qadd = (qscale - 1) | 1; qmul = qscale << 1; - if (s->mb_intra) { - if (!s->h263_aic) { - if (n < 4) - block[0] = block[0] * s->y_dc_scale; - else - block[0] = block[0] * s->c_dc_scale; - }else - qadd = 0; - i = 1; - nCoeffs= 63; //does not allways use zigzag table - } else { - i = 0; - nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; - } + nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; - for(;i<=nCoeffs;i++) { + for(i=0; i<=nCoeffs; i++) { level = block[i]; if (level) { if (level < 0) { @@ -4582,16 +5211,11 @@ static void dct_unquantize_h263_c(MpegEncContext *s, } else { level = level * qmul + qadd; } -#ifdef PARANOID - if (level < -2048 || level > 2047) - fprintf(stderr, "unquant error %d %d\n", i, level); -#endif block[i] = level; } } } - static const AVOption mpeg4_options[] = { AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000), @@ -4667,30 +5291,7 @@ static const AVOption mpeg4_options[] = }; #if defined(CONFIG_ENCODERS) || defined(XINE_MPEG_ENCODER) - -AVCodec mpeg1video_encoder = { - "mpeg1video", - CODEC_TYPE_VIDEO, - CODEC_ID_MPEG1VIDEO, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, -}; - -#if defined(CONFIG_ENCODERS) || !defined(XINE_MPEG_ENCODER) #ifdef CONFIG_RISKY - -AVCodec mpeg2video_encoder = { - "mpeg2video", - CODEC_TYPE_VIDEO, - CODEC_ID_MPEG2VIDEO, - sizeof(MpegEncContext), - MPV_encode_init, - MPV_encode_picture, - MPV_encode_end, -}; - AVCodec h263_encoder = { "h263", CODEC_TYPE_VIDEO, @@ -4798,7 +5399,4 @@ AVCodec mjpeg_encoder = { MPV_encode_end, }; -#endif - #endif //CONFIG_ENCODERS - diff --git a/src/libffmpeg/libavcodec/mpegvideo.h b/src/libffmpeg/libavcodec/mpegvideo.h index 30432c173..88e2f6d50 100644 --- a/src/libffmpeg/libavcodec/mpegvideo.h +++ b/src/libffmpeg/libavcodec/mpegvideo.h @@ -1,6 +1,7 @@ /* * Generic DCT based hybrid video encoder * Copyright (c) 2000, 2001, 2002 Fabrice Bellard. + * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -93,7 +94,7 @@ typedef struct RateControlContext{ FILE *stats_file; int num_entries; ///< number of RateControlEntries RateControlEntry *entry; - int buffer_index; ///< amount of bits in the video/audio buffer + double buffer_index; ///< amount of bits in the video/audio buffer Predictor pred[5]; double short_term_qsum; ///< sum of recent qscales double short_term_qcount; ///< count of recent qscales @@ -134,33 +135,10 @@ typedef struct Picture{ * halfpel luma planes. 
*/ uint8_t *interpolated[3]; - - int16_t (*motion_val[2])[2]; + int16_t (*motion_val_base[2])[2]; int8_t *ref_index[2]; uint32_t *mb_type_base; - uint32_t *mb_type; ///< mb_type_base + mb_width + 2, note: only used for decoding currently -#define MB_TYPE_INTRA4x4 0x0001 -#define MB_TYPE_INTRA16x16 0x0002 //FIXME h264 specific -#define MB_TYPE_INTRA_PCM 0x0004 //FIXME h264 specific -#define MB_TYPE_16x16 0x0008 -#define MB_TYPE_16x8 0x0010 -#define MB_TYPE_8x16 0x0020 -#define MB_TYPE_8x8 0x0040 -#define MB_TYPE_INTERLACED 0x0080 -#define MB_TYPE_DIRECT2 0x0100 //FIXME -#define MB_TYPE_ACPRED 0x0200 -#define MB_TYPE_GMC 0x0400 //FIXME mpeg4 specific -#define MB_TYPE_SKIP 0x0800 -#define MB_TYPE_P0L0 0x1000 -#define MB_TYPE_P1L0 0x2000 -#define MB_TYPE_P0L1 0x4000 -#define MB_TYPE_P1L1 0x8000 -#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0) -#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1) -#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1) -#define MB_TYPE_QUANT 0x00010000 -//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 pat, ...) - +#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if theres just one type #define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4) #define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16) #define IS_PCM(a) ((a)&MB_TYPE_INTRA_PCM) @@ -183,7 +161,7 @@ typedef struct Picture{ #define IS_QUANT(a) ((a)&MB_TYPE_QUANT) #define IS_DIR(a, part, list) ((a) & (MB_TYPE_P0L0<<((part)+2*(list)))) #define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0|MB_TYPE_P1L0)<<(2*(list)))) ///< does this mb use listX, note doesnt work if subMBs - +#define HAS_CBP(a) ((a)&MB_TYPE_CBP) int field_poc[2]; ///< h264 top/bottom POC int poc; ///< h264 frame POC @@ -196,7 +174,7 @@ typedef struct Picture{ uint16_t *mb_var; ///< Table for MB variances uint16_t *mc_mb_var; ///< Table for motion compensated MB variances uint8_t *mb_mean; ///< Table for MB luminance - int32_t *mb_cmp_score; ///< Table for MB cmp scores, for mb decission + int32_t *mb_cmp_score; ///< Table for MB cmp scores, for mb decission FIXME remove int b_frame_score; /* */ } Picture; @@ -230,23 +208,28 @@ typedef struct MotionEstContext{ int mb_penalty_factor; int pre_pass; ///< = 1 for the pre pass int dia_size; + int xmin; + int xmax; + int ymin; + int ymax; uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV int (*sub_motion_search)(struct MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, - int xmin, int ymin, int xmax, int ymax, - int pred_x, int pred_y, Picture *ref_picture, - int n, int size, uint8_t * const mv_penalty); - int (*motion_search[7])(struct MpegEncContext * s, int block, + int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[6], int stride, int uvstride, + int size, int h, uint8_t * const mv_penalty); + int (*motion_search[7])(struct MpegEncContext * s, int *mx_ptr, int *my_ptr, - int P[10][2], int pred_x, int pred_y, - int xmin, int ymin, int xmax, int ymax, Picture *ref_picture, int16_t (*last_mv)[2], + int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[6], int stride, int uvstride, int16_t (*last_mv)[2], int ref_mv_scale, uint8_t * const mv_penalty); - int (*pre_motion_search)(struct MpegEncContext * s, int block, + int (*pre_motion_search)(struct MpegEncContext * s, int *mx_ptr, int *my_ptr, - int P[10][2], int pred_x, int pred_y, - int xmin, int ymin, int xmax, int ymax, Picture *ref_picture, int16_t (*last_mv)[2], + int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[6], int stride, int uvstride, 
int16_t (*last_mv)[2], int ref_mv_scale, uint8_t * const mv_penalty); - int (*get_mb_score)(struct MpegEncContext * s, int mx, int my, int pred_x, int pred_y, Picture *ref_picture, + int (*get_mb_score)(struct MpegEncContext * s, int mx, int my, int pred_x, int pred_y, uint8_t *src_data[3], + uint8_t *ref_data[6], int stride, int uvstride, uint8_t * const mv_penalty); }MotionEstContext; @@ -260,26 +243,20 @@ typedef struct MpegEncContext { int gop_size; int intra_only; ///< if true, only intra pictures are generated int bit_rate; ///< wanted bit rate - int bit_rate_tolerance; ///< amount of +- bits (>0) enum OutputFormat out_format; ///< output format int h263_pred; ///< use mpeg4/h263 ac/dc predictions /* the following codec id fields are deprecated in favor of codec_id */ int h263_plus; ///< h263 plus headers - int h263_rv10; ///< use RV10 variation for H263 int h263_msmpeg4; ///< generate MSMPEG4 compatible stream (deprecated, use msmpeg4_version instead) - int h263_intel; ///< use I263 intel h263 header int h263_flv; ///< use flv h263 header int codec_id; /* see CODEC_ID_xxx */ int fixed_qscale; ///< fixed qscale if non zero - float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) - float qblur; ///< amount of qscale smoothing over time (0.0-1.0) - int max_qdiff; ///< max qscale difference between frames int encoding; ///< true if we are encoding (vs decoding) int flags; ///< AVCodecContext.flags (HQ, MV4, ...) + int flags2; ///< AVCodecContext.flags2 int max_b_frames; ///< max number of b-frames for encoding - int b_frame_strategy; int luma_elim_threshold; int chroma_elim_threshold; int strict_std_compliance; ///< strictly follow the std (MPEG4, ...) @@ -291,19 +268,22 @@ typedef struct MpegEncContext { /* sequence parameters */ int context_initialized; - int input_picture_number; - int picture_number; + int input_picture_number; ///< used to set pic->display_picture_number, shouldnt be used for/by anything else + int coded_picture_number; ///< used to set pic->coded_picture_number, shouldnt be used for/by anything else + int picture_number; //FIXME remove, unclear definition int picture_in_gop_number; ///< 0-> first pic in gop, ... int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input int mb_width, mb_height; ///< number of MBs horizontally & vertically int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressng of left & top MBs withoutt sig11 + int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressng + int b4_stride; ///< 4*mb_width+1 used for some 4x4 block arrays to allow simple addressng int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replicateion) int mb_num; ///< number of MBs of a picture int linesize; ///< line size, in bytes, may be different from width int uvlinesize; ///< line size, for chroma in bytes, may be different from width - Picture picture[MAX_PICTURE_COUNT]; ///< main picture buffer - Picture *input_picture[MAX_PICTURE_COUNT]; ///< next pictures on display order for encoding - Picture *reordered_input_picture[MAX_PICTURE_COUNT]; ///< pointer to the next pictures in codedorder for encoding + Picture *picture; ///< main picture buffer + Picture **input_picture; ///< next pictures on display order for encoding + Picture **reordered_input_picture; ///< pointer to the next pictures in codedorder for encoding /** * copy of the previous picture structure. 
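
The motion-search window now lives in MotionEstContext (the new xmin/xmax/ymin/ymax fields above) instead of being passed to every motion-search callback, and the callbacks take raw src/ref plane pointers plus strides. A trivial sketch of clamping a candidate vector against such a context-held window; the struct and function names here are illustrative stand-ins only.

    /* Keep a candidate MV inside the window stored in the context rather
     * than in per-call arguments.  MERange/clamp_mv are made-up names. */
    typedef struct MERange {
        int xmin, xmax, ymin, ymax;
    } MERange;

    static void clamp_mv(const MERange *c, int *mx, int *my)
    {
        if (*mx < c->xmin) *mx = c->xmin;
        if (*mx > c->xmax) *mx = c->xmax;
        if (*my < c->ymin) *my = c->ymin;
        if (*my > c->ymax) *my = c->ymax;
    }
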
@@ -332,12 +312,14 @@ typedef struct MpegEncContext { Picture *last_picture_ptr; ///< pointer to the previous picture. Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred) Picture *current_picture_ptr; ///< pointer to the current picture + uint8_t *visualization_buffer[3]; //< temporary buffer vor MV visualization int last_dc[3]; ///< last DC values for MPEG1 int16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be continuous int16_t dc_cache[4*5]; int y_dc_scale, c_dc_scale; uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table uint8_t *c_dc_scale_table; ///< qscale -> c_dc_scale table + const uint8_t *chroma_qscale_table; ///< qscale -> chroma_qscale (h263) uint8_t *coded_block; ///< used for coded block pattern prediction (msmpeg4v3, wmv1) int16_t (*ac_val[3])[16]; ///< used for for mpeg4 AC prediction, all 3 arrays must be continuous int ac_pred; @@ -353,6 +335,7 @@ typedef struct MpegEncContext { uint8_t *edge_emu_buffer; ///< points into the middle of allocated_edge_emu_buffer int qscale; ///< QP + int chroma_qscale; ///< chroma QP int lambda; ///< lagrange multipler used in rate distortion int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT int *lambda_table; @@ -370,19 +353,24 @@ typedef struct MpegEncContext { DSPContext dsp; ///< pointers for accelerated dsp fucntions int f_code; ///< forward MV resolution int b_code; ///< backward MV resolution for B Frames (mpeg4) - int16_t (*motion_val)[2]; int16_t (*p_mv_table_base)[2]; int16_t (*b_forw_mv_table_base)[2]; int16_t (*b_back_mv_table_base)[2]; int16_t (*b_bidir_forw_mv_table_base)[2]; int16_t (*b_bidir_back_mv_table_base)[2]; int16_t (*b_direct_mv_table_base)[2]; + int16_t (*p_field_mv_table_base[2][2])[2]; + int16_t (*b_field_mv_table_base[2][2][2])[2]; int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) p-frame encoding int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode b-frame encoding int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode b-frame encoding int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode b-frame encoding + int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced p-frame encoding + int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced b-frame encoding + uint8_t (*p_field_select_table[2]); + uint8_t (*b_field_select_table[2][2]); int me_method; ///< ME algorithm int scene_change_score; int mv_dir; @@ -417,17 +405,22 @@ typedef struct MpegEncContext { int mb_x, mb_y; int mb_skip_run; int mb_intra; - uint8_t *mb_type; ///< Table for MB type FIXME remove and use picture->mb_type -#define MB_TYPE_INTRA 0x01 -#define MB_TYPE_INTER 0x02 -#define MB_TYPE_INTER4V 0x04 -#define MB_TYPE_SKIPED 0x08 + uint16_t *mb_type; ///< Table for candidate MB types for encoding +#define CANDIDATE_MB_TYPE_INTRA 0x01 +#define CANDIDATE_MB_TYPE_INTER 0x02 +#define CANDIDATE_MB_TYPE_INTER4V 0x04 +#define CANDIDATE_MB_TYPE_SKIPED 0x08 //#define MB_TYPE_GMC 0x10 -#define MB_TYPE_DIRECT 0x10 -#define MB_TYPE_FORWARD 0x20 -#define MB_TYPE_BACKWARD 0x40 -#define MB_TYPE_BIDIR 0x80 +#define CANDIDATE_MB_TYPE_DIRECT 0x10 +#define CANDIDATE_MB_TYPE_FORWARD 0x20 +#define CANDIDATE_MB_TYPE_BACKWARD 0x40 +#define CANDIDATE_MB_TYPE_BIDIR 0x80 + +#define CANDIDATE_MB_TYPE_INTER_I 0x100 +#define 
CANDIDATE_MB_TYPE_FORWARD_I 0x200 +#define CANDIDATE_MB_TYPE_BACKWARD_I 0x400 +#define CANDIDATE_MB_TYPE_BIDIR_I 0x800 int block_index[6]; ///< index to current MB in block based arrays with edges int block_wrap[6]; @@ -457,24 +450,26 @@ typedef struct MpegEncContext { int coded_score[6]; /** precomputed matrix (combine qscale and DCT renorm) */ - int __align8 q_intra_matrix[32][64]; - int __align8 q_inter_matrix[32][64]; - /** identical to the above but for MMX & these are not permutated */ - uint16_t __align8 q_intra_matrix16[32][64]; - uint16_t __align8 q_inter_matrix16[32][64]; - uint16_t __align8 q_intra_matrix16_bias[32][64]; - uint16_t __align8 q_inter_matrix16_bias[32][64]; + int (*q_intra_matrix)[64]; + int (*q_inter_matrix)[64]; + /** identical to the above but for MMX & these are not permutated, second 64 entries are bias*/ + uint16_t (*q_intra_matrix16)[2][64]; + uint16_t (*q_inter_matrix16)[2][64]; int block_last_index[6]; ///< last non zero coefficient in block /* scantables */ ScanTable __align8 intra_scantable; ScanTable intra_h_scantable; ScanTable intra_v_scantable; ScanTable inter_scantable; ///< if inter == intra then intra should be used to reduce tha cache usage + + /* noise reduction */ + int (*dct_error_sum)[64]; + int dct_count[2]; + uint16_t (*dct_offset)[64]; void *opaque; ///< private data for the user /* bit rate control */ - int I_frame_bits; //FIXME used in mpeg12 ... int64_t wanted_bits; int64_t total_bits; int frame_bits; ///< bits used for the current frame @@ -514,13 +509,17 @@ typedef struct MpegEncContext { ParseContext parse_context; /* H.263 specific */ - int gob_number; int gob_index; + int obmc; ///< overlapped block motion compensation /* H.263+ specific */ int umvplus; ///< == H263+ && unrestricted_mv int h263_aic; ///< Advanded INTRA Coding (AIC) - int h263_aic_dir; ///< AIC direction: 0 = left, 1 = top + int h263_aic_dir; ///< AIC direction: 0 = left, 1 = top + int h263_slice_structured; + int alt_inter_vlc; ///< alternative inter vlc + int modified_quant; + int loop_filter; /* mpeg4 specific */ int time_increment_resolution; @@ -570,8 +569,6 @@ typedef struct MpegEncContext { uint8_t *tex_pb_buffer; uint8_t *pb2_buffer; int mpeg_quant; - int16_t (*field_mv_table)[2][2]; ///< used for interlaced b frame decoding - int8_t (*field_select_table)[2]; ///< wtf, no really another table for interlaced b frames int t_frame; ///< time distance of first I -> B, used for interlaced b frames int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG4 @@ -622,10 +619,10 @@ typedef struct MpegEncContext { GetBitContext gb; /* Mpeg1 specific */ - int fake_picture_number; ///< picture number at the bitstream frame rate int gop_picture_number; ///< index of the first picture of a GOP based on fake_pic_num & mpeg1 specific int last_mv_dir; ///< last mv_dir, used for b frame encoding int broken_link; ///< no_output_of_prior_pics_flag + uint8_t *vbv_delay_ptr; ///< pointer to vbv_delay in the bitstream /* MPEG2 specific - I wish I had not to support this mess. 
*/ int progressive_sequence; @@ -650,32 +647,41 @@ typedef struct MpegEncContext { int interlaced_dct; int first_slice; int first_field; ///< is 1 for the first field of a field picture 0 otherwise - + /* RTP specific */ - /* These are explained on avcodec.h */ int rtp_mode; - int rtp_payload_size; - void (*rtp_callback)(void *data, int size, int packet_number); + uint8_t *ptr_lastgob; + int swap_uv;//vcr2 codec is mpeg2 varint with UV swaped + short * pblocks[12]; DCTELEM (*block)[64]; ///< points to one of the following blocks - DCTELEM blocks[2][6][64] __align8; // for HQ mode we need to keep the best block + DCTELEM (*blocks)[6][64]; // for HQ mode we need to keep the best block int (*decode_mb)(struct MpegEncContext *s, DCTELEM block[6][64]); // used by some codecs to avoid a switch() #define SLICE_OK 0 #define SLICE_ERROR -1 #define SLICE_END -2 ///<end marker found #define SLICE_NOEND -3 ///<no end marker or error found but mb count exceeded - void (*dct_unquantize_mpeg1)(struct MpegEncContext *s, + void (*dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale); - void (*dct_unquantize_mpeg2)(struct MpegEncContext *s, + void (*dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale); - void (*dct_unquantize_h263)(struct MpegEncContext *s, + void (*dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale); - void (*dct_unquantize)(struct MpegEncContext *s, // unquantizer to use (mpeg4 can use both) + void (*dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, + DCTELEM *block/*align 16*/, int n, int qscale); + void (*dct_unquantize_h263_intra)(struct MpegEncContext *s, + DCTELEM *block/*align 16*/, int n, int qscale); + void (*dct_unquantize_h263_inter)(struct MpegEncContext *s, + DCTELEM *block/*align 16*/, int n, int qscale); + void (*dct_unquantize_intra)(struct MpegEncContext *s, // unquantizer to use (mpeg4 can use both) + DCTELEM *block/*align 16*/, int n, int qscale); + void (*dct_unquantize_inter)(struct MpegEncContext *s, // unquantizer to use (mpeg4 can use both) DCTELEM *block/*align 16*/, int n, int qscale); int (*dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow); int (*fast_dct_quantize)(struct MpegEncContext *s, DCTELEM *block/*align 16*/, int n, int qscale, int *overflow); + void (*denoise_dct)(struct MpegEncContext *s, DCTELEM *block); } MpegEncContext; @@ -716,8 +722,10 @@ void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, #define END_NOT_FOUND -100 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size); void ff_mpeg_flush(AVCodecContext *avctx); -void ff_print_debug_info(MpegEncContext *s, Picture *pict); +void ff_print_debug_info(MpegEncContext *s, AVFrame *pict); void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix); +int ff_find_unused_picture(MpegEncContext *s, int shared); +void ff_denoise_dct(MpegEncContext *s, DCTELEM *block); void ff_er_frame_start(MpegEncContext *s); void ff_er_frame_end(MpegEncContext *s); @@ -756,7 +764,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s, int mb_x, int mb_y); int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type); void ff_fix_long_p_mvs(MpegEncContext * s); -void ff_fix_long_b_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, int type); +void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select, + int16_t 
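
The single dct_unquantize_mpeg1/mpeg2/h263 hooks are split above into _intra/_inter pairs plus generic dct_unquantize_intra/dct_unquantize_inter dispatch pointers, so the intra-versus-inter decision is taken once per block through a pointer instead of with an if (s->mb_intra) inside every unquantizer. A self-contained sketch of that dispatch pattern; the types and names below are illustrative stand-ins, not the MpegEncContext fields themselves.

    #include <stdint.h>

    typedef void (*unquant_fn)(int16_t *block, int n, int qscale);

    /* Stand-in for the pair of pointers a codec installs once at init time
     * (e.g. the mpeg1 or mpeg2 intra/inter routines). */
    typedef struct UnquantPair {
        unquant_fn intra;
        unquant_fn inter;
    } UnquantPair;

    /* Per-block call site: one branch on mb_intra, then an indirect call,
     * instead of branching inside each unquantizer. */
    static void unquantize_block(const UnquantPair *u, int mb_intra,
                                 int16_t *block, int n, int qscale)
    {
        (mb_intra ? u->intra : u->inter)(block, n, qscale);
    }
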
(*mv_table)[2], int f_code, int type, int truncate); void ff_init_me(MpegEncContext *s); int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y); @@ -805,8 +814,13 @@ static inline int get_rl_index(const RLTable *rl, int last, int run, int level) extern uint8_t ff_mpeg4_y_dc_scale_table[32]; extern uint8_t ff_mpeg4_c_dc_scale_table[32]; +extern uint8_t ff_aic_dc_scale_table[32]; extern const int16_t ff_mpeg4_default_intra_matrix[64]; extern const int16_t ff_mpeg4_default_non_intra_matrix[64]; +extern const uint8_t ff_h263_chroma_qscale_table[32]; +extern const uint8_t ff_h263_loop_filter_strength[32]; + + int ff_h263_decode_init(AVCodecContext *avctx); int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *data_size, @@ -820,7 +834,7 @@ void mpeg4_encode_mb(MpegEncContext *s, int motion_x, int motion_y); void h263_encode_picture_header(MpegEncContext *s, int picture_number); void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number); -int h263_encode_gob_header(MpegEncContext * s, int mb_line); +void h263_encode_gob_header(MpegEncContext * s, int mb_line); int16_t *h263_pred_motion(MpegEncContext * s, int block, int *px, int *py); void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n, @@ -833,12 +847,17 @@ int h263_decode_picture_header(MpegEncContext *s); int ff_h263_decode_gob_header(MpegEncContext *s); int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb); void ff_h263_update_motion_val(MpegEncContext * s); - +void ff_h263_loop_filter(MpegEncContext * s); +void ff_set_qscale(MpegEncContext * s, int qscale); +int ff_h263_decode_mba(MpegEncContext *s); +void ff_h263_encode_mba(MpegEncContext *s); int intel_h263_decode_picture_header(MpegEncContext *s); int flv_h263_decode_picture_header(MpegEncContext *s); int ff_h263_decode_mb(MpegEncContext *s, DCTELEM block[6][64]); +int ff_mpeg4_decode_mb(MpegEncContext *s, + DCTELEM block[6][64]); int h263_get_picture_format(int width, int height); void ff_mpeg4_encode_video_packet_header(MpegEncContext *s); void ff_mpeg4_clean_buffers(MpegEncContext *s); @@ -900,6 +919,7 @@ double ff_eval(char *s, double *const_value, const char **const_name, double (**func1)(void *, double), const char **func1_name, double (**func2)(void *, double, double), char **func2_name, void *opaque); +int ff_vbv_update(MpegEncContext *s, int frame_size); #endif /* AVCODEC_MPEGVIDEO_H */ diff --git a/src/libffmpeg/libavcodec/msmpeg4.c b/src/libffmpeg/libavcodec/msmpeg4.c index dd4ce862a..b7b88c38f 100644 --- a/src/libffmpeg/libavcodec/msmpeg4.c +++ b/src/libffmpeg/libavcodec/msmpeg4.c @@ -1,6 +1,7 @@ /* * MSMPEG4 backend for ffmpeg encoder and decoder * Copyright (c) 2001 Fabrice Bellard. 
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -64,10 +65,10 @@ static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block, static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr); static int msmpeg4_decode_motion(MpegEncContext * s, int *mx_ptr, int *my_ptr); +static void msmpeg4v2_encode_motion(MpegEncContext * s, int val); static void init_h263_dc_for_msmpeg4(void); static inline void msmpeg4_memsetw(short *tab, int val, int n); #ifdef CONFIG_ENCODERS -static void msmpeg4v2_encode_motion(MpegEncContext * s, int val); static int get_size_of_code(MpegEncContext * s, RLTable *rl, int last, int run, int level, int intra); #endif //CONFIG_ENCODERS static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]); @@ -655,7 +656,6 @@ void msmpeg4_encode_mb(MpegEncContext * s, #endif //CONFIG_ENCODERS -#if 0 /* old ffmpeg msmpeg4v3 mode */ static void ff_old_msmpeg4_dc_scale(MpegEncContext * s) { @@ -670,7 +670,6 @@ static void ff_old_msmpeg4_dc_scale(MpegEncContext * s) s->c_dc_scale = (s->qscale + 13)>>1; } } -#endif static inline int msmpeg4v1_pred_dc(MpegEncContext * s, int n, int32_t **dc_val_ptr) @@ -1240,7 +1239,7 @@ return -1; int start_code, num; start_code = (get_bits(&s->gb, 16)<<16) | get_bits(&s->gb, 16); if(start_code!=0x00000100){ - fprintf(stderr, "invalid startcode\n"); + av_log(s->avctx, AV_LOG_ERROR, "invalid startcode\n"); return -1; } @@ -1250,7 +1249,7 @@ return -1; s->pict_type = get_bits(&s->gb, 2) + 1; if (s->pict_type != I_TYPE && s->pict_type != P_TYPE){ - fprintf(stderr, "invalid picture type\n"); + av_log(s->avctx, AV_LOG_ERROR, "invalid picture type\n"); return -1; } #if 0 @@ -1260,9 +1259,9 @@ return -1; if(!had_i) return -1; } #endif - s->qscale = get_bits(&s->gb, 5); + s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); if(s->qscale==0){ - fprintf(stderr, "invalid qscale\n"); + av_log(s->avctx, AV_LOG_ERROR, "invalid qscale\n"); return -1; } @@ -1270,7 +1269,7 @@ return -1; code = get_bits(&s->gb, 5); if(s->msmpeg4_version==1){ if(code==0 || code>s->mb_height){ - fprintf(stderr, "invalid slice height %d\n", code); + av_log(s->avctx, AV_LOG_ERROR, "invalid slice height %d\n", code); return -1; } @@ -1278,7 +1277,7 @@ return -1; }else{ /* 0x17: one slice, 0x18: two slices, ... 
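
Throughout this file the bare printf()/fprintf(stderr, ...) diagnostics are replaced by av_log(s->avctx, AV_LOG_ERROR/AV_LOG_DEBUG, ...), which carries the codec context and a severity level so the caller can filter or redirect messages. The toy logger below only illustrates that context-plus-level pattern; it is not libavcodec's av_log() and every name in it is made up.

    #include <stdarg.h>
    #include <stdio.h>

    enum { LOG_ERROR, LOG_INFO, LOG_DEBUG };

    static int log_threshold = LOG_INFO;   /* messages above this are dropped */

    /* Context-aware, levelled logging: the context pointer lets a real
     * implementation prefix the codec name, the level lets callers filter. */
    static void log_msg(void *ctx, int level, const char *fmt, ...)
    {
        va_list ap;
        if (level > log_threshold)
            return;
        fprintf(stderr, "[%p] ", ctx);
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }
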
*/ if (code < 0x17){ - fprintf(stderr, "error, slice code was %X\n", code); + av_log(s->avctx, AV_LOG_ERROR, "error, slice code was %X\n", code); return -1; } @@ -1316,7 +1315,7 @@ return -1; } s->no_rounding = 1; if(s->avctx->debug&FF_DEBUG_PICT_INFO) - printf("qscale:%d rlc:%d rl:%d dc:%d mbrl:%d slice:%d \n", + av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d slice:%d \n", s->qscale, s->rl_chroma_table_index, s->rl_table_index, @@ -1364,7 +1363,7 @@ return -1; } if(s->avctx->debug&FF_DEBUG_PICT_INFO) - printf("skip:%d rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d \n", + av_log(s->avctx, AV_LOG_DEBUG, "skip:%d rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d \n", s->use_skip_mb_code, s->rl_table_index, s->rl_chroma_table_index, @@ -1412,11 +1411,11 @@ int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size) { s->flipflop_rounding= 0; if(s->msmpeg4_version != 2) - printf("ext header missing, %d left\n", left); + av_log(s->avctx, AV_LOG_ERROR, "ext header missing, %d left\n", left); } else { - fprintf(stderr, "I frame too long, ignoring ext header\n"); + av_log(s->avctx, AV_LOG_ERROR, "I frame too long, ignoring ext header\n"); } return 0; @@ -1429,7 +1428,6 @@ static inline void msmpeg4_memsetw(short *tab, int val, int n) tab[i] = val; } -#ifdef CONFIG_ENCODERS static void msmpeg4v2_encode_motion(MpegEncContext * s, int val) { int range, bit_size, sign, code, bits; @@ -1462,7 +1460,6 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val) } } } -#endif /* this is identical to h263 except that its range is multiplied by 2 */ static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code) @@ -1521,7 +1518,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) else code = get_vlc2(&s->gb, v1_inter_cbpc_vlc.table, V1_INTER_CBPC_VLC_BITS, 3); if(code<0 || code>7){ - fprintf(stderr, "cbpc %d invalid at %d %d\n", code, s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "cbpc %d invalid at %d %d\n", code, s->mb_x, s->mb_y); return -1; } @@ -1535,7 +1532,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) else cbp= get_vlc2(&s->gb, v1_intra_cbpc_vlc.table, V1_INTRA_CBPC_VLC_BITS, 1); if(cbp<0 || cbp>3){ - fprintf(stderr, "cbpc %d invalid at %d %d\n", cbp, s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "cbpc %d invalid at %d %d\n", cbp, s->mb_x, s->mb_y); return -1; } } @@ -1545,7 +1542,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) cbpy= get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1); if(cbpy<0){ - fprintf(stderr, "cbpy %d invalid at %d %d\n", cbp, s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "cbpy %d invalid at %d %d\n", cbp, s->mb_x, s->mb_y); return -1; } @@ -1574,7 +1571,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) for (i = 0; i < 6; i++) { if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { - fprintf(stderr,"\nerror while decoding block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); + av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); return -1; } } @@ -1666,7 +1663,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) for (i = 0; i < 6; i++) { if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { - fprintf(stderr,"\nerror while decoding block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); + av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); return -1; } } @@ -1692,20 +1689,20 
@@ static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block, level = msmpeg4_decode_dc(s, n, &dc_pred_dir); if (level < 0){ - fprintf(stderr, "dc overflow- block: %d qscale: %d//\n", n, s->qscale); + av_log(s->avctx, AV_LOG_ERROR, "dc overflow- block: %d qscale: %d//\n", n, s->qscale); if(s->inter_intra_pred) level=0; else return -1; } if (n < 4) { rl = &rl_table[s->rl_table_index]; if(level > 256*s->y_dc_scale){ - fprintf(stderr, "dc overflow+ L qscale: %d//\n", s->qscale); + av_log(s->avctx, AV_LOG_ERROR, "dc overflow+ L qscale: %d//\n", s->qscale); if(!s->inter_intra_pred) return -1; } } else { rl = &rl_table[3 + s->rl_chroma_table_index]; if(level > 256*s->c_dc_scale){ - fprintf(stderr, "dc overflow+ C qscale: %d//\n", s->qscale); + av_log(s->avctx, AV_LOG_ERROR, "dc overflow+ C qscale: %d//\n", s->qscale); if(!s->inter_intra_pred) return -1; } } @@ -1774,7 +1771,7 @@ static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block, if(s->qscale<8){ ll= SHOW_UBITS(re, &s->gb, 3); SKIP_BITS(re, &s->gb, 3); if(ll==0){ - if(SHOW_UBITS(re, &s->gb, 1)) printf("cool a new vlc code ,contact the ffmpeg developers and upload the file\n"); + if(SHOW_UBITS(re, &s->gb, 1)) av_log(s->avctx, AV_LOG_ERROR, "cool a new vlc code ,contact the ffmpeg developers and upload the file\n"); SKIP_BITS(re, &s->gb, 1); ll=8; } @@ -1895,10 +1892,10 @@ static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block, if(i&(~63)){ const int left= s->gb.size_in_bits - get_bits_count(&s->gb); if(((i+192 == 64 && level/qmul==-1) || s->error_resilience<=1) && left>=0){ - fprintf(stderr, "ignoring overflow at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "ignoring overflow at %d %d\n", s->mb_x, s->mb_y); break; }else{ - fprintf(stderr, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } } @@ -1944,7 +1941,7 @@ static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr) level = get_vlc2(&s->gb, dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3); } if (level < 0){ - fprintf(stderr, "illegal dc vlc\n"); + av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n"); return -1; } @@ -1991,7 +1988,7 @@ static int msmpeg4_decode_motion(MpegEncContext * s, code = get_vlc2(&s->gb, mv->vlc.table, MV_VLC_BITS, 2); if (code < 0){ - fprintf(stderr, "illegal MV code at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "illegal MV code at %d %d\n", s->mb_x, s->mb_y); return -1; } if (code == mv->n) { diff --git a/src/libffmpeg/libavcodec/msrle.c b/src/libffmpeg/libavcodec/msrle.c index 8943ef3cd..b318faa77 100644 --- a/src/libffmpeg/libavcodec/msrle.c +++ b/src/libffmpeg/libavcodec/msrle.c @@ -26,7 +26,7 @@ * The MS RLE decoder outputs PAL8 colorspace data. * * Note that this decoder expects the palette colors from the end of the - * BITMAPINFO header passed through extradata. + * BITMAPINFO header passed through palctrl. 
*/ #include <stdio.h> @@ -41,22 +41,115 @@ typedef struct MsrleContext { AVCodecContext *avctx; AVFrame frame; - AVFrame prev_frame; unsigned char *buf; int size; - unsigned int palette[256]; } MsrleContext; #define FETCH_NEXT_STREAM_BYTE() \ if (stream_ptr >= s->size) \ { \ - printf(" MS RLE: stream ptr just went out of bounds (1)\n"); \ + av_log(s->avctx, AV_LOG_ERROR, " MS RLE: stream ptr just went out of bounds (1)\n"); \ return; \ } \ stream_byte = s->buf[stream_ptr++]; +static void msrle_decode_pal4(MsrleContext *s) +{ + int stream_ptr = 0; + unsigned char rle_code; + unsigned char extra_byte, odd_pixel; + unsigned char stream_byte; + int pixel_ptr = 0; + int row_dec = s->frame.linesize[0]; + int row_ptr = (s->avctx->height - 1) * row_dec; + int frame_size = row_dec * s->avctx->height; + int i; + + /* make the palette available */ + memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE); + if (s->avctx->palctrl->palette_changed) { + s->frame.palette_has_changed = 1; + s->avctx->palctrl->palette_changed = 0; + } + + while (row_ptr >= 0) { + FETCH_NEXT_STREAM_BYTE(); + rle_code = stream_byte; + if (rle_code == 0) { + /* fetch the next byte to see how to handle escape code */ + FETCH_NEXT_STREAM_BYTE(); + if (stream_byte == 0) { + /* line is done, goto the next one */ + row_ptr -= row_dec; + pixel_ptr = 0; + } else if (stream_byte == 1) { + /* decode is done */ + return; + } else if (stream_byte == 2) { + /* reposition frame decode coordinates */ + FETCH_NEXT_STREAM_BYTE(); + pixel_ptr += stream_byte; + FETCH_NEXT_STREAM_BYTE(); + row_ptr -= stream_byte * row_dec; + } else { + // copy pixels from encoded stream + odd_pixel = stream_byte & 1; + rle_code = (stream_byte + 1) / 2; + extra_byte = rle_code & 0x01; + if ((row_ptr + pixel_ptr + stream_byte > frame_size) || + (row_ptr < 0)) { + av_log(s->avctx, AV_LOG_ERROR, " MS RLE: frame ptr just went out of bounds (1)\n"); + return; + } + + for (i = 0; i < rle_code; i++) { + if (pixel_ptr >= s->avctx->width) + break; + FETCH_NEXT_STREAM_BYTE(); + s->frame.data[0][row_ptr + pixel_ptr] = stream_byte >> 4; + pixel_ptr++; + if (i + 1 == rle_code && odd_pixel) + break; + if (pixel_ptr >= s->avctx->width) + break; + s->frame.data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F; + pixel_ptr++; + } + + // if the RLE code is odd, skip a byte in the stream + if (extra_byte) + stream_ptr++; + } + } else { + // decode a run of data + if ((row_ptr + pixel_ptr + stream_byte > frame_size) || + (row_ptr < 0)) { + av_log(s->avctx, AV_LOG_ERROR, " MS RLE: frame ptr just went out of bounds (1)\n"); + return; + } + FETCH_NEXT_STREAM_BYTE(); + for (i = 0; i < rle_code; i++) { + if (pixel_ptr >= s->avctx->width) + break; + if ((i & 1) == 0) + s->frame.data[0][row_ptr + pixel_ptr] = stream_byte >> 4; + else + s->frame.data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F; + pixel_ptr++; + } + } + } + + /* one last sanity check on the way out */ + if (stream_ptr < s->size) + av_log(s->avctx, AV_LOG_ERROR, " MS RLE: ended frame decode with bytes left over (%d < %d)\n", + stream_ptr, s->size); +} + + + static void msrle_decode_pal8(MsrleContext *s) { int stream_ptr = 0; @@ -68,6 +161,13 @@ static void msrle_decode_pal8(MsrleContext *s) int row_ptr = (s->avctx->height - 1) * row_dec; int frame_size = row_dec * s->avctx->height; + /* make the palette available */ + memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE); + if (s->avctx->palctrl->palette_changed) { + s->frame.palette_has_changed = 1; + s->avctx->palctrl->palette_changed = 0; + } + 
while (row_ptr >= 0) { FETCH_NEXT_STREAM_BYTE(); rle_code = stream_byte; @@ -91,14 +191,14 @@ static void msrle_decode_pal8(MsrleContext *s) /* copy pixels from encoded stream */ if ((row_ptr + pixel_ptr + stream_byte > frame_size) || (row_ptr < 0)) { - printf(" MS RLE: frame ptr just went out of bounds (1)\n"); + av_log(s->avctx, AV_LOG_ERROR, " MS RLE: frame ptr just went out of bounds (1)\n"); return; } rle_code = stream_byte; extra_byte = stream_byte & 0x01; if (stream_ptr + rle_code + extra_byte > s->size) { - printf(" MS RLE: stream ptr just went out of bounds (2)\n"); + av_log(s->avctx, AV_LOG_ERROR, " MS RLE: stream ptr just went out of bounds (2)\n"); return; } @@ -116,7 +216,7 @@ static void msrle_decode_pal8(MsrleContext *s) /* decode a run of data */ if ((row_ptr + pixel_ptr + stream_byte > frame_size) || (row_ptr < 0)) { - printf(" MS RLE: frame ptr just went out of bounds (2)\n"); + av_log(s->avctx, AV_LOG_ERROR, " MS RLE: frame ptr just went out of bounds (2)\n"); return; } @@ -129,35 +229,21 @@ static void msrle_decode_pal8(MsrleContext *s) } } - /* make the palette available */ - memcpy(s->frame.data[1], s->palette, 256 * 4); - /* one last sanity check on the way out */ if (stream_ptr < s->size) - printf(" MS RLE: ended frame decode with bytes left over (%d < %d)\n", + av_log(s->avctx, AV_LOG_ERROR, " MS RLE: ended frame decode with bytes left over (%d < %d)\n", stream_ptr, s->size); } static int msrle_decode_init(AVCodecContext *avctx) { MsrleContext *s = (MsrleContext *)avctx->priv_data; - int i, j; - unsigned char *palette; s->avctx = avctx; avctx->pix_fmt = PIX_FMT_PAL8; avctx->has_b_frames = 0; - s->frame.data[0] = s->prev_frame.data[0] = NULL; - - /* convert palette */ - palette = (unsigned char *)s->avctx->extradata; - memset (s->palette, 0, 256 * 4); - for (i = 0, j = 0; i < s->avctx->extradata_size / 4; i++, j += 4) - s->palette[i] = - (palette[j + 2] << 16) | - (palette[j + 1] << 8) | - (palette[j + 0] << 0); + s->frame.data[0] = NULL; return 0; } @@ -168,25 +254,31 @@ static int msrle_decode_frame(AVCodecContext *avctx, { MsrleContext *s = (MsrleContext *)avctx->priv_data; + /* no supplementary picture */ + if (buf_size == 0) + return 0; + s->buf = buf; s->size = buf_size; - if (avctx->get_buffer(avctx, &s->frame)) { - printf (" MS RLE: get_buffer() failed\n"); + s->frame.reference = 1; + s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; + if (avctx->reget_buffer(avctx, &s->frame)) { + av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } - /* grossly inefficient, but...oh well */ - memcpy(s->frame.data[0], s->prev_frame.data[0], - s->frame.linesize[0] * s->avctx->height); - - msrle_decode_pal8(s); - - if (s->frame.data[0]) - avctx->release_buffer(avctx, &s->frame); - - /* shuffle frames */ - s->prev_frame = s->frame; + switch (avctx->bits_per_sample) { + case 8: + msrle_decode_pal8(s); + break; + case 4: + msrle_decode_pal4(s); + break; + default: + av_log(avctx, AV_LOG_ERROR, "Don't know how to decode depth %u.\n", + avctx->bits_per_sample); + } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; @@ -200,8 +292,8 @@ static int msrle_decode_end(AVCodecContext *avctx) MsrleContext *s = (MsrleContext *)avctx->priv_data; /* release the last frame */ - if (s->prev_frame.data[0]) - avctx->release_buffer(avctx, &s->prev_frame); + if (s->frame.data[0]) + avctx->release_buffer(avctx, &s->frame); return 0; } diff --git a/src/libffmpeg/libavcodec/msvideo1.c b/src/libffmpeg/libavcodec/msvideo1.c index 
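
Both palettised decoders in this diff (MS RLE here, MS Video-1 below) drop their private prev_frame copy and instead ask for the same output buffer back each time via reget_buffer() with PRESERVE/REUSABLE hints, and they pick the palette up from avctx->palctrl rather than from extradata. Condensed from the hunks themselves, the per-frame pattern looks as follows; decode_pixels() is a placeholder for the codec-specific RLE loop, and the fragment assumes the libavcodec types used above.

    s->frame.reference    = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
                            FF_BUFFER_HINTS_PRESERVE |
                            FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    /* palette comes from the demuxer via palctrl now, not extradata */
    memcpy(s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
    if (avctx->palctrl->palette_changed) {
        s->frame.palette_has_changed = 1;
        avctx->palctrl->palette_changed = 0;
    }

    decode_pixels(s);   /* skip codes simply leave the preserved pixels alone */
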
6e1d0a8d4..3190efb9e 100644 --- a/src/libffmpeg/libavcodec/msvideo1.c +++ b/src/libffmpeg/libavcodec/msvideo1.c @@ -25,8 +25,8 @@ * http://www.pcisys.net/~melanson/codecs/ * * This decoder outputs either PAL8 or RGB555 data, depending on the - * whether a RGB palette was passed through via extradata; if the extradata - * is present, then the data is PAL8; RGB555 otherwise. + * whether a RGB palette was passed through palctrl; + * if it's present, then the data is PAL8; RGB555 otherwise. */ #include <stdio.h> @@ -38,64 +38,37 @@ #include "avcodec.h" #include "dsputil.h" -#undef LE_16 - #define PALETTE_COUNT 256 #define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0]) #define CHECK_STREAM_PTR(n) \ if ((stream_ptr + n) > s->size ) { \ - printf (" MS Video-1 warning: stream_ptr out of bounds (%d >= %d)\n", \ + av_log(s->avctx, AV_LOG_ERROR, " MS Video-1 warning: stream_ptr out of bounds (%d >= %d)\n", \ stream_ptr + n, s->size); \ return; \ } -#define COPY_PREV_BLOCK() \ - pixel_ptr = block_ptr; \ - for (pixel_y = 0; pixel_y < 4; pixel_y++) { \ - for (pixel_x = 0; pixel_x < 4; pixel_x++, pixel_ptr++) \ - pixels[pixel_ptr] = prev_pixels[pixel_ptr]; \ - pixel_ptr -= row_dec; \ - } - typedef struct Msvideo1Context { AVCodecContext *avctx; DSPContext dsp; AVFrame frame; - AVFrame prev_frame; unsigned char *buf; int size; int mode_8bit; /* if it's not 8-bit, it's 16-bit */ - unsigned char palette[PALETTE_COUNT * 4]; } Msvideo1Context; static int msvideo1_decode_init(AVCodecContext *avctx) { Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data; - int i; - unsigned char r, g, b; - unsigned char *raw_palette; - unsigned int *palette32; s->avctx = avctx; - /* figure out the colorspace based on the presence of a palette in - * extradata */ - if (s->avctx->extradata_size) { + /* figure out the colorspace based on the presence of a palette */ + if (s->avctx->palctrl) { s->mode_8bit = 1; - /* load up the palette */ - palette32 = (unsigned int *)s->palette; - raw_palette = (unsigned char *)s->avctx->extradata; - for (i = 0; i < s->avctx->extradata_size / 4; i++) { - b = *raw_palette++; - g = *raw_palette++; - r = *raw_palette++; - raw_palette++; - palette32[i] = (r << 16) | (g << 8) | (b); - } avctx->pix_fmt = PIX_FMT_PAL8; } else { s->mode_8bit = 0; @@ -105,7 +78,7 @@ static int msvideo1_decode_init(AVCodecContext *avctx) avctx->has_b_frames = 0; dsputil_init(&s->dsp, avctx); - s->frame.data[0] = s->prev_frame.data[0] = NULL; + s->frame.data[0] = NULL; return 0; } @@ -127,7 +100,6 @@ static void msvideo1_decode_8bit(Msvideo1Context *s) int skip_blocks; unsigned char colors[8]; unsigned char *pixels = s->frame.data[0]; - unsigned char *prev_pixels = s->prev_frame.data[0]; int stride = s->frame.linesize[0]; stream_ptr = 0; @@ -143,7 +115,6 @@ static void msvideo1_decode_8bit(Msvideo1Context *s) for (block_x = blocks_wide; block_x > 0; block_x--) { /* check if this block should be skipped */ if (skip_blocks) { - COPY_PREV_BLOCK(); block_ptr += block_inc; skip_blocks--; total_blocks--; @@ -163,7 +134,6 @@ static void msvideo1_decode_8bit(Msvideo1Context *s) else if ((byte_b & 0xFC) == 0x84) { /* skip code, but don't count the current block */ skip_blocks = ((byte_b - 0x84) << 8) + byte_a - 1; - COPY_PREV_BLOCK(); } else if (byte_b < 0x80) { /* 2-color encoding */ flags = (byte_b << 8) | byte_a; @@ -209,8 +179,13 @@ static void msvideo1_decode_8bit(Msvideo1Context *s) } /* make the palette available on the way out */ - if (s->avctx->pix_fmt == PIX_FMT_PAL8) - memcpy(s->frame.data[1], s->palette, 
PALETTE_COUNT * 4); + if (s->avctx->pix_fmt == PIX_FMT_PAL8) { + memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE); + if (s->avctx->palctrl->palette_changed) { + s->frame.palette_has_changed = 1; + s->avctx->palctrl->palette_changed = 0; + } + } } static void msvideo1_decode_16bit(Msvideo1Context *s) @@ -230,7 +205,6 @@ static void msvideo1_decode_16bit(Msvideo1Context *s) int skip_blocks; unsigned short colors[8]; unsigned short *pixels = (unsigned short *)s->frame.data[0]; - unsigned short *prev_pixels = (unsigned short *)s->prev_frame.data[0]; int stride = s->frame.linesize[0] / 2; stream_ptr = 0; @@ -246,7 +220,6 @@ static void msvideo1_decode_16bit(Msvideo1Context *s) for (block_x = blocks_wide; block_x > 0; block_x--) { /* check if this block should be skipped */ if (skip_blocks) { - COPY_PREV_BLOCK(); block_ptr += block_inc; skip_blocks--; total_blocks--; @@ -266,7 +239,6 @@ static void msvideo1_decode_16bit(Msvideo1Context *s) } else if ((byte_b & 0xFC) == 0x84) { /* skip code, but don't count the current block */ skip_blocks = ((byte_b - 0x84) << 8) + byte_a - 1; - COPY_PREV_BLOCK(); } else if (byte_b < 0x80) { /* 2- or 8-color encoding modes */ flags = (byte_b << 8) | byte_a; @@ -331,11 +303,17 @@ static int msvideo1_decode_frame(AVCodecContext *avctx, { Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data; + /* no supplementary picture */ + if (buf_size == 0) + return 0; + s->buf = buf; s->size = buf_size; - if (avctx->get_buffer(avctx, &s->frame)) { - printf (" MS Video-1 Video: get_buffer() failed\n"); + s->frame.reference = 1; + s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; + if (avctx->reget_buffer(avctx, &s->frame)) { + av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } @@ -344,12 +322,6 @@ static int msvideo1_decode_frame(AVCodecContext *avctx, else msvideo1_decode_16bit(s); - if (s->prev_frame.data[0]) - avctx->release_buffer(avctx, &s->prev_frame); - - /* shuffle frames */ - s->prev_frame = s->frame; - *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; @@ -361,8 +333,8 @@ static int msvideo1_decode_end(AVCodecContext *avctx) { Msvideo1Context *s = (Msvideo1Context *)avctx->priv_data; - if (s->prev_frame.data[0]) - avctx->release_buffer(avctx, &s->prev_frame); + if (s->frame.data[0]) + avctx->release_buffer(avctx, &s->frame); return 0; } diff --git a/src/libffmpeg/libavcodec/opts.c b/src/libffmpeg/libavcodec/opts.c index 2ce459d75..6f8d0c4fd 100644 --- a/src/libffmpeg/libavcodec/opts.c +++ b/src/libffmpeg/libavcodec/opts.c @@ -89,7 +89,7 @@ static int parse_double(const AVOption *c, char *s, double *var) d = atof(s); if (c->min != c->max) { if (d < c->min || d > c->max) { - fprintf(stderr, "Option: %s double value: %f out of range <%f, %f>\n", + av_log(NULL, AV_LOG_ERROR, "Option: %s double value: %f out of range <%f, %f>\n", c->name, d, c->min, c->max); return -1; } @@ -106,7 +106,7 @@ static int parse_int(const AVOption* c, char* s, int* var) i = atoi(s); if (c->min != c->max) { if (i < (int)c->min || i > (int)c->max) { - fprintf(stderr, "Option: %s integer value: %d out of range <%d, %d>\n", + av_log(NULL, AV_LOG_ERROR, "Option: %s integer value: %d out of range <%d, %d>\n", c->name, i, (int)c->min, (int)c->max); return -1; } @@ -136,7 +136,7 @@ static int parse_string(const AVOption *c, char *s, void* strct, char **var) //printf("parsed Rc: %d,%d,%d,%f (%d)\n", sf,ef,qs,qf, avctx->rc_override_count); } else { - printf("incorrect/unparsable Rc: \"%s\"\n", s); + 
av_log(NULL, AV_LOG_ERROR, "incorrect/unparsable Rc: \"%s\"\n", s); } } else *var = av_strdup(s); diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c index 635480784..d1a294365 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c @@ -45,7 +45,7 @@ static void sigill_handler (int sig) } #endif /* CONFIG_DARWIN */ -int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) +int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int i; int s __attribute__((aligned(16))); @@ -57,7 +57,7 @@ int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) s = 0; sad = (vector unsigned int)vec_splat_u32(0); - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { /* Read unaligned pixels into our vectors. The vectors are as follows: pix1v: pix1[0]-pix1[15] @@ -92,7 +92,7 @@ int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) +int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int i; int s __attribute__((aligned(16))); @@ -118,7 +118,7 @@ int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) tv = (vector unsigned char *) &pix2[0]; pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0])); - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { /* Read unaligned pixels into our vectors. The vectors are as follows: pix1v: pix1[0]-pix1[15] @@ -152,7 +152,7 @@ int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) +int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int i; int s __attribute__((aligned(16))); @@ -194,7 +194,7 @@ int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) t1 = vec_add(pix2hv, pix2ihv); t2 = vec_add(pix2lv, pix2ilv); - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { /* Read unaligned pixels into our vectors. The vectors are as follows: pix1v: pix1[0]-pix1[15] @@ -253,7 +253,7 @@ int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) +int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int i; int s __attribute__((aligned(16))); @@ -266,7 +266,7 @@ int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) sad = (vector unsigned int)vec_splat_u32(0); - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { /* Read potentially unaligned pixels into t1 and t2 */ perm1 = vec_lvsl(0, pix1); pix1v = (vector unsigned char *) pix1; @@ -295,7 +295,7 @@ int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) return s; } -int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) +int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int i; int s __attribute__((aligned(16))); @@ -309,7 +309,7 @@ int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size) permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0); - for(i=0;i<8;i++) { + for(i=0;i<h;i++) { /* Read potentially unaligned pixels into t1 and t2 Since we're reading 16 pixels, and actually only want 8, mask out the last 8 pixels. The 0s don't change the sum. */ @@ -374,9 +374,9 @@ int pix_norm1_altivec(uint8_t *pix, int line_size) /** * Sum of Squared Errors for a 8x8 block. 
* AltiVec-enhanced. - * It's the pix_abs8x8_altivec code above w/ squaring added. + * It's the sad8_altivec code above w/ squaring added. */ -int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size) +int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int i; int s __attribute__((aligned(16))); @@ -391,7 +391,7 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size) permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0); - for(i=0;i<8;i++) { + for(i=0;i<h;i++) { /* Read potentially unaligned pixels into t1 and t2 Since we're reading 16 pixels, and actually only want 8, mask out the last 8 pixels. The 0s don't change the sum. */ @@ -430,9 +430,9 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size) /** * Sum of Squared Errors for a 16x16 block. * AltiVec-enhanced. - * It's the pix_abs16x16_altivec code above w/ squaring added. + * It's the sad16_altivec code above w/ squaring added. */ -int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size) +int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int i; int s __attribute__((aligned(16))); @@ -444,7 +444,7 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size) sum = (vector unsigned int)vec_splat_u32(0); - for(i=0;i<16;i++) { + for(i=0;i<h;i++) { /* Read potentially unaligned pixels into t1 and t2 */ perm1 = vec_lvsl(0, pix1); pix1v = (vector unsigned char *) pix1; @@ -609,14 +609,6 @@ void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1, } } -int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride) { - return pix_abs16x16_altivec(a,b,stride); -} - -int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride) { - return pix_abs8x8_altivec(a,b,stride); -} - void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) { #ifdef ALTIVEC_USE_REFERENCE_C_CODE int i; diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h index f04496d62..93448a1ad 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h +++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h @@ -24,16 +24,14 @@ #ifdef HAVE_ALTIVEC -extern int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size); -extern int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size); -extern int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size); -extern int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size); -extern int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size); -extern int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride); -extern int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride); +extern int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); +extern int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); +extern int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); +extern int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); +extern int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); extern int pix_norm1_altivec(uint8_t *pix, int line_size); -extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size); -extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size); +extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); +extern int 
sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); extern int pix_sum_altivec(uint8_t * pix, int line_size); extern void diff_pixels_altivec(DCTELEM* block, const uint8_t* s1, const uint8_t* s2, int stride); extern void get_pixels_altivec(DCTELEM* block, const uint8_t * pixels, int line_size); diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c index 7af2aa002..9882e401f 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c @@ -25,6 +25,7 @@ #include "dsputil_altivec.h" #endif +extern void fdct_altivec(int16_t *block); extern void idct_put_altivec(uint8_t *dest, int line_size, int16_t *block); extern void idct_add_altivec(uint8_t *dest, int line_size, int16_t *block); @@ -48,6 +49,7 @@ static unsigned char* perfname[] = { "fft_calc_altivec", "gmc1_altivec", "dct_unquantize_h263_altivec", + "fdct_altivec", "idct_add_altivec", "idct_put_altivec", "put_pixels16_altivec", @@ -238,13 +240,13 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) mm_flags |= MM_ALTIVEC; // Altivec specific optimisations - c->pix_abs16x16_x2 = pix_abs16x16_x2_altivec; - c->pix_abs16x16_y2 = pix_abs16x16_y2_altivec; - c->pix_abs16x16_xy2 = pix_abs16x16_xy2_altivec; - c->pix_abs16x16 = pix_abs16x16_altivec; - c->pix_abs8x8 = pix_abs8x8_altivec; - c->sad[0]= sad16x16_altivec; - c->sad[1]= sad8x8_altivec; + c->pix_abs[0][1] = sad16_x2_altivec; + c->pix_abs[0][2] = sad16_y2_altivec; + c->pix_abs[0][3] = sad16_xy2_altivec; + c->pix_abs[0][0] = sad16_altivec; + c->pix_abs[1][0] = sad8_altivec; + c->sad[0]= sad16_altivec; + c->sad[1]= sad8_altivec; c->pix_norm1 = pix_norm1_altivec; c->sse[1]= sse8_altivec; c->sse[0]= sse16_altivec; @@ -270,6 +272,14 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) c->gmc1 = gmc1_altivec; +#ifdef CONFIG_ENCODERS + if (avctx->dct_algo == FF_DCT_AUTO || + avctx->dct_algo == FF_DCT_ALTIVEC) + { + c->fdct = fdct_altivec; + } +#endif //CONFIG_ENCODERS + if ((avctx->idct_algo == FF_IDCT_AUTO) || (avctx->idct_algo == FF_IDCT_ALTIVEC)) { diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h index 4cb299dd9..d672edfcb 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h +++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h @@ -40,6 +40,7 @@ enum powerpc_perf_index { altivec_fft_num = 0, altivec_gmc1_num, altivec_dct_unquantize_h263_num, + altivec_fdct, altivec_idct_add_num, altivec_idct_put_num, altivec_put_pixels16_num, diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c b/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c index 18e86dce9..a83a0aa5c 100644 --- a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c +++ b/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c @@ -72,7 +72,8 @@ void MPV_common_init_ppc(MpegEncContext *s) (s->avctx->dct_algo == FF_DCT_ALTIVEC))
{
s->dct_quantize = dct_quantize_altivec;
- s->dct_unquantize_h263 = dct_unquantize_h263_altivec; + s->dct_unquantize_h263_intra = dct_unquantize_h263_altivec; + s->dct_unquantize_h263_inter = dct_unquantize_h263_altivec; }
} else
#endif
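
The dsputil hunks above generalize the AltiVec motion-comparison routines: the fixed-size pix_abs16x16_*()/pix_abs8x8_altivec() entry points become sad16_*_altivec()/sad8_altivec() taking an opaque context pointer and an explicit row count h, and they are registered through the two-dimensional c->pix_abs[][] table (plus c->sad[]) instead of one field per block size. A minimal scalar sketch with the same shape, assuming only the signature shown in the hunks (sad16_c_sketch is an illustrative name, not part of the patch):

#include <stdint.h>
#include <stdlib.h>

static int sad16_c_sketch(void *ctx, uint8_t *pix1, uint8_t *pix2,
                          int line_size, int h)
{
    int x, y, sum = 0;

    for (y = 0; y < h; y++) {               /* h rows, no longer hard-coded to 16 */
        for (x = 0; x < 16; x++)
            sum += abs(pix1[x] - pix2[x]);  /* sum of absolute differences */
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;                             /* ctx is unused by the plain-C version */
}

/* Registration would then follow the pattern shown in dsputil_init_ppc() above,
 * e.g. c->pix_abs[0][0] = sad16_c_sketch; c->sad[0] = sad16_c_sketch; */
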
diff --git a/src/libffmpeg/libavcodec/ra144.c b/src/libffmpeg/libavcodec/ra144.c index fbc145af1..2d882f744 100644 --- a/src/libffmpeg/libavcodec/ra144.c +++ b/src/libffmpeg/libavcodec/ra144.c @@ -495,7 +495,7 @@ static int ra144_decode_frame(AVCodecContext * avctx, temp=glob->swapbuf2alt; glob->swapbuf2alt=glob->swapbuf2; glob->swapbuf2=temp; - *data_size=(char *)data-(char *)datao; + *data_size=data-datao; return 20; } diff --git a/src/libffmpeg/libavcodec/ra288.c b/src/libffmpeg/libavcodec/ra288.c index 979be2e7c..a44eb96f1 100644 --- a/src/libffmpeg/libavcodec/ra288.c +++ b/src/libffmpeg/libavcodec/ra288.c @@ -206,7 +206,7 @@ static void prodsum(float *tgt, float *src, int len, int n) } } -static void * decode_block(AVCodecContext * avctx, unsigned char *in, signed short int *out,unsigned len) +void * decode_block(AVCodecContext * avctx, unsigned char *in, signed short int *out,unsigned len) { int x,y; Real288_internal *glob=avctx->priv_data; @@ -244,7 +244,7 @@ static int ra288_decode_frame(AVCodecContext * avctx, int i,j; if(buf_size<w*h) { - fprintf(stderr,"ffra288: Error! Input buffer is too small [%d<%d]\n",buf_size,w*h); + av_log(avctx, AV_LOG_ERROR, "ffra288: Error! Input buffer is too small [%d<%d]\n",buf_size,w*h); return 0; } datao = data; @@ -255,12 +255,12 @@ static int ra288_decode_frame(AVCodecContext * avctx, data=decode_block(avctx,&buf[j*cfs+cfs*i*h/2],(signed short *)data,cfs); bret += cfs; } - *data_size = (char *)data - (char *)datao; + *data_size = data - datao; return bret; } else { - fprintf(stderr,"ffra288: Error: need extra data!!!\n"); + av_log(avctx, AV_LOG_ERROR, "ffra288: Error: need extra data!!!\n"); return 0; } } diff --git a/src/libffmpeg/libavcodec/ratecontrol.c b/src/libffmpeg/libavcodec/ratecontrol.c index a786ac215..6c90b1b6c 100644 --- a/src/libffmpeg/libavcodec/ratecontrol.c +++ b/src/libffmpeg/libavcodec/ratecontrol.c @@ -1,7 +1,7 @@ /* * Rate control for video encoders * - * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> + * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -39,7 +39,7 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f void ff_write_pass1_stats(MpegEncContext *s){ sprintf(s->avctx->stats_out, "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d;\n", - s->picture_number, s->input_picture_number - s->max_b_frames, s->pict_type, + s->current_picture_ptr->display_picture_number, s->current_picture_ptr->coded_picture_number, s->pict_type, s->current_picture.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits, s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count); } @@ -62,7 +62,7 @@ int ff_rate_control_init(MpegEncContext *s) rcc->frame_count[i]= 1; // 1 is better cuz of 1/0 and such rcc->last_qscale_for[i]=FF_QP2LAMBDA * 5; } - rcc->buffer_index= s->avctx->rc_buffer_size/2; + rcc->buffer_index= s->avctx->rc_initial_buffer_occupancy; if(s->flags&CODEC_FLAG_PASS2){ int i; @@ -109,7 +109,7 @@ int ff_rate_control_init(MpegEncContext *s) &rce->pict_type, &rce->qscale, &rce->i_tex_bits, &rce->p_tex_bits, &rce->mv_bits, &rce->misc_bits, &rce->f_code, &rce->b_code, &rce->mc_mb_var_sum, &rce->mb_var_sum, &rce->i_count); if(e!=12){ - fprintf(stderr, "statistics are damaged at line %d, parser out=%d\n", i, e); + av_log(s->avctx, AV_LOG_ERROR, "statistics 
are damaged at line %d, parser out=%d\n", i, e); return -1; } p= next; @@ -183,40 +183,52 @@ void ff_rate_control_uninit(MpegEncContext *s) static inline double qp2bits(RateControlEntry *rce, double qp){ if(qp<=0.0){ - fprintf(stderr, "qp<=0.0\n"); + av_log(NULL, AV_LOG_ERROR, "qp<=0.0\n"); } return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ qp; } static inline double bits2qp(RateControlEntry *rce, double bits){ if(bits<0.9){ - fprintf(stderr, "bits<0.9\n"); + av_log(NULL, AV_LOG_ERROR, "bits<0.9\n"); } return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ bits; } -static void update_rc_buffer(MpegEncContext *s, int frame_size){ +int ff_vbv_update(MpegEncContext *s, int frame_size){ RateControlContext *rcc= &s->rc_context; const double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; - const double buffer_size= s->avctx->rc_buffer_size; + const int buffer_size= s->avctx->rc_buffer_size; const double min_rate= s->avctx->rc_min_rate/fps; const double max_rate= s->avctx->rc_max_rate/fps; - + +//printf("%d %f %d %f %f\n", buffer_size, rcc->buffer_index, frame_size, min_rate, max_rate); if(buffer_size){ + int left; + rcc->buffer_index-= frame_size; - if(rcc->buffer_index < buffer_size/2 /*FIXME /2 */ || min_rate==0){ - rcc->buffer_index+= max_rate; - if(rcc->buffer_index >= buffer_size) - rcc->buffer_index= buffer_size-1; - }else{ - rcc->buffer_index+= min_rate; + if(rcc->buffer_index < 0){ + av_log(s->avctx, AV_LOG_ERROR, "rc buffer underflow\n"); + rcc->buffer_index= 0; + } + + left= buffer_size - rcc->buffer_index - 1; + rcc->buffer_index += clip(left, min_rate, max_rate); + + if(rcc->buffer_index > buffer_size){ + int stuffing= ceil((rcc->buffer_index - buffer_size)/8); + + if(stuffing < 4 && s->codec_id == CODEC_ID_MPEG4) + stuffing=4; + rcc->buffer_index -= 8*stuffing; + + if(s->avctx->debug & FF_DEBUG_RC) + av_log(s->avctx, AV_LOG_DEBUG, "stuffing %d bytes\n", stuffing); + + return stuffing; } - - if(rcc->buffer_index < 0) - fprintf(stderr, "rc buffer underflow\n"); - if(rcc->buffer_index >= s->avctx->rc_buffer_size) - fprintf(stderr, "rc buffer overflow\n"); } + return 0; } /** @@ -224,6 +236,7 @@ static void update_rc_buffer(MpegEncContext *s, int frame_size){ */ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num){ RateControlContext *rcc= &s->rc_context; + AVCodecContext *a= s->avctx; double q, bits; const int pict_type= rce->new_pict_type; const double mb_num= s->mb_num; @@ -244,7 +257,7 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f rce->pict_type == P_TYPE, rce->pict_type == B_TYPE, rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type], - s->qcompress, + a->qcompress, /* rcc->last_qscale_for[I_TYPE], rcc->last_qscale_for[P_TYPE], rcc->last_qscale_for[B_TYPE], @@ -385,8 +398,9 @@ static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q, double bits; const int pict_type= rce->new_pict_type; const double buffer_size= s->avctx->rc_buffer_size; - const double min_rate= s->avctx->rc_min_rate; - const double max_rate= s->avctx->rc_max_rate; + const double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; + const double min_rate= s->avctx->rc_min_rate / fps; + const double max_rate= s->avctx->rc_max_rate / fps; get_qminmax(&qmin, &qmax, s, pict_type); @@ -399,6 +413,7 @@ static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q, /* buffer overflow/underflow protection */ 
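/* ff_vbv_update() above replaces the old update_rc_buffer(): the bucket is
 * drained by the coded frame size, refilled by a per-frame amount clipped to
 * [min_rate, max_rate], and any overflow becomes stuffing bytes the caller
 * must emit.  A minimal standalone sketch of that leaky-bucket accounting,
 * with the encoder context stripped to plain parameters (the names below are
 * illustrative, not part of the patch): */

#include <math.h>

static double clip_d(double v, double lo, double hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static int vbv_update_sketch(double *buffer_index, int buffer_size,
                             double min_rate, double max_rate, int frame_size)
{
    double left;
    int stuffing = 0;

    if (!buffer_size)                         /* no VBV constraint configured */
        return 0;

    *buffer_index -= frame_size;
    if (*buffer_index < 0)                    /* underflow: too many bits were spent */
        *buffer_index = 0;

    left = buffer_size - *buffer_index - 1;
    *buffer_index += clip_d(left, min_rate, max_rate);  /* per-frame refill */

    if (*buffer_index > buffer_size) {        /* overflow: pad the bitstream */
        stuffing = (int)ceil((*buffer_index - buffer_size) / 8);
        *buffer_index -= 8 * stuffing;        /* the real code additionally enforces
                                                 a 4-byte minimum for MPEG-4 */
    }
    return stuffing;                          /* stuffing bytes the caller must write */
}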
if(buffer_size){ double expected_size= rcc->buffer_index; + double q_limit; if(min_rate){ double d= 2*(buffer_size - expected_size)/buffer_size; @@ -406,7 +421,13 @@ static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q, else if(d<0.0001) d=0.0001; q*= pow(d, 1.0/s->avctx->rc_buffer_aggressivity); - q= FFMIN(q, bits2qp(rce, FFMAX((min_rate - buffer_size + rcc->buffer_index)*2, 1))); + q_limit= bits2qp(rce, FFMAX((min_rate - buffer_size + rcc->buffer_index)*3, 1)); + if(q > q_limit){ + if(s->avctx->debug&FF_DEBUG_RC){ + av_log(s->avctx, AV_LOG_DEBUG, "limiting QP %f -> %f\n", q, q_limit); + } + q= q_limit; + } } if(max_rate){ @@ -415,7 +436,13 @@ static double modify_qscale(MpegEncContext *s, RateControlEntry *rce, double q, else if(d<0.0001) d=0.0001; q/= pow(d, 1.0/s->avctx->rc_buffer_aggressivity); - q= FFMAX(q, bits2qp(rce, FFMAX(rcc->buffer_index/2, 1))); + q_limit= bits2qp(rce, FFMAX(rcc->buffer_index/3, 1)); + if(q < q_limit){ + if(s->avctx->debug&FF_DEBUG_RC){ + av_log(s->avctx, AV_LOG_DEBUG, "limiting QP %f -> %f\n", q, q_limit); + } + q= q_limit; + } } } //printf("q:%f max:%f min:%f size:%f index:%d bits:%f agr:%f\n", q,max_rate, min_rate, buffer_size, rcc->buffer_index, bits, s->avctx->rc_buffer_aggressivity); @@ -493,7 +520,7 @@ static void adaptive_quantization(MpegEncContext *s, double q){ if(spat_cplx < 4) spat_cplx= 4; //FIXME finetune if(temp_cplx < 4) temp_cplx= 4; //FIXME finetune - if((s->mb_type[mb_xy]&MB_TYPE_INTRA)){//FIXME hq mode + if((s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTRA)){//FIXME hq mode cplx= spat_cplx; factor= 1.0 + p_masking; }else{ @@ -564,6 +591,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) int picture_number= s->picture_number; int64_t wanted_bits; RateControlContext *rcc= &s->rc_context; + AVCodecContext *a= s->avctx; RateControlEntry local_rce, *rce; double bits; double rate_factor; @@ -593,7 +621,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) } diff= s->total_bits - wanted_bits; - br_compensation= (s->bit_rate_tolerance - diff)/s->bit_rate_tolerance; + br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance; if(br_compensation<=0.0) br_compensation=0.001; var= pict_type == I_TYPE ? 
pic->mb_var_sum : pic->mc_mb_var_sum; @@ -615,9 +643,6 @@ float ff_rate_estimate_qscale(MpegEncContext *s) rce->b_code = s->b_code; rce->misc_bits= 1; - if(picture_number>0) - update_rc_buffer(s, s->frame_bits); - bits= predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var)); if(pict_type== I_TYPE){ rce->i_count = s->mb_num; @@ -648,8 +673,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s) assert(q>0.0); if(pict_type==P_TYPE || s->intra_only){ //FIXME type dependant blur like in 2-pass - rcc->short_term_qsum*=s->qblur; - rcc->short_term_qcount*=s->qblur; + rcc->short_term_qsum*=a->qblur; + rcc->short_term_qcount*=a->qblur; rcc->short_term_qsum+= q; rcc->short_term_qcount++; @@ -667,7 +692,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) } if(s->avctx->debug&FF_DEBUG_RC){ - printf("%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f size:%d var:%d/%d br:%d fps:%d\n", + av_log(s->avctx, AV_LOG_DEBUG, "%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f size:%d var:%d/%d br:%d fps:%d\n", av_get_pict_type_char(pict_type), qmin, q, qmax, picture_number, (int)wanted_bits/1000, (int)s->total_bits/1000, br_compensation, short_term_q, s->frame_bits, pic->mb_var_sum, pic->mc_mb_var_sum, s->bit_rate/1000, (int)fps ); @@ -701,6 +726,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s) static int init_pass2(MpegEncContext *s) { RateControlContext *rcc= &s->rc_context; + AVCodecContext *a= s->avctx; int i; double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; double complexity[5]={0,0,0,0,0}; // aproximate bits at quant=1 @@ -712,7 +738,7 @@ static int init_pass2(MpegEncContext *s) double rate_factor=0; double step; //int last_i_frame=-10000000; - const int filter_size= (int)(s->qblur*4) | 1; + const int filter_size= (int)(a->qblur*4) | 1; double expected_bits; double *qscale, *blured_qscale; @@ -732,7 +758,7 @@ static int init_pass2(MpegEncContext *s) all_const_bits= const_bits[I_TYPE] + const_bits[P_TYPE] + const_bits[B_TYPE]; if(all_available_bits < all_const_bits){ - fprintf(stderr, "requested bitrate is to low\n"); + av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is to low\n"); return -1; } @@ -793,7 +819,7 @@ static int init_pass2(MpegEncContext *s) for(j=0; j<filter_size; j++){ int index= i+j-filter_size/2; double d= index-i; - double coeff= s->qblur==0 ? 1.0 : exp(-d*d/(s->qblur * s->qblur)); + double coeff= a->qblur==0 ? 
1.0 : exp(-d*d/(a->qblur * a->qblur)); if(index < 0 || index >= rcc->num_entries) continue; if(pict_type != rcc->entry[index].new_pict_type) continue; @@ -810,7 +836,7 @@ static int init_pass2(MpegEncContext *s) rce->new_qscale= modify_qscale(s, rce, blured_qscale[i], i); bits= qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits; //printf("%d %f\n", rce->new_bits, blured_qscale[i]); - update_rc_buffer(s, bits); + bits += 8*ff_vbv_update(s, bits); rce->expected_bits= expected_bits; expected_bits += bits; @@ -823,7 +849,7 @@ static int init_pass2(MpegEncContext *s) av_free(blured_qscale); if(abs(expected_bits/all_available_bits - 1.0) > 0.01 ){ - fprintf(stderr, "Error: 2pass curve failed to converge\n"); + av_log(s->avctx, AV_LOG_ERROR, "Error: 2pass curve failed to converge\n"); return -1; } diff --git a/src/libffmpeg/libavcodec/rational.c b/src/libffmpeg/libavcodec/rational.c index 92db8bbe3..ad085653a 100644 --- a/src/libffmpeg/libavcodec/rational.c +++ b/src/libffmpeg/libavcodec/rational.c @@ -51,6 +51,10 @@ AVRational av_sub_q(AVRational b, AVRational c){ return b; } +/** + * Converts a double precission floating point number to a AVRational. + * @param max the maximum allowed numerator and denominator + */ AVRational av_d2q(double d, int max){ AVRational a; int exponent= FFMAX( (int)(log(ABS(d) + 1e-20)/log(2)), 0); diff --git a/src/libffmpeg/libavcodec/roqvideo.c b/src/libffmpeg/libavcodec/roqvideo.c index f1cb52805..bd26961dc 100644 --- a/src/libffmpeg/libavcodec/roqvideo.c +++ b/src/libffmpeg/libavcodec/roqvideo.c @@ -411,7 +411,7 @@ static void roqvideo_decode_frame(RoqContext *ri) } break; default: - printf("Unknown vq code: %d\n", vqid); + av_log(ri->avctx, AV_LOG_ERROR, "Unknown vq code: %d\n", vqid); } } @@ -453,7 +453,7 @@ static int roq_decode_frame(AVCodecContext *avctx, *data_size = 0; if (avctx->get_buffer(avctx, &s->current_frame)) { - printf (" RoQ: get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, " RoQ: get_buffer() failed\n"); return -1; } s->y_stride = s->current_frame.linesize[0]; diff --git a/src/libffmpeg/libavcodec/rpza.c b/src/libffmpeg/libavcodec/rpza.c index e002c14dd..965fb729b 100644 --- a/src/libffmpeg/libavcodec/rpza.c +++ b/src/libffmpeg/libavcodec/rpza.c @@ -47,16 +47,12 @@ typedef struct RpzaContext { AVCodecContext *avctx; DSPContext dsp; AVFrame frame; - AVFrame prev_frame; unsigned char *buf; int size; } RpzaContext; -#undef BE_16 -#undef BE_32 - #define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1]) #define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \ (((uint8_t*)(x))[1] << 16) | \ @@ -74,7 +70,7 @@ typedef struct RpzaContext { total_blocks--; \ if (total_blocks < 0) \ { \ - printf("warning: block counter just went negative (this should not happen)\n"); \ + av_log(s->avctx, AV_LOG_ERROR, "warning: block counter just went negative (this should not happen)\n"); \ return; \ } \ } @@ -93,7 +89,6 @@ static void rpza_decode_stream(RpzaContext *s) unsigned char index, idx; unsigned short ta, tb; unsigned short *pixels = (unsigned short *)s->frame.data[0]; - unsigned short *prev_pixels = (unsigned short *)s->prev_frame.data[0]; int row_ptr = 0; int pixel_ptr = 0; @@ -103,7 +98,7 @@ static void rpza_decode_stream(RpzaContext *s) /* First byte is always 0xe1. 
Warn if it's different */ if (s->buf[stream_ptr] != 0xe1) - printf("First chunk byte is 0x%02x instead of 0x1e\n", + av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0x1e\n", s->buf[stream_ptr]); /* Get chunk size, ingnoring first byte */ @@ -112,7 +107,7 @@ static void rpza_decode_stream(RpzaContext *s) /* If length mismatch use size from MOV file and try to decode anyway */ if (chunk_size != s->size) - printf("MOV chunk size != encoded chunk size; using MOV chunk size\n"); + av_log(s->avctx, AV_LOG_ERROR, "MOV chunk size != encoded chunk size; using MOV chunk size\n"); chunk_size = s->size; @@ -143,15 +138,7 @@ static void rpza_decode_stream(RpzaContext *s) /* Skip blocks */ case 0x80: while (n_blocks--) { - block_ptr = row_ptr + pixel_ptr; - for (pixel_y = 0; pixel_y < 4; pixel_y++) { - for (pixel_x = 0; pixel_x < 4; pixel_x++){ - pixels[block_ptr] = prev_pixels[block_ptr]; - block_ptr++; - } - block_ptr += row_inc; - } - ADVANCE_BLOCK(); + ADVANCE_BLOCK(); } break; @@ -239,7 +226,7 @@ static void rpza_decode_stream(RpzaContext *s) /* Unknown opcode */ default: - printf("Unknown opcode %d in rpza chunk." + av_log(s->avctx, AV_LOG_ERROR, "Unknown opcode %d in rpza chunk." " Skip remaining %d bytes of chunk data.\n", opcode, chunk_size - stream_ptr); return; @@ -256,7 +243,7 @@ static int rpza_decode_init(AVCodecContext *avctx) avctx->has_b_frames = 0; dsputil_init(&s->dsp, avctx); - s->frame.data[0] = s->prev_frame.data[0] = NULL; + s->frame.data[0] = NULL; return 0; } @@ -267,23 +254,22 @@ static int rpza_decode_frame(AVCodecContext *avctx, { RpzaContext *s = (RpzaContext *)avctx->priv_data; + /* no supplementary picture */ + if (buf_size == 0) + return 0; + s->buf = buf; s->size = buf_size; s->frame.reference = 1; - if (avctx->get_buffer(avctx, &s->frame)) { - printf (" RPZA Video: get_buffer() failed\n"); + s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; + if (avctx->reget_buffer(avctx, &s->frame)) { + av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } rpza_decode_stream(s); - if (s->prev_frame.data[0]) - avctx->release_buffer(avctx, &s->prev_frame); - - /* shuffle frames */ - s->prev_frame = s->frame; - *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; @@ -295,8 +281,8 @@ static int rpza_decode_end(AVCodecContext *avctx) { RpzaContext *s = (RpzaContext *)avctx->priv_data; - if (s->prev_frame.data[0]) - avctx->release_buffer(avctx, &s->prev_frame); + if (s->frame.data[0]) + avctx->release_buffer(avctx, &s->frame); return 0; } diff --git a/src/libffmpeg/libavcodec/rv10.c b/src/libffmpeg/libavcodec/rv10.c index b057ae602..3b6346465 100644 --- a/src/libffmpeg/libavcodec/rv10.c +++ b/src/libffmpeg/libavcodec/rv10.c @@ -1,6 +1,7 @@ /* * RV10 codec * Copyright (c) 2000,2001 Fabrice Bellard. 
+ * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -216,7 +217,7 @@ int rv_decode_dc(MpegEncContext *s, int n) get_bits(&s->gb, 9); code = 1; } else { - fprintf(stderr, "chroma dc error\n"); + av_log(s->avctx, AV_LOG_ERROR, "chroma dc error\n"); return 0xffff; } } else { @@ -257,7 +258,6 @@ void rv10_encode_picture_header(MpegEncContext *s, int picture_number) put_bits(&s->pb, 3, 0); /* ignored */ } -#if 0 static int get_num(GetBitContext *gb) { int n, n1; @@ -270,7 +270,6 @@ static int get_num(GetBitContext *gb) return (n << 16) | n1; } } -#endif #endif //CONFIG_ENCODERS @@ -288,7 +287,7 @@ static int rv10_decode_picture_header(MpegEncContext *s) else s->pict_type = I_TYPE; //printf("h:%X ver:%d\n",h,s->rv10_version); - if(!marker) printf("marker missing\n"); + if(!marker) av_log(s->avctx, AV_LOG_ERROR, "marker missing\n"); pb_frame = get_bits(&s->gb, 1); #ifdef DEBUG @@ -296,13 +295,13 @@ static int rv10_decode_picture_header(MpegEncContext *s) #endif if (pb_frame){ - fprintf(stderr, "pb frame not supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "pb frame not supported\n"); return -1; } s->qscale = get_bits(&s->gb, 5); if(s->qscale==0){ - fprintf(stderr, "error, qscale:0\n"); + av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n"); return -1; } @@ -339,6 +338,99 @@ static int rv10_decode_picture_header(MpegEncContext *s) return mb_count; } +static int rv20_decode_picture_header(MpegEncContext *s) +{ + int seq, mb_pos, i; + + if(s->avctx->sub_id == 0x30202002 || s->avctx->sub_id == 0x30203002){ + if (get_bits(&s->gb, 3)){ + av_log(s->avctx, AV_LOG_ERROR, "unknown triplet set\n"); + return -1; + } + } + + i= get_bits(&s->gb, 2); + switch(i){ + case 0: s->pict_type= I_TYPE; break; + case 1: s->pict_type= I_TYPE; break; //hmm ... 
+ case 2: s->pict_type= P_TYPE; break; + case 3: s->pict_type= B_TYPE; break; + default: + av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n"); + return -1; + } + + if (get_bits(&s->gb, 1)){ + av_log(s->avctx, AV_LOG_ERROR, "unknown bit set\n"); + return -1; + } + + s->qscale = get_bits(&s->gb, 5); + if(s->qscale==0){ + av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n"); + return -1; + } + if(s->avctx->sub_id == 0x30203002){ + if (get_bits(&s->gb, 1)){ + av_log(s->avctx, AV_LOG_ERROR, "unknown bit2 set\n"); + return -1; + } + } + + if(s->avctx->has_b_frames){ + if (get_bits(&s->gb, 1)){ +// av_log(s->avctx, AV_LOG_ERROR, "unknown bit3 set\n"); +// return -1; + } + seq= get_bits(&s->gb, 15); + mb_pos= get_bits(&s->gb, av_log2(s->mb_num-1)+1); + s->mb_x= mb_pos % s->mb_width; + s->mb_y= mb_pos / s->mb_width; + }else{ + seq= get_bits(&s->gb, 8)*128; + mb_pos= ff_h263_decode_mba(s); + } +//printf("%d\n", seq); + seq |= s->time &~0x7FFF; + if(seq - s->time > 0x4000) seq -= 0x8000; + if(seq - s->time < -0x4000) seq += 0x8000; + if(seq != s->time){ + if(s->pict_type!=B_TYPE){ + s->time= seq; + s->pp_time= s->time - s->last_non_b_time; + s->last_non_b_time= s->time; + }else{ + s->time= seq; + s->pb_time= s->pp_time - (s->last_non_b_time - s->time); + if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){ + printf("messed up order, seeking?, skiping current b frame\n"); + return FRAME_SKIPED; + } + } + } +// printf("%d %d %d %d %d\n", seq, (int)s->time, (int)s->last_non_b_time, s->pp_time, s->pb_time); + + s->no_rounding= get_bits1(&s->gb); + + s->f_code = 1; + s->unrestricted_mv = 1; + s->h263_aic= s->pict_type == I_TYPE; +// s->alt_inter_vlc=1; +// s->obmc=1; +// s->umvplus=1; + s->modified_quant=1; + s->loop_filter=1; + + if(s->avctx->debug & FF_DEBUG_PICT_INFO){ + av_log(s->avctx, AV_LOG_INFO, "num:%5d x:%2d y:%2d type:%d qscale:%2d rnd:%d\n", + seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding); + } + + assert(s->pict_type != B_TYPE || !s->low_delay); + + return s->mb_width*s->mb_height - mb_pos; +} + static int rv10_decode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; @@ -346,37 +438,47 @@ static int rv10_decode_init(AVCodecContext *avctx) s->avctx= avctx; s->out_format = FMT_H263; + s->codec_id= avctx->codec_id; s->width = avctx->width; s->height = avctx->height; - s->h263_rv10 = 1; switch(avctx->sub_id){ case 0x10000000: s->rv10_version= 0; s->h263_long_vectors=0; + s->low_delay=1; break; case 0x10003000: s->rv10_version= 3; s->h263_long_vectors=1; + s->low_delay=1; break; case 0x10003001: s->rv10_version= 3; s->h263_long_vectors=0; + s->low_delay=1; + break; + case 0x20001000: + case 0x20100001: + case 0x20101001: + s->low_delay=1; + break; + case 0x20200002: + case 0x30202002: + case 0x30203002: + s->low_delay=0; + s->avctx->has_b_frames=1; break; default: - fprintf(stderr, "unknown header %X\n", avctx->sub_id); + av_log(s->avctx, AV_LOG_ERROR, "unknown header %X\n", avctx->sub_id); } //printf("ver:%X\n", avctx->sub_id); - s->flags= avctx->flags; - if (MPV_common_init(s) < 0) return -1; h263_decode_init_vlc(s); - s->y_dc_scale_table= - s->c_dc_scale_table= ff_mpeg1_dc_scale_table; s->progressive_sequence=1; /* init rv vlc */ @@ -410,43 +512,72 @@ static int rv10_decode_packet(AVCodecContext *avctx, int i, mb_count, mb_pos, left; init_get_bits(&s->gb, buf, buf_size*8); - - mb_count = rv10_decode_picture_header(s); +#if 0 + for(i=0; i<buf_size*8 && i<200; i++) + printf("%d", get_bits1(&s->gb)); + printf("\n"); + return 
0; +#endif + if(s->codec_id ==CODEC_ID_RV10) + mb_count = rv10_decode_picture_header(s); + else + mb_count = rv20_decode_picture_header(s); if (mb_count < 0) { - fprintf(stderr, "HEADER ERROR\n"); + av_log(s->avctx, AV_LOG_ERROR, "HEADER ERROR\n"); return -1; } if (s->mb_x >= s->mb_width || s->mb_y >= s->mb_height) { - fprintf(stderr, "POS ERROR %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "POS ERROR %d %d\n", s->mb_x, s->mb_y); return -1; } mb_pos = s->mb_y * s->mb_width + s->mb_x; left = s->mb_width * s->mb_height - mb_pos; if (mb_count > left) { - fprintf(stderr, "COUNT ERROR\n"); + av_log(s->avctx, AV_LOG_ERROR, "COUNT ERROR\n"); return -1; } +//if(s->pict_type == P_TYPE) return 0; - if (s->mb_x == 0 && s->mb_y == 0) { + if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) { if(MPV_frame_start(s, avctx) < 0) return -1; } + if(s->pict_type == B_TYPE){ //FIXME remove after cleaning mottion_val indexing + memset(s->current_picture.motion_val[0], 0, sizeof(int16_t)*2*(s->mb_width*2+2)*(s->mb_height*2+2)); + } + #ifdef DEBUG printf("qscale=%d\n", s->qscale); #endif /* default quantization values */ - s->y_dc_scale = 8; - s->c_dc_scale = 8; + if(s->codec_id== CODEC_ID_RV10){ + if(s->mb_y==0) s->first_slice_line=1; + }else{ + s->first_slice_line=1; + s->resync_mb_x= s->mb_x; + s->resync_mb_y= s->mb_y; + } + if(s->h263_aic){ + s->y_dc_scale_table= + s->c_dc_scale_table= ff_aic_dc_scale_table; + }else{ + s->y_dc_scale_table= + s->c_dc_scale_table= ff_mpeg1_dc_scale_table; + } + + if(s->modified_quant) + s->chroma_qscale_table= ff_h263_chroma_qscale_table; + + ff_set_qscale(s, s->qscale); + s->rv10_first_dc_coded[0] = 0; s->rv10_first_dc_coded[1] = 0; s->rv10_first_dc_coded[2] = 0; - - if(s->mb_y==0) s->first_slice_line=1; - +//printf("%d %X %X\n", s->pict_type, s->current_picture.motion_val[0], s->current_picture.motion_val[1]); s->block_wrap[0]= s->block_wrap[1]= s->block_wrap[2]= @@ -456,6 +587,7 @@ static int rv10_decode_packet(AVCodecContext *avctx, ff_init_block_index(s); /* decode each macroblock */ for(i=0;i<mb_count;i++) { + int ret; ff_update_block_index(s); #ifdef DEBUG printf("**mb x=%d y=%d\n", s->mb_x, s->mb_y); @@ -464,18 +596,26 @@ static int rv10_decode_packet(AVCodecContext *avctx, s->dsp.clear_blocks(s->block[0]); s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; - if (ff_h263_decode_mb(s, s->block) == SLICE_ERROR) { - fprintf(stderr, "ERROR at MB %d %d\n", s->mb_x, s->mb_y); + ret=ff_h263_decode_mb(s, s->block); + + if (ret == SLICE_ERROR) { + av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y); return -1; } - ff_h263_update_motion_val(s); + if(s->pict_type != B_TYPE) + ff_h263_update_motion_val(s); MPV_decode_mb(s, s->block); + if(s->loop_filter) + ff_h263_loop_filter(s); + if (++s->mb_x == s->mb_width) { s->mb_x = 0; s->mb_y++; ff_init_block_index(s); - s->first_slice_line=0; } + if(s->mb_x == s->resync_mb_x) + s->first_slice_line=0; + if(ret == SLICE_END) break; } return buf_size; @@ -498,7 +638,7 @@ static int rv10_decode_frame(AVCodecContext *avctx, *data_size = 0; return 0; } - + if(avctx->slice_count){ for(i=0; i<avctx->slice_count; i++){ int offset= avctx->slice_offset[i]; @@ -516,12 +656,22 @@ static int rv10_decode_frame(AVCodecContext *avctx, if( rv10_decode_packet(avctx, buf, buf_size) < 0 ) return -1; } + + if(s->pict_type == B_TYPE){ //FIXME remove after cleaning mottion_val indexing + memset(s->current_picture.motion_val[0], 0, sizeof(int16_t)*2*(s->mb_width*2+2)*(s->mb_height*2+2)); + } 
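/* rv20_decode_picture_header() above recovers a full temporal position from
 * the truncated sequence field in the slice header (15 bits, or 8 bits scaled
 * by 128 for the short form) by splicing in the high bits of the last known
 * time and correcting for wrap-around.  A minimal sketch of that unwrapping,
 * assuming the same 15-bit window as the code above (the function name is
 * illustrative): */
static int unwrap_seq_sketch(int seq15, int last_time)
{
    int seq = seq15 | (last_time & ~0x7FFF);  /* splice in the old high bits */

    if (seq - last_time >  0x4000)            /* more than half a period ahead: */
        seq -= 0x8000;                        /* it really belongs one period earlier */
    if (seq - last_time < -0x4000)            /* more than half a period behind: */
        seq += 0x8000;                        /* it really belongs one period later */
    return seq;                               /* candidate closest to last_time */
}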
if(s->mb_y>=s->mb_height){ MPV_frame_end(s); - - *pict= *(AVFrame*)&s->current_picture; + if(s->pict_type==B_TYPE || s->low_delay){ + *pict= *(AVFrame*)&s->current_picture; + ff_print_debug_info(s, pict); + } else { + *pict= *(AVFrame*)&s->last_picture; + ff_print_debug_info(s, pict); + } + *data_size = sizeof(AVFrame); }else{ *data_size = 0; @@ -541,3 +691,16 @@ AVCodec rv10_decoder = { rv10_decode_frame, CODEC_CAP_DR1 }; + +AVCodec rv20_decoder = { + "rv20", + CODEC_TYPE_VIDEO, + CODEC_ID_RV20, + sizeof(MpegEncContext), + rv10_decode_init, + NULL, + rv10_decode_end, + rv10_decode_frame, + CODEC_CAP_DR1 +}; + diff --git a/src/libffmpeg/libavcodec/svq1.c b/src/libffmpeg/libavcodec/svq1.c index 00e8d5504..6a15270b7 100644 --- a/src/libffmpeg/libavcodec/svq1.c +++ b/src/libffmpeg/libavcodec/svq1.c @@ -556,7 +556,6 @@ static uint16_t svq1_packet_checksum (uint8_t *data, int length, int value) { return value; } -#if 0 static uint16_t svq1_component_checksum (uint16_t *pixels, int pitch, int width, int height, int value) { int x, y; @@ -571,7 +570,6 @@ static uint16_t svq1_component_checksum (uint16_t *pixels, int pitch, return value; } -#endif static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) { uint8_t seed; @@ -615,7 +613,7 @@ static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) { svq1_parse_string (bitbuf, (char *) msg); - printf ("embedded message: \"%s\"\n", (char *) msg); + av_log(s->avctx, AV_LOG_INFO, "embedded message: \"%s\"\n", (char *) msg); } skip_bits (bitbuf, 2); @@ -791,7 +789,6 @@ static int svq1_decode_init(AVCodecContext *avctx) s->codec_id= avctx->codec->id; avctx->pix_fmt = PIX_FMT_YUV410P; avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames - s->flags= avctx->flags; if (MPV_common_init(s) < 0) return -1; init_vlc(&svq1_block_type, 2, 4, diff --git a/src/libffmpeg/libavcodec/svq3.c b/src/libffmpeg/libavcodec/svq3.c index 8f9f545e7..c8720c07a 100644 --- a/src/libffmpeg/libavcodec/svq3.c +++ b/src/libffmpeg/libavcodec/svq3.c @@ -690,15 +690,17 @@ static int svq3_decode_slice_header (H264Context *h) { if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) { /* TODO: what? 
*/ - fprintf (stderr, "unsupported slice header (%02X)\n", header); + av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header); return -1; } else { int length = (header >> 5) & 3; h->next_slice_index = s->gb.index + 8*show_bits (&s->gb, 8*length) + 8*length; - if (h->next_slice_index > s->gb.size_in_bits) + if (h->next_slice_index > s->gb.size_in_bits){ + av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n"); return -1; + } s->gb.size_in_bits = h->next_slice_index - 8*(length - 1); s->gb.index += 8; @@ -709,8 +711,10 @@ static int svq3_decode_slice_header (H264Context *h) { } } - if ((i = svq3_get_ue_golomb (&s->gb)) == INVALID_VLC || i >= 3) + if ((i = svq3_get_ue_golomb (&s->gb)) == INVALID_VLC || i >= 3){ + av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i); return -1; + } h->slice_type = golomb_to_pict_type[i]; @@ -766,7 +770,9 @@ static int svq3_decode_frame (AVCodecContext *avctx, *data_size = 0; s->flags = avctx->flags; - + s->flags2 = avctx->flags2; + s->unrestricted_mv = 1; + if (!s->context_initialized) { s->width = avctx->width; s->height = avctx->height; @@ -841,7 +847,7 @@ static int svq3_decode_frame (AVCodecContext *avctx, s->picture_number = h->slice_num; if(avctx->debug&FF_DEBUG_PICT_INFO){ - printf("%c hpel:%d, tpel:%d aqp:%d qp:%d\n", + av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d\n", av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag, s->adaptive_quant, s->qscale ); @@ -874,7 +880,7 @@ static int svq3_decode_frame (AVCodecContext *avctx, h->frame_num_offset += 256; } if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) { - printf ("error in B-frame picture id\n"); + av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n"); return -1; } } else { @@ -920,7 +926,7 @@ static int svq3_decode_frame (AVCodecContext *avctx, mb_type += 4; } if (mb_type > 33 || svq3_decode_mb (h, mb_type)) { - fprintf (stderr, "error while decoding MB %d %d\n", s->mb_x, s->mb_y); + av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y); return -1; } diff --git a/src/libffmpeg/libavcodec/utils.c b/src/libffmpeg/libavcodec/utils.c index 2ba3e90c3..2fb82c347 100644 --- a/src/libffmpeg/libavcodec/utils.c +++ b/src/libffmpeg/libavcodec/utils.c @@ -1,6 +1,8 @@ /* * utils for libavcodec * Copyright (c) 2001 Fabrice Bellard. 
+ * Copyright (c) 2003 Michel Bardiaux for the av_log API + * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -25,6 +27,7 @@ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" +#include <stdarg.h> void *av_mallocz(unsigned int size) { @@ -123,13 +126,14 @@ typedef struct InternalBuffer{ int last_pic_num; uint8_t *base[4]; uint8_t *data[4]; + int linesize[4]; }InternalBuffer; #define INTERNAL_BUFFER_SIZE 32 #define ALIGN(x, a) (((x)+(a)-1)&~((a)-1)) -static void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ int w_align= 1; int h_align= 1; @@ -170,6 +174,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ int w= s->width; int h= s->height; InternalBuffer *buf; + int *picture_number; assert(pic->data[0]==NULL); assert(INTERNAL_BUFFER_SIZE > s->internal_buffer_count); @@ -186,10 +191,12 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ #endif buf= &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count]; - + picture_number= &(((InternalBuffer*)s->internal_buffer)[INTERNAL_BUFFER_SIZE-1]).last_pic_num; //FIXME ugly hack + (*picture_number)++; + if(buf->base[0]){ - pic->age= pic->coded_picture_number - buf->last_pic_num; - buf->last_pic_num= pic->coded_picture_number; + pic->age= *picture_number - buf->last_pic_num; + buf->last_pic_num= *picture_number; }else{ int h_chroma_shift, v_chroma_shift; int s_align, pixel_size; @@ -231,24 +238,25 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ const int h_shift= i==0 ? 0 : h_chroma_shift; const int v_shift= i==0 ? 
0 : v_chroma_shift; - pic->linesize[i]= ALIGN(pixel_size*w>>h_shift, s_align); + buf->linesize[i]= ALIGN(pixel_size*w>>h_shift, s_align); - buf->base[i]= av_mallocz((pic->linesize[i]*h>>v_shift)+16); //FIXME 16 + buf->base[i]= av_mallocz((buf->linesize[i]*h>>v_shift)+16); //FIXME 16 if(buf->base[i]==NULL) return -1; - memset(buf->base[i], 128, pic->linesize[i]*h>>v_shift); + memset(buf->base[i], 128, buf->linesize[i]*h>>v_shift); if(s->flags&CODEC_FLAG_EMU_EDGE) buf->data[i] = buf->base[i]; else - buf->data[i] = buf->base[i] + ALIGN((pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift), s_align); + buf->data[i] = buf->base[i] + ALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift), s_align); } pic->age= 256*256*256*64; - pic->type= FF_BUFFER_TYPE_INTERNAL; } + pic->type= FF_BUFFER_TYPE_INTERNAL; for(i=0; i<4; i++){ pic->base[i]= buf->base[i]; pic->data[i]= buf->data[i]; + pic->linesize[i]= buf->linesize[i]; } s->internal_buffer_count++; @@ -283,7 +291,39 @@ void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ //printf("R%X\n", pic->opaque); } -static enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, enum PixelFormat * fmt){ +int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){ + AVFrame temp_pic; + int i; + + /* If no picture return a new buffer */ + if(pic->data[0] == NULL) { + /* We will copy from buffer, so must be readable */ + pic->buffer_hints |= FF_BUFFER_HINTS_READABLE; + return s->get_buffer(s, pic); + } + + /* If internal buffer type return the same buffer */ + if(pic->type == FF_BUFFER_TYPE_INTERNAL) + return 0; + + /* + * Not internal type and reget_buffer not overridden, emulate cr buffer + */ + temp_pic = *pic; + for(i = 0; i < 4; i++) + pic->data[i] = pic->base[i] = NULL; + pic->opaque = NULL; + /* Allocate new frame */ + if (s->get_buffer(s, pic)) + return -1; + /* Copy image data from old buffer to new buffer */ + img_copy((AVPicture*)pic, (AVPicture*)&temp_pic, s->pix_fmt, s->width, + s->height); + s->release_buffer(s, &temp_pic); // Release old frame + return 0; +} + +enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, enum PixelFormat * fmt){ return fmt[0]; } @@ -315,10 +355,12 @@ void avcodec_get_context_defaults(AVCodecContext *s){ s->lmin= FF_QP2LAMBDA * s->qmin; s->lmax= FF_QP2LAMBDA * s->qmax; s->sample_aspect_ratio= (AVRational){0,1}; + s->ildct_cmp= FF_CMP_VSAD; s->intra_quant_bias= FF_DEFAULT_QUANT_BIAS; s->inter_quant_bias= FF_DEFAULT_QUANT_BIAS; s->palctrl = NULL; + s->reget_buffer= avcodec_default_reget_buffer; } /** @@ -491,8 +533,7 @@ AVCodec *avcodec_find_decoder_by_name(const char *name) return NULL; } -#if 0 -static AVCodec *avcodec_find(enum CodecID id) +AVCodec *avcodec_find(enum CodecID id) { AVCodec *p; p = first_avcodec; @@ -503,7 +544,6 @@ static AVCodec *avcodec_find(enum CodecID id) } return NULL; } -#endif void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) { @@ -763,3 +803,57 @@ int64_t av_rescale(int64_t a, int b, int c){ return ((h/c)<<32) + l/c; } + +/* av_log API */ + +#ifdef AV_LOG_TRAP_PRINTF +#undef stderr +#undef fprintf +#endif + +static int av_log_level = AV_LOG_DEBUG; + +static void av_log_default_callback(AVCodecContext* avctx, int level, const char* fmt, va_list vl) +{ + static int print_prefix=1; + + if(level>av_log_level) + return; + if(avctx && print_prefix) + fprintf(stderr, "[%s @ %p]", avctx->codec ? 
avctx->codec->name : "?", avctx); + + print_prefix= (int)strstr(fmt, "\n"); + + vfprintf(stderr, fmt, vl); +} + +static void (*av_log_callback)(AVCodecContext*, int, const char*, va_list) = av_log_default_callback; + +void av_log(AVCodecContext* avctx, int level, const char *fmt, ...) +{ + va_list vl; + va_start(vl, fmt); + av_vlog(avctx, level, fmt, vl); + va_end(vl); +} + +void av_vlog(AVCodecContext* avctx, int level, const char *fmt, va_list vl) +{ + av_log_callback(avctx, level, fmt, vl); +} + +int av_log_get_level(void) +{ + return av_log_level; +} + +void av_log_set_level(int level) +{ + av_log_level = level; +} + +void av_log_set_callback(void (*callback)(AVCodecContext*, int, const char*, va_list)) +{ + av_log_callback = callback; +} + diff --git a/src/libffmpeg/libavcodec/vcr1.c b/src/libffmpeg/libavcodec/vcr1.c index de1688443..4b8c9fc41 100644 --- a/src/libffmpeg/libavcodec/vcr1.c +++ b/src/libffmpeg/libavcodec/vcr1.c @@ -57,7 +57,7 @@ static int decode_frame(AVCodecContext *avctx, p->reference= 0; if(avctx->get_buffer(avctx, p) < 0){ - fprintf(stderr, "get_buffer() failed\n"); + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type= I_TYPE; @@ -158,14 +158,12 @@ static int decode_init(AVCodecContext *avctx){ return 0; } -#ifdef CONFIG_ENCODERS static int encode_init(AVCodecContext *avctx){ common_init(avctx); return 0; } -#endif static int decode_end(AVCodecContext *avctx){ diff --git a/src/libffmpeg/libavcodec/vp3.c b/src/libffmpeg/libavcodec/vp3.c index 430212ad8..636d2dfd8 100644 --- a/src/libffmpeg/libavcodec/vp3.c +++ b/src/libffmpeg/libavcodec/vp3.c @@ -216,12 +216,14 @@ static int ModeAlphabet[7][CODING_MODE_COUNT] = typedef struct Vp3DecodeContext { AVCodecContext *avctx; int theora, theora_tables; + int version; int width, height; AVFrame golden_frame; AVFrame last_frame; AVFrame current_frame; int keyframe; DSPContext dsp; + int flipped_image; int quality_index; int last_quality_index; @@ -301,6 +303,9 @@ typedef struct Vp3DecodeContext { uint8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16 } Vp3DecodeContext; +static int theora_decode_comments(AVCodecContext *avctx, GetBitContext gb); +static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb); + /************************************************************************ * VP3 I/DCT ************************************************************************/ @@ -314,7 +319,7 @@ typedef struct Vp3DecodeContext { #define xC6S2 25080 #define xC7S1 12785 -static void vp3_idct_c(int16_t *input_data, int16_t *dequant_matrix, +void vp3_idct_c(int16_t *input_data, int16_t *dequant_matrix, int16_t *output_data) { int32_t intermediate_data[64]; @@ -548,7 +553,7 @@ static void vp3_idct_c(int16_t *input_data, int16_t *dequant_matrix, } } -static void vp3_idct_put(int16_t *input_data, int16_t *dequant_matrix, +void vp3_idct_put(int16_t *input_data, int16_t *dequant_matrix, uint8_t *dest, int stride) { int16_t transformed_data[64]; @@ -574,7 +579,7 @@ static void vp3_idct_put(int16_t *input_data, int16_t *dequant_matrix, } } -static void vp3_idct_add(int16_t *input_data, int16_t *dequant_matrix, +void vp3_idct_add(int16_t *input_data, int16_t *dequant_matrix, uint8_t *dest, int stride) { int16_t transformed_data[64]; @@ -1109,7 +1114,7 @@ static void unpack_token(GetBitContext *gb, int token, int *zero_run, break; default: - printf (" vp3: help! Got a bad token: %d > 31\n", token); + av_log(NULL, AV_LOG_ERROR, " vp3: help! 
Got a bad token: %d > 31\n", token); break; } @@ -1549,7 +1554,7 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) /* if the fragment is in bounds, check its coding status */ current_fragment = s->superblock_fragments[i * 16 + j]; if (current_fragment >= s->fragment_count) { - printf (" vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n", + av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n", current_fragment, s->fragment_count); return 1; } @@ -1685,7 +1690,7 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) (s->macroblock_coding[current_macroblock] == MODE_COPY)) continue; if (current_macroblock >= s->macroblock_count) { - printf (" vp3:unpack_modes(): bad macroblock number (%d >= %d)\n", + av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad macroblock number (%d >= %d)\n", current_macroblock, s->macroblock_count); return 1; } @@ -1703,7 +1708,7 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) if (current_fragment == -1) continue; if (current_fragment >= s->fragment_count) { - printf (" vp3:unpack_modes(): bad fragment number (%d >= %d)\n", + av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad fragment number (%d >= %d)\n", current_fragment, s->fragment_count); return 1; } @@ -1764,14 +1769,14 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) (s->macroblock_coding[current_macroblock] == MODE_COPY)) continue; if (current_macroblock >= s->macroblock_count) { - printf (" vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n", + av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n", current_macroblock, s->macroblock_count); return 1; } current_fragment = s->macroblock_fragments[current_macroblock * 6]; if (current_fragment >= s->fragment_count) { - printf (" vp3:unpack_vectors(): bad fragment number (%d >= %d\n", + av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d\n", current_fragment, s->fragment_count); return 1; } @@ -1887,7 +1892,7 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) if (current_fragment == -1) continue; if (current_fragment >= s->fragment_count) { - printf (" vp3:unpack_vectors(): bad fragment number (%d >= %d)\n", + av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n", current_fragment, s->fragment_count); return 1; } @@ -1929,7 +1934,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, if ((first_fragment >= s->fragment_count) || (last_fragment >= s->fragment_count)) { - printf (" vp3:unpack_vlcs(): bad fragment number (%d -> %d ?)\n", + av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vlcs(): bad fragment number (%d -> %d ?)\n", first_fragment, last_fragment); return 0; } @@ -2365,7 +2370,8 @@ static void render_fragments(Vp3DecodeContext *s, output_plane = s->current_frame.data[0]; last_plane = s->last_frame.data[0]; golden_plane = s->golden_frame.data[0]; - stride = -s->current_frame.linesize[0]; + stride = s->current_frame.linesize[0]; + if (!s->flipped_image) stride = -stride; upper_motion_limit = 7 * s->current_frame.linesize[0]; lower_motion_limit = height * s->current_frame.linesize[0] + width - 8; } else if (plane == 1) { @@ -2373,7 +2379,8 @@ static void render_fragments(Vp3DecodeContext *s, output_plane = s->current_frame.data[1]; last_plane = s->last_frame.data[1]; golden_plane = s->golden_frame.data[1]; - stride = -s->current_frame.linesize[1]; + stride = s->current_frame.linesize[1]; + 
if (!s->flipped_image) stride = -stride; upper_motion_limit = 7 * s->current_frame.linesize[1]; lower_motion_limit = height * s->current_frame.linesize[1] + width - 8; } else { @@ -2381,7 +2388,8 @@ static void render_fragments(Vp3DecodeContext *s, output_plane = s->current_frame.data[2]; last_plane = s->last_frame.data[2]; golden_plane = s->golden_frame.data[2]; - stride = -s->current_frame.linesize[2]; + stride = s->current_frame.linesize[2]; + if (!s->flipped_image) stride = -stride; upper_motion_limit = 7 * s->current_frame.linesize[2]; lower_motion_limit = height * s->current_frame.linesize[2] + width - 8; } @@ -2393,12 +2401,13 @@ static void render_fragments(Vp3DecodeContext *s, for (x = 0; x < width; x += 8, i++) { if ((i < 0) || (i >= s->fragment_count)) { - printf (" vp3:render_fragments(): bad fragment number (%d)\n", i); + av_log(s->avctx, AV_LOG_ERROR, " vp3:render_fragments(): bad fragment number (%d)\n", i); return; } /* transform if this block was coded */ - if (s->all_fragments[i].coding_method != MODE_COPY) { + if ((s->all_fragments[i].coding_method != MODE_COPY) && + !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) { if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) || (s->all_fragments[i].coding_method == MODE_GOLDEN_MV)) @@ -2424,7 +2433,7 @@ static void render_fragments(Vp3DecodeContext *s, src_x= (motion_x>>1) + x; src_y= (motion_y>>1) + y; if ((motion_x == 0xbeef) || (motion_y == 0xbeef)) -printf (" help! got beefy vector! (%X, %X)\n", motion_x, motion_y); +av_log(s->avctx, AV_LOG_ERROR, " help! got beefy vector! (%X, %X)\n", motion_x, motion_y); motion_halfpel_index = motion_x & 0x01; motion_source += (motion_x >> 1); @@ -2436,6 +2445,7 @@ printf (" help! got beefy vector! (%X, %X)\n", motion_x, motion_y); if(src_x<0 || src_y<0 || src_x + 9 >= width || src_y + 9 >= height){ uint8_t *temp= s->edge_emu_buffer; if(stride<0) temp -= 9*stride; + else temp += 9*stride; ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, width, height); motion_source= temp; @@ -2547,6 +2557,53 @@ static void vp3_calculate_pixel_addresses(Vp3DecodeContext *s) } } +/* FIXME: this should be merged with the above! */ +static void theora_calculate_pixel_addresses(Vp3DecodeContext *s) +{ + + int i, x, y; + + /* figure out the first pixel addresses for each of the fragments */ + /* Y plane */ + i = 0; + for (y = 1; y <= s->fragment_height; y++) { + for (x = 0; x < s->fragment_width; x++) { + s->all_fragments[i++].first_pixel = + s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS - + s->golden_frame.linesize[0] + + x * FRAGMENT_PIXELS; + debug_init(" fragment %d, first pixel @ %d\n", + i-1, s->all_fragments[i-1].first_pixel); + } + } + + /* U plane */ + i = s->u_fragment_start; + for (y = 1; y <= s->fragment_height / 2; y++) { + for (x = 0; x < s->fragment_width / 2; x++) { + s->all_fragments[i++].first_pixel = + s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS - + s->golden_frame.linesize[1] + + x * FRAGMENT_PIXELS; + debug_init(" fragment %d, first pixel @ %d\n", + i-1, s->all_fragments[i-1].first_pixel); + } + } + + /* V plane */ + i = s->v_fragment_start; + for (y = 1; y <= s->fragment_height / 2; y++) { + for (x = 0; x < s->fragment_width / 2; x++) { + s->all_fragments[i++].first_pixel = + s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS - + s->golden_frame.linesize[2] + + x * FRAGMENT_PIXELS; + debug_init(" fragment %d, first pixel @ %d\n", + i-1, s->all_fragments[i-1].first_pixel); + } + } +} + /* * This is the ffmpeg/libavcodec API init function. 
*/ @@ -2559,6 +2616,11 @@ static int vp3_decode_init(AVCodecContext *avctx) int y_superblock_count; int c_superblock_count; + if (avctx->codec_tag == MKTAG('V','P','3','0')) + s->version = 0; + else + s->version = 1; + s->avctx = avctx; #if 0 s->width = avctx->width; @@ -2704,32 +2766,60 @@ static int vp3_decode_frame(AVCodecContext *avctx, if (s->theora && get_bits1(&gb)) { - printf("Theora: bad frame indicator\n"); - return -1; + int ptype = get_bits(&gb, 7); + + skip_bits(&gb, 6*8); /* "theora" */ + + switch(ptype) + { + case 1: + theora_decode_comments(avctx, gb); + break; + case 2: + theora_decode_tables(avctx, gb); + init_dequantizer(s); + break; + default: + av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype); + } + return buf_size; } s->keyframe = !get_bits1(&gb); - if (s->theora && s->keyframe) - { - if (get_bits1(&gb)) - printf("Theora: warning, unsupported keyframe coding type?!\n"); - skip_bits(&gb, 2); /* reserved? */ - } - else + if (!s->theora) skip_bits(&gb, 1); s->last_quality_index = s->quality_index; s->quality_index = get_bits(&gb, 6); + if (s->theora >= 0x030300) + skip_bits1(&gb); - debug_vp3(" VP3 %sframe #%d: Q index = %d\n", - s->keyframe?"key":"", counter, s->quality_index); + if (s->avctx->debug & FF_DEBUG_PICT_INFO) + av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n", + s->keyframe?"key":"", counter, s->quality_index); counter++; if (s->quality_index != s->last_quality_index) init_dequantizer(s); if (s->keyframe) { - /* skip the other 2 header bytes for now */ - if (!s->theora) skip_bits(&gb, 16); + if (!s->theora) + { + skip_bits(&gb, 4); /* width code */ + skip_bits(&gb, 4); /* height code */ + if (s->version) + { + s->version = get_bits(&gb, 5); + if (counter == 1) + av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version); + } + } + if (s->version || s->theora) + { + if (get_bits1(&gb)) + av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n"); + skip_bits(&gb, 2); /* reserved? */ + } + if (s->last_frame.data[0] == s->golden_frame.data[0]) { if (s->golden_frame.data[0]) avctx->release_buffer(avctx, &s->golden_frame); @@ -2743,7 +2833,7 @@ static int vp3_decode_frame(AVCodecContext *avctx, s->golden_frame.reference = 3; if(avctx->get_buffer(avctx, &s->golden_frame) < 0) { - printf("vp3: get_buffer() failed\n"); + av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); return -1; } @@ -2752,13 +2842,17 @@ static int vp3_decode_frame(AVCodecContext *avctx, /* time to figure out pixel addresses? 
*/ if (!s->pixel_addresses_inited) - vp3_calculate_pixel_addresses(s); - + { + if (!s->flipped_image) + vp3_calculate_pixel_addresses(s); + else + theora_calculate_pixel_addresses(s); + } } else { /* allocate a new current frame */ s->current_frame.reference = 3; if(avctx->get_buffer(avctx, &s->current_frame) < 0) { - printf("vp3: get_buffer() failed\n"); + av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); return -1; } } @@ -2786,7 +2880,7 @@ if (!s->keyframe) { unpack_vectors(s, &gb) || unpack_dct_coeffs(s, &gb)) { - printf(" vp3: could not decode frame\n"); + av_log(s->avctx, AV_LOG_ERROR, " vp3: could not decode frame\n"); return -1; } @@ -2850,16 +2944,28 @@ static int vp3_decode_end(AVCodecContext *avctx) return 0; } -/* current version is 3.2.0 */ - static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb) { Vp3DecodeContext *s = avctx->priv_data; + int major, minor, micro; + + major = get_bits(&gb, 8); /* version major */ + minor = get_bits(&gb, 8); /* version minor */ + micro = get_bits(&gb, 8); /* version micro */ + av_log(avctx, AV_LOG_INFO, "Theora bitstream version %d.%d.%d\n", + major, minor, micro); + + /* FIXME: endianess? */ + s->theora = (major << 16) | (minor << 8) | micro; + + /* 3.3.0 aka alpha3 has the same frame orientation as original vp3 */ + /* but previous versions have the image flipped relative to vp3 */ + if (s->theora < 0x030300) + { + s->flipped_image = 1; + av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n"); + } - skip_bits(&gb, 8); /* version major */ - skip_bits(&gb, 8); /* version minor */ - skip_bits(&gb, 8); /* version micro */ - s->width = get_bits(&gb, 16) << 4; s->height = get_bits(&gb, 16) << 4; @@ -2874,12 +2980,19 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb) skip_bits(&gb, 24); /* aspect numerator */ skip_bits(&gb, 24); /* aspect denumerator */ - skip_bits(&gb, 5); /* keyframe frequency force */ + if (s->theora < 0x030300) + skip_bits(&gb, 5); /* keyframe frequency force */ skip_bits(&gb, 8); /* colorspace */ skip_bits(&gb, 24); /* bitrate */ skip_bits(&gb, 6); /* last(?) quality index */ + if (s->theora >= 0x030300) + { + skip_bits(&gb, 5); /* keyframe frequency force */ + skip_bits(&gb, 5); /* spare bits */ + } + // align_get_bits(&gb); avctx->width = s->width; @@ -2895,14 +3008,17 @@ static int theora_decode_comments(AVCodecContext *avctx, GetBitContext gb) int nb_comments, i, tmp; tmp = get_bits(&gb, 32); - while(tmp-=8) - skip_bits(&gb, 8); + tmp = be2me_32(tmp); + while(tmp--) + skip_bits(&gb, 8); nb_comments = get_bits(&gb, 32); + nb_comments = be2me_32(nb_comments); for (i = 0; i < nb_comments; i++) { tmp = get_bits(&gb, 32); - while(tmp-=8) + tmp = be2me_32(tmp); + while(tmp--) skip_bits(&gb, 8); } @@ -2933,6 +3049,8 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb) /* inter coeffs */ for (i = 0; i < 64; i++) s->coded_inter_dequant[i] = get_bits(&gb, 8); + + /* FIXME: read huffmann tree.. 
*/ s->theora_tables = 1; diff --git a/src/libffmpeg/libavcodec/vqavideo.c b/src/libffmpeg/libavcodec/vqavideo.c index b3b310b64..97cbbd6d9 100644 --- a/src/libffmpeg/libavcodec/vqavideo.c +++ b/src/libffmpeg/libavcodec/vqavideo.c @@ -82,10 +82,6 @@ #define MAX_VECTORS (MAX_CODEBOOK_VECTORS + SOLID_PIXEL_VECTORS) #define MAX_CODEBOOK_SIZE (MAX_VECTORS * 4 * 4) -#undef LE_16 -#undef BE_16 -#undef BE_32 - #define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0]) #define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1]) #define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \ @@ -159,7 +155,7 @@ static int vqa_decode_init(AVCodecContext *avctx) /* make sure the extradata made it */ if (s->avctx->extradata_size != VQA_HEADER_SIZE) { - printf(" VQA video: expected extradata size of %d\n", VQA_HEADER_SIZE); + av_log(s->avctx, AV_LOG_ERROR, " VQA video: expected extradata size of %d\n", VQA_HEADER_SIZE); return -1; } @@ -210,8 +206,8 @@ static int vqa_decode_init(AVCodecContext *avctx) #define CHECK_COUNT() \ if (dest_index + count > dest_size) { \ - printf (" VQA video: decode_format80 problem: next op would overflow dest_index\n"); \ - printf (" VQA video: current dest_index = %d, count = %d, dest_size = %d\n", \ + av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: next op would overflow dest_index\n"); \ + av_log(NULL, AV_LOG_ERROR, " VQA video: current dest_index = %d, count = %d, dest_size = %d\n", \ dest_index, count, dest_size); \ return; \ } @@ -235,7 +231,7 @@ static void decode_format80(unsigned char *src, int src_size, return; if (dest_index >= dest_size) { - printf (" VQA video: decode_format80 problem: dest_index (%d) exceeded dest_size (%d)\n", + av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: dest_index (%d) exceeded dest_size (%d)\n", dest_index, dest_size); return; } @@ -303,7 +299,7 @@ static void decode_format80(unsigned char *src, int src_size, * not every entry needs to be filled */ if (check_size) if (dest_index < dest_size) - printf (" VQA video: decode_format80 problem: decode finished with dest_index (%d) < dest_size (%d)\n", + av_log(NULL, AV_LOG_ERROR, " VQA video: decode_format80 problem: decode finished with dest_index (%d) < dest_size (%d)\n", dest_index, dest_size); } @@ -371,7 +367,7 @@ static void vqa_decode_chunk(VqaContext *s) break; default: - printf (" VQA video: Found unknown chunk type: %c%c%c%c (%08X)\n", + av_log(s->avctx, AV_LOG_ERROR, " VQA video: Found unknown chunk type: %c%c%c%c (%08X)\n", (chunk_type >> 24) & 0xFF, (chunk_type >> 16) & 0xFF, (chunk_type >> 8) & 0xFF, @@ -388,7 +384,7 @@ static void vqa_decode_chunk(VqaContext *s) if ((cpl0_chunk != -1) && (cplz_chunk != -1)) { /* a chunk should not have both chunk types */ - printf (" VQA video: problem: found both CPL0 and CPLZ chunks\n"); + av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found both CPL0 and CPLZ chunks\n"); return; } @@ -405,7 +401,7 @@ static void vqa_decode_chunk(VqaContext *s) chunk_size = BE_32(&s->buf[cpl0_chunk + 4]); /* sanity check the palette size */ if (chunk_size / 3 > 256) { - printf (" VQA video: problem: found a palette chunk with %d colors\n", + av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found a palette chunk with %d colors\n", chunk_size / 3); return; } @@ -423,7 +419,7 @@ static void vqa_decode_chunk(VqaContext *s) if ((cbf0_chunk != -1) && (cbfz_chunk != -1)) { /* a chunk should not have both chunk types */ - printf (" VQA video: problem: found both CBF0 and CBFZ chunks\n"); + av_log(s->avctx, AV_LOG_ERROR, " 
VQA video: problem: found both CBF0 and CBFZ chunks\n"); return; } @@ -442,7 +438,7 @@ static void vqa_decode_chunk(VqaContext *s) chunk_size = BE_32(&s->buf[cbf0_chunk + 4]); /* sanity check the full codebook size */ if (chunk_size > MAX_CODEBOOK_SIZE) { - printf (" VQA video: problem: CBF0 chunk too large (0x%X bytes)\n", + av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: CBF0 chunk too large (0x%X bytes)\n", chunk_size); return; } @@ -455,7 +451,7 @@ static void vqa_decode_chunk(VqaContext *s) if (vptz_chunk == -1) { /* something is wrong if there is no VPTZ chunk */ - printf (" VQA video: problem: no VPTZ chunk found\n"); + av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: no VPTZ chunk found\n"); return; } @@ -512,7 +508,7 @@ static void vqa_decode_chunk(VqaContext *s) /* handle partial codebook */ if ((cbp0_chunk != -1) && (cbpz_chunk != -1)) { /* a chunk should not have both chunk types */ - printf (" VQA video: problem: found both CBP0 and CBPZ chunks\n"); + av_log(s->avctx, AV_LOG_ERROR, " VQA video: problem: found both CBP0 and CBPZ chunks\n"); return; } @@ -577,7 +573,7 @@ static int vqa_decode_frame(AVCodecContext *avctx, avctx->release_buffer(avctx, &s->frame); if (avctx->get_buffer(avctx, &s->frame)) { - printf (" VQA Video: get_buffer() failed\n"); + av_log(s->avctx, AV_LOG_ERROR, " VQA Video: get_buffer() failed\n"); return -1; } diff --git a/src/libffmpeg/libavcodec/wmadec.c b/src/libffmpeg/libavcodec/wmadec.c index 15b15f23f..25498c4d2 100644 --- a/src/libffmpeg/libavcodec/wmadec.c +++ b/src/libffmpeg/libavcodec/wmadec.c @@ -1187,6 +1187,11 @@ static int wma_decode_superframe(AVCodecContext *avctx, tprintf("***decode_superframe:\n"); + if(buf_size==0){ + s->last_superframe_len = 0; + return 0; + } + samples = data; init_get_bits(&s->gb, buf, buf_size*8); diff --git a/src/libffmpeg/libavcodec/wmv2.c b/src/libffmpeg/libavcodec/wmv2.c index b403a6b17..130a7f89d 100644 --- a/src/libffmpeg/libavcodec/wmv2.c +++ b/src/libffmpeg/libavcodec/wmv2.c @@ -59,8 +59,6 @@ static void wmv2_common_init(Wmv2Context * w){ ff_init_scantable(s->dsp.idct_permutation, &w->abt_scantable[1], wmv2_scantableB); } -#ifdef CONFIG_ENCODERS - static int encode_ext_header(Wmv2Context *w){ MpegEncContext * const s= &w->s; PutBitContext pb; @@ -86,6 +84,7 @@ static int encode_ext_header(Wmv2Context *w){ return 0; } +#ifdef CONFIG_ENCODERS static int wmv2_encode_init(AVCodecContext *avctx){ Wmv2Context * const w= avctx->priv_data; @@ -339,7 +338,7 @@ static int decode_ext_header(Wmv2Context *w){ s->slice_height = s->mb_height / code; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, tl_mv_flag:%d, mbrl_bit:%d, code:%d, flag3:%d, slices:%d\n", + av_log(s->avctx, AV_LOG_DEBUG, "fps:%d, br:%d, qpbit:%d, abt_flag:%d, j_type_bit:%d, tl_mv_flag:%d, mbrl_bit:%d, code:%d, flag3:%d, slices:%d\n", fps, s->bit_rate, w->mspel_bit, w->abt_flag, w->j_type_bit, w->top_left_mv_flag, w->per_mb_rl_bit, code, w->flag3, code); } @@ -367,9 +366,9 @@ return -1; s->pict_type = get_bits(&s->gb, 1) + 1; if(s->pict_type == I_TYPE){ code = get_bits(&s->gb, 7); - printf("I7:%X/\n", code); + av_log(s->avctx, AV_LOG_ERROR, "I7:%X/\n", code); } - s->qscale = get_bits(&s->gb, 5); + s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); if(s->qscale < 0) return -1; @@ -398,7 +397,7 @@ int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s) s->inter_intra_pred= 0; s->no_rounding = 1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("qscale:%d rlc:%d rl:%d dc:%d mbrl:%d 
j_type:%d \n", + av_log(s->avctx, AV_LOG_DEBUG, "qscale:%d rlc:%d rl:%d dc:%d mbrl:%d j_type:%d \n", s->qscale, s->rl_chroma_table_index, s->rl_table_index, @@ -448,7 +447,7 @@ int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s) s->no_rounding ^= 1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ - printf("rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n", + av_log(s->avctx, AV_LOG_DEBUG, "rl:%d rlc:%d dc:%d mv:%d mbrl:%d qp:%d mspel:%d per_mb_abt:%d abt_type:%d cbp:%d ii:%d\n", s->rl_table_index, s->rl_chroma_table_index, s->dc_table_index, @@ -472,17 +471,15 @@ s->picture_number++; //FIXME ? // return wmv2_decode_j_picture(w); //FIXME if(w->j_type){ - printf("J-type picture isnt supported\n"); + av_log(s->avctx, AV_LOG_ERROR, "J-type picture isnt supported\n"); return -1; } return 0; } -#if 0 static void ff_wmv2_decode_init(MpegEncContext *s){ } -#endif static inline int wmv2_decode_motion(Wmv2Context *w, int *mx_ptr, int *my_ptr){ MpegEncContext * const s= &w->s; @@ -510,11 +507,11 @@ static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){ wrap = s->block_wrap[0]; xy = s->block_index[0]; - mot_val = s->motion_val[xy]; + mot_val = s->current_picture.motion_val[0][xy]; - A = s->motion_val[xy - 1]; - B = s->motion_val[xy - wrap]; - C = s->motion_val[xy + 2 - wrap]; + A = s->current_picture.motion_val[0][xy - 1]; + B = s->current_picture.motion_val[0][xy - wrap]; + C = s->current_picture.motion_val[0][xy + 2 - wrap]; diff= FFMAX(ABS(A[0] - B[0]), ABS(A[1] - B[1])); @@ -607,7 +604,7 @@ static void wmv2_add_block(Wmv2Context *w, DCTELEM *block1, uint8_t *dst, int st memset(w->abt_block2[n], 0, 64*sizeof(DCTELEM)); break; default: - fprintf(stderr, "internal error in WMV2 abt\n"); + av_log(s->avctx, AV_LOG_ERROR, "internal error in WMV2 abt\n"); } } @@ -741,7 +738,7 @@ static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) s->mb_intra = 1; code = get_vlc2(&s->gb, mb_intra_vlc.table, MB_INTRA_VLC_BITS, 2); if (code < 0){ - fprintf(stderr, "II-cbp illegal at %d %d\n", s->mb_x, s->mb_y); + av_log(s->avctx, AV_LOG_ERROR, "II-cbp illegal at %d %d\n", s->mb_x, s->mb_y); return -1; } /* predict coded block pattern */ @@ -787,7 +784,7 @@ static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) for (i = 0; i < 6; i++) { if (wmv2_decode_inter_block(w, block[i], i, (cbp >> (5 - i)) & 1) < 0) { - fprintf(stderr,"\nerror while decoding inter block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); + av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding inter block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); return -1; } } @@ -808,7 +805,7 @@ static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]) for (i = 0; i < 6; i++) { if (msmpeg4_decode_block(s, block[i], i, (cbp >> (5 - i)) & 1, NULL) < 0) { - fprintf(stderr,"\nerror while decoding intra block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); + av_log(s->avctx, AV_LOG_ERROR, "\nerror while decoding intra block: %d x %d (%d)\n", s->mb_x, s->mb_y, i); return -1; } } diff --git a/src/libffmpeg/libavcodec/xan.c b/src/libffmpeg/libavcodec/xan.c index 742bd3705..4dad2e214 100644 --- a/src/libffmpeg/libavcodec/xan.c +++ b/src/libffmpeg/libavcodec/xan.c @@ -58,10 +58,6 @@ typedef struct XanContext { } XanContext; -#undef BE_16 -#undef LE_16 -#undef LE_32 - #define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1]) #define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0]) #define LE_32(x) ((((uint8_t*)(x))[3] << 24) | \ @@ -73,8 +69,6 @@ typedef struct XanContext { #define 
SCALEFACTOR 65536 #define CENTERSAMPLE 128 -#undef COMPUTE_Y - #define COMPUTE_Y(r, g, b) \ (unsigned char) \ ((y_r_table[r] + y_g_table[g] + y_b_table[b]) / SCALEFACTOR) @@ -122,7 +116,7 @@ static int xan_decode_init(AVCodecContext *avctx) if ((avctx->codec->id == CODEC_ID_XAN_WC3) && (s->avctx->palctrl == NULL)) { - printf (" WC3 Xan video: palette expected.\n"); + av_log(avctx, AV_LOG_ERROR, " WC3 Xan video: palette expected.\n"); return -1; } @@ -342,7 +336,7 @@ static void inline xan_wc3_build_palette(XanContext *s, break; default: - printf (" Xan WC3: Unhandled colorspace\n"); + av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n"); break; } } @@ -463,7 +457,7 @@ static void inline xan_wc3_output_pixel_run(XanContext *s, break; default: - printf (" Xan WC3: Unhandled colorspace\n"); + av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n"); break; } } @@ -606,7 +600,7 @@ static void inline xan_wc3_copy_pixel_run(XanContext *s, break; default: - printf (" Xan WC3: Unhandled colorspace\n"); + av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n"); break; } } @@ -767,7 +761,7 @@ static int xan_decode_frame(AVCodecContext *avctx, } if (avctx->get_buffer(avctx, &s->current_frame)) { - printf (" Xan Video: get_buffer() failed\n"); + av_log(s->avctx, AV_LOG_ERROR, " Xan Video: get_buffer() failed\n"); return -1; } s->current_frame.reference = 3; |
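
The bulk of this changeset replaces bare printf()/fprintf(stderr, ...) calls with av_log(context, AV_LOG_ERROR/AV_LOG_DEBUG/..., ...), which routes diagnostics through the codec context so the host application (here, xine) can filter them by severity or redirect them. The standalone sketch below only illustrates that pattern; the function, level values and context name are toy stand-ins, not libavcodec's av_log() implementation.

    #include <stdarg.h>
    #include <stdio.h>

    /* Toy severity levels and threshold; not libavcodec's values. */
    enum { LOG_ERROR = 0, LOG_INFO = 1, LOG_DEBUG = 2 };
    static int log_threshold = LOG_INFO;

    /* Context-aware, level-filtered logging in the spirit of av_log(). */
    static void toy_log(const char *ctx_name, int level, const char *fmt, ...)
    {
        va_list ap;
        if (level > log_threshold)
            return;                          /* filtered out */
        fprintf(stderr, "[%s] ", ctx_name);  /* identify the emitting context */
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        toy_log("vp3", LOG_ERROR, "bad fragment number (%d >= %d)\n", 99, 64);
        toy_log("vp3", LOG_DEBUG, "filtered out at the default verbosity\n");
        return 0;
    }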
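
Several of the vp3.c hunks above turn the unconditional "stride = -s->current_frame.linesize[n]" into a negation that only happens when s->flipped_image is clear, because pre-3.3.0 Theora streams store the image the other way up than original VP3. A minimal, self-contained sketch of why starting at the last row and negating the stride flips a plane vertically; buffer size and pixel values are arbitrary.

    #include <stdio.h>
    #include <stdint.h>

    /* Write rows 0..h-1 through an arbitrary (possibly negative) stride. */
    static void write_rows(uint8_t *plane, int stride, int w, int h)
    {
        int x, y;
        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                plane[y * stride + x] = (uint8_t)(y * 10 + x);
    }

    int main(void)
    {
        enum { W = 4, H = 4 };
        uint8_t buf[W * H];
        int x, y;
        int flipped = 0;                 /* 0 = original VP3 / Theora >= 3.3.0 */
        int stride  = W;
        uint8_t *start = buf;

        if (!flipped) {                  /* same trick as the patched vp3.c:   */
            start  = buf + (H - 1) * W;  /* begin at the last row ...          */
            stride = -W;                 /* ... and walk upwards               */
        }
        write_rows(start, stride, W, H);

        for (y = 0; y < H; y++, putchar('\n'))
            for (x = 0; x < W; x++)
                printf("%3d ", buf[y * W + x]);   /* prints row 3 first */
        return 0;
    }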
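
vp3_decode_init() now presets s->version from the container fourcc: a 'VP30' tag gives version 0, anything else version 1, and keyframe headers may later re-read a 5-bit VP version when s->version is non-zero. The TAG macro below merely restates the little-endian fourcc packing that MKTAG performs, for illustration.

    #include <stdio.h>
    #include <stdint.h>

    /* First character in the lowest byte, as MKTAG packs it. */
    #define TAG(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                             ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

    int main(void)
    {
        uint32_t codec_tag = TAG('V', 'P', '3', '1');   /* example tag */
        int version = (codec_tag == TAG('V', 'P', '3', '0')) ? 0 : 1;
        printf("preset bitstream version: %d\n", version);
        return 0;
    }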
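
theora_decode_header() now reads the three 8-bit version fields and packs them as (major << 16) | (minor << 8) | micro, so later code can compare s->theora against 0x030300 (3.3.0, "alpha3"); anything older sets s->flipped_image. A standalone restatement of that packing, names kept illustrative:

    #include <stdio.h>

    /* Pack major.minor.micro into one comparable integer, as the patch does. */
    static int pack_theora_version(int major, int minor, int micro)
    {
        return (major << 16) | (minor << 8) | micro;
    }

    int main(void)
    {
        int v = pack_theora_version(3, 2, 1);
        int flipped_image = v < 0x030300;   /* pre-alpha3: image stored flipped */
        printf("s->theora = 0x%06X, flipped_image = %d\n",
               (unsigned)v, flipped_image);
        return 0;
    }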
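
theora_decode_comments() reads the Vorbis-style comment lengths with get_bits(&gb, 32) and then byteswaps them with be2me_32(): those length fields are little-endian in the stream, while get_bits() returns the four bytes interpreted MSB-first. A byte-oriented reader avoids the bit reader and the host-endianness question entirely; this is shown only as an illustration, not the patched code path.

    #include <stdio.h>
    #include <stdint.h>

    /* Assemble a 32-bit little-endian value directly from the byte stream. */
    static uint32_t read_le32(const uint8_t *p)
    {
        return  (uint32_t)p[0]        | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        const uint8_t buf[4] = { 0x2A, 0x00, 0x00, 0x00 };   /* length 42, LE */
        printf("vendor string length = %u\n", (unsigned)read_le32(buf));
        return 0;
    }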
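
wma_decode_superframe() gains an early return for buf_size == 0, clearing s->last_superframe_len so a flush call with an empty packet neither hands a zero-length buffer to init_get_bits() nor reuses stale carried-over data. A generic sketch of that guard; the struct and field names are placeholders, not libavcodec's.

    #include <stdio.h>
    #include <stddef.h>

    /* Placeholder context: carried_bytes stands in for s->last_superframe_len. */
    struct toy_ctx {
        int carried_bytes;
    };

    /* Empty packet (flush at end of stream): reset state, report no output. */
    static int toy_decode(struct toy_ctx *s, const unsigned char *buf,
                          size_t buf_size, int *out_samples)
    {
        (void)buf;
        if (buf_size == 0) {
            s->carried_bytes = 0;
            *out_samples     = 0;
            return 0;                 /* nothing consumed, nothing produced */
        }
        /* ... real bitstream parsing would start here ... */
        *out_samples = 0;
        return (int)buf_size;
    }

    int main(void)
    {
        struct toy_ctx ctx = { 13 };
        int samples;
        toy_decode(&ctx, NULL, 0, &samples);
        printf("after flush: carried_bytes=%d samples=%d\n",
               ctx.carried_bytes, samples);
        return 0;
    }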