Diffstat (limited to 'src/libffmpeg/libavcodec/mpegvideo.c')
-rw-r--r--  src/libffmpeg/libavcodec/mpegvideo.c  811
1 files changed, 433 insertions, 378 deletions
diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/src/libffmpeg/libavcodec/mpegvideo.c
index 31535c263..393e3828b 100644
--- a/src/libffmpeg/libavcodec/mpegvideo.c
+++ b/src/libffmpeg/libavcodec/mpegvideo.c
@@ -19,12 +19,16 @@
* 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
*/
+/**
+ * @file mpegvideo.c
+ * The simplest mpeg encoder (well, it was the simplest!).
+ */
+
#include <ctype.h>
#include <limits.h>
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
-#include "simple_idct.h"
#ifdef USE_FASTMEMCPY
#include "fastmemcpy.h"
@@ -32,18 +36,22 @@
#define CONFIG_RISKY
+#ifdef CONFIG_ENCODERS
static void encode_picture(MpegEncContext *s, int picture_number);
+#endif //CONFIG_ENCODERS
static void dct_unquantize_mpeg1_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale);
-static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w);
+static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
+#ifdef CONFIG_ENCODERS
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
+#endif //CONFIG_ENCODERS
-void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c;
+void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
/* enable all paranoid tests for rounding, overflows, etc... */
@@ -67,66 +75,55 @@ static const uint16_t aanscales[64] = {
4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247
};
-/* Input permutation for the simple_idct_mmx */
-static const uint8_t simple_mmx_permutation[64]={
- 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
- 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
- 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
- 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
- 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
- 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
- 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
- 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
-};
-
static const uint8_t h263_chroma_roundtab[16] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};
-static UINT16 (*default_mv_penalty)[MAX_MV*2+1]=NULL;
-static UINT8 default_fcode_tab[MAX_MV*2+1];
+#ifdef CONFIG_ENCODERS
+static uint16_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
+static uint8_t default_fcode_tab[MAX_MV*2+1];
enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
- const UINT16 *quant_matrix, int bias, int qmin, int qmax)
+ const uint16_t *quant_matrix, int bias, int qmin, int qmax)
{
int qscale;
for(qscale=qmin; qscale<=qmax; qscale++){
int i;
- if (s->fdct == ff_jpeg_fdct_islow) {
+ if (s->dsp.fdct == ff_jpeg_fdct_islow) {
for(i=0;i<64;i++) {
- const int j= s->idct_permutation[i];
+ const int j= s->dsp.idct_permutation[i];
/* 16 <= qscale * quant_matrix[i] <= 7905 */
/* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
/* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
/* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
- qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
+ qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
(qscale * quant_matrix[j]));
}
- } else if (s->fdct == fdct_ifast) {
+ } else if (s->dsp.fdct == fdct_ifast) {
for(i=0;i<64;i++) {
- const int j= s->idct_permutation[i];
+ const int j= s->dsp.idct_permutation[i];
/* 16 <= qscale * quant_matrix[i] <= 7905 */
/* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
/* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
/* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
- qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
+ qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
(aanscales[i] * qscale * quant_matrix[j]));
}
} else {
for(i=0;i<64;i++) {
- const int j= s->idct_permutation[i];
+ const int j= s->dsp.idct_permutation[i];
/* We can safely suppose that 16 <= quant_matrix[i] <= 255
So 16 <= qscale * quant_matrix[i] <= 7905
so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
*/
- qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
+ qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
// qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
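
The bounds spelled out in the comments above are what make this fixed-point reciprocal safe: quantization later becomes a multiply and a shift instead of a division. A minimal standalone sketch of the same trick, assuming QMAT_SHIFT is 22 (the real value is defined in mpegvideo.h, not here):

#include <stdio.h>
#include <stdint.h>

#define QMAT_SHIFT 22                    /* assumed for this demo */

int main(void)
{
    int qscale = 4, matrix = 16;         /* 16 <= qscale*matrix <= 7905 */
    int qmat   = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * matrix));
    int coeff  = 700;                    /* a DCT coefficient to quantize */

    int fast  = (coeff * qmat) >> QMAT_SHIFT;   /* multiply + shift */
    int exact = coeff / (qscale * matrix);      /* the division it replaces */

    printf("qmat=%d fast=%d exact=%d\n", qmat, fast, exact);
    return 0;
}
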
@@ -136,6 +133,8 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16
}
}
}
+#endif //CONFIG_ENCODERS
+
// move into common.c perhaps
#define CHECKED_ALLOCZ(p, size)\
{\
@@ -146,7 +145,7 @@ static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16
}\
}
-void ff_init_scantable(MpegEncContext *s, ScanTable *st, const UINT8 *src_scantable){
+void ff_init_scantable(MpegEncContext *s, ScanTable *st, const uint8_t *src_scantable){
int i;
int end;
@@ -155,7 +154,7 @@ void ff_init_scantable(MpegEncContext *s, ScanTable *st, const UINT8 *src_scanta
for(i=0; i<64; i++){
int j;
j = src_scantable[i];
- st->permutated[i] = s->idct_permutation[j];
+ st->permutated[i] = s->dsp.idct_permutation[j];
#ifdef ARCH_POWERPC
st->inverse[j] = i;
#endif
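
In isolation, the loop above builds permutated[], which maps a scan position to the coefficient index the IDCT expects, and (on PowerPC) inverse[], which maps back. A small sketch, using the transpose layout ((i&7)<<3)|(i>>3) as a stand-in idct_permutation and the identity as the source scan table:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t idct_permutation[64], permutated[64], inverse[64];
    uint8_t src_scantable[64];
    int i;

    for (i = 0; i < 64; i++) {
        src_scantable[i]    = (uint8_t)i;                            /* identity scan, for brevity */
        idct_permutation[i] = (uint8_t)(((i & 7) << 3) | (i >> 3));  /* transposed layout */
    }
    for (i = 0; i < 64; i++) {
        int j = src_scantable[i];
        permutated[i] = idct_permutation[j];
        inverse[j]    = (uint8_t)i;
    }
    printf("scan position 10 reads coefficient %d, which scans back to %d\n",
           permutated[10], inverse[10]);
    return 0;
}
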
@@ -170,49 +169,16 @@ void ff_init_scantable(MpegEncContext *s, ScanTable *st, const UINT8 *src_scanta
}
}
-/* XXX: those functions should be suppressed ASAP when all IDCTs are
- converted */
-// *FIXME* this is ugly hack using local static
-static void (*ff_put_pixels_clamped)(const DCTELEM *block, UINT8 *pixels, int line_size);
-static void (*ff_add_pixels_clamped)(const DCTELEM *block, UINT8 *pixels, int line_size);
-static void ff_jref_idct_put(UINT8 *dest, int line_size, DCTELEM *block)
-{
- j_rev_dct (block);
- ff_put_pixels_clamped(block, dest, line_size);
-}
-static void ff_jref_idct_add(UINT8 *dest, int line_size, DCTELEM *block)
-{
- j_rev_dct (block);
- ff_add_pixels_clamped(block, dest, line_size);
-}
-
/* init common dct for both encoder and decoder */
int DCT_common_init(MpegEncContext *s)
{
- int i;
-
- ff_put_pixels_clamped = s->dsp.put_pixels_clamped;
- ff_add_pixels_clamped = s->dsp.add_pixels_clamped;
-
s->dct_unquantize_h263 = dct_unquantize_h263_c;
s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
- s->dct_quantize= dct_quantize_c;
-
- if(s->avctx->dct_algo==FF_DCT_FASTINT)
- s->fdct = fdct_ifast;
- else
- s->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
- if(s->avctx->idct_algo==FF_IDCT_INT){
- s->idct_put= ff_jref_idct_put;
- s->idct_add= ff_jref_idct_add;
- s->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
- }else{ //accurate/default
- s->idct_put= simple_idct_put;
- s->idct_add= simple_idct_add;
- s->idct_permutation_type= FF_NO_IDCT_PERM;
- }
+#ifdef CONFIG_ENCODERS
+ s->dct_quantize= dct_quantize_c;
+#endif
#ifdef HAVE_MMX
MPV_common_init_mmx(s);
@@ -233,34 +199,14 @@ int DCT_common_init(MpegEncContext *s)
MPV_common_init_ppc(s);
#endif
+#ifdef CONFIG_ENCODERS
s->fast_dct_quantize= s->dct_quantize;
if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
}
- switch(s->idct_permutation_type){
- case FF_NO_IDCT_PERM:
- for(i=0; i<64; i++)
- s->idct_permutation[i]= i;
- break;
- case FF_LIBMPEG2_IDCT_PERM:
- for(i=0; i<64; i++)
- s->idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
- break;
- case FF_SIMPLE_IDCT_PERM:
- for(i=0; i<64; i++)
- s->idct_permutation[i]= simple_mmx_permutation[i];
- break;
- case FF_TRANSPOSE_IDCT_PERM:
- for(i=0; i<64; i++)
- s->idct_permutation[i]= ((i&7)<<3) | (i>>3);
- break;
- default:
- fprintf(stderr, "Internal error, IDCT permutation not set\n");
- return -1;
- }
-
+#endif //CONFIG_ENCODERS
/* load & permutate scantables
note: only wmv uses differnt ones
@@ -270,6 +216,8 @@ int DCT_common_init(MpegEncContext *s)
ff_init_scantable(s, &s->intra_h_scantable, ff_alternate_horizontal_scan);
ff_init_scantable(s, &s->intra_v_scantable, ff_alternate_vertical_scan);
+ s->picture_structure= PICT_FRAME;
+
return 0;
}
@@ -280,13 +228,13 @@ int DCT_common_init(MpegEncContext *s)
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
if(shared){
- XINE_ASSERT(pic->data[0], "pic->data[0] is NULL.");
- XINE_ASSERT((pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED), "Invalid pic->type: %d", pic->type);
+ assert(pic->data[0]);
+ assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
pic->type= FF_BUFFER_TYPE_SHARED;
}else{
int r;
- XINE_ASSERT(!pic->data[0],"pic->data[0] is not NULL.");
+ assert(!pic->data[0]);
r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
@@ -311,14 +259,14 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
if(pic->qscale_table==NULL){
if (s->encoding) {
- CHECKED_ALLOCZ(pic->mb_var , s->mb_num * sizeof(INT16))
- CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(INT16))
- CHECKED_ALLOCZ(pic->mb_mean , s->mb_num * sizeof(INT8))
+ CHECKED_ALLOCZ(pic->mb_var , s->mb_num * sizeof(int16_t))
+ CHECKED_ALLOCZ(pic->mc_mb_var, s->mb_num * sizeof(int16_t))
+ CHECKED_ALLOCZ(pic->mb_mean , s->mb_num * sizeof(int8_t))
CHECKED_ALLOCZ(pic->mb_cmp_score, s->mb_num * sizeof(int32_t))
}
- CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(UINT8)+1) //the +1 is for the slice end check
- CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(UINT8))
+ CHECKED_ALLOCZ(pic->mbskip_table , s->mb_num * sizeof(uint8_t)+1) //the +1 is for the slice end check
+ CHECKED_ALLOCZ(pic->qscale_table , s->mb_num * sizeof(uint8_t))
pic->qstride= s->mb_width;
}
@@ -371,7 +319,7 @@ int MPV_common_init(MpegEncContext *s)
{
int y_size, c_size, yc_size, i;
- dsputil_init(&s->dsp, s->avctx->dsp_mask);
+ dsputil_init(&s->dsp, s->avctx);
DCT_common_init(s);
s->flags= s->avctx->flags;
@@ -390,10 +338,10 @@ int MPV_common_init(MpegEncContext *s)
yc_size = y_size + 2 * c_size;
/* convert fourcc to upper case */
- s->avctx->fourcc= toupper( s->avctx->fourcc &0xFF)
- + (toupper((s->avctx->fourcc>>8 )&0xFF)<<8 )
- + (toupper((s->avctx->fourcc>>16)&0xFF)<<16)
- + (toupper((s->avctx->fourcc>>24)&0xFF)<<24);
+ s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF)
+ + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
+ + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
+ + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
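
The codec_tag conversion above upper-cases each of the four characters packed into the 32-bit tag, least significant byte first. Roughly the same operation as a standalone helper (the tag value is just an example):

#include <ctype.h>
#include <stdio.h>
#include <stdint.h>

static uint32_t upcase_tag(uint32_t tag)
{
    return  (uint32_t) toupper( tag        & 0xFF)
         + ((uint32_t)(toupper((tag >>  8) & 0xFF)) <<  8)
         + ((uint32_t)(toupper((tag >> 16) & 0xFF)) << 16)
         + ((uint32_t)(toupper((tag >> 24) & 0xFF)) << 24);
}

int main(void)
{
    uint32_t tag = 'd' | ('i' << 8) | ('v' << 16) | ((uint32_t)'x' << 24);
    uint32_t up  = upcase_tag(tag);

    printf("%c%c%c%c -> %c%c%c%c\n",
           tag & 0xFF, (tag >> 8) & 0xFF, (tag >> 16) & 0xFF, (tag >> 24) & 0xFF,
           up  & 0xFF, (up  >> 8) & 0xFF, (up  >> 16) & 0xFF, (up  >> 24) & 0xFF);
    return 0;
}
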
@@ -404,12 +352,12 @@ int MPV_common_init(MpegEncContext *s)
int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
/* Allocate MV tables */
- CHECKED_ALLOCZ(s->p_mv_table , mv_table_size * 2 * sizeof(INT16))
- CHECKED_ALLOCZ(s->b_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
- CHECKED_ALLOCZ(s->b_back_mv_table , mv_table_size * 2 * sizeof(INT16))
- CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
- CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(INT16))
- CHECKED_ALLOCZ(s->b_direct_mv_table , mv_table_size * 2 * sizeof(INT16))
+ CHECKED_ALLOCZ(s->p_mv_table , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_forw_mv_table , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_back_mv_table , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->b_direct_mv_table , mv_table_size * 2 * sizeof(int16_t))
//FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))
@@ -428,29 +376,29 @@ int MPV_common_init(MpegEncContext *s)
CHECKED_ALLOCZ(s->avctx->stats_out, 256);
}
- CHECKED_ALLOCZ(s->error_status_table, s->mb_num*sizeof(UINT8))
+ CHECKED_ALLOCZ(s->error_status_table, s->mb_num*sizeof(uint8_t))
if (s->out_format == FMT_H263 || s->encoding) {
int size;
/* Allocate MB type table */
- CHECKED_ALLOCZ(s->mb_type , s->mb_num * sizeof(UINT8))
+ CHECKED_ALLOCZ(s->mb_type , s->mb_num * sizeof(uint8_t))
/* MV prediction */
size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
- CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(INT16));
+ CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t));
}
if(s->codec_id==CODEC_ID_MPEG4){
/* interlaced direct mode decoding tables */
- CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(INT16))
- CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(INT8))
+ CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(int16_t))
+ CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(int8_t))
}
/* 4mv b frame decoding table */
//note this is needed for h263 without b frames too (segfault on damaged streams otherwise)
- CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(UINT8))
+ CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(uint8_t))
if (s->out_format == FMT_H263) {
/* ac values */
- CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(INT16) * 16);
+ CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
s->ac_val[1] = s->ac_val[0] + y_size;
s->ac_val[2] = s->ac_val[1] + c_size;
@@ -461,14 +409,14 @@ int MPV_common_init(MpegEncContext *s)
CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
/* cbp, ac_pred, pred_dir */
- CHECKED_ALLOCZ(s->cbp_table , s->mb_num * sizeof(UINT8))
- CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(UINT8))
+ CHECKED_ALLOCZ(s->cbp_table , s->mb_num * sizeof(uint8_t))
+ CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(uint8_t))
}
if (s->h263_pred || s->h263_plus || !s->encoding) {
/* dc values */
//MN: we need these for error resilience of intra-frames
- CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(INT16));
+ CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
s->dc_val[1] = s->dc_val[0] + y_size;
s->dc_val[2] = s->dc_val[1] + c_size;
for(i=0;i<yc_size;i++)
@@ -543,6 +491,8 @@ void MPV_common_end(MpegEncContext *s)
s->context_initialized = 0;
}
+#ifdef CONFIG_ENCODERS
+
/* init video encoder */
int MPV_encode_init(AVCodecContext *avctx)
{
@@ -553,7 +503,6 @@ int MPV_encode_init(AVCodecContext *avctx)
s->bit_rate = avctx->bit_rate;
s->bit_rate_tolerance = avctx->bit_rate_tolerance;
- s->frame_rate = avctx->frame_rate;
s->width = avctx->width;
s->height = avctx->height;
if(avctx->gop_size > 600){
@@ -565,8 +514,6 @@ int MPV_encode_init(AVCodecContext *avctx)
s->rtp_payload_size = avctx->rtp_payload_size;
if (avctx->rtp_callback)
s->rtp_callback = avctx->rtp_callback;
- s->qmin= avctx->qmin;
- s->qmax= avctx->qmax;
s->max_qdiff= avctx->max_qdiff;
s->qcompress= avctx->qcompress;
s->qblur= avctx->qblur;
@@ -603,10 +550,27 @@ int MPV_encode_init(AVCodecContext *avctx)
s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
+ if(s->codec_id==CODEC_ID_MJPEG){
+ s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
+ s->inter_quant_bias= 0;
+ }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO){
+ s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
+ s->inter_quant_bias= 0;
+ }else{
+ s->intra_quant_bias=0;
+ s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
+ }
+
+ if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
+ s->intra_quant_bias= avctx->intra_quant_bias;
+ if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
+ s->inter_quant_bias= avctx->inter_quant_bias;
+
switch(avctx->codec->id) {
case CODEC_ID_MPEG1VIDEO:
s->out_format = FMT_MPEG1;
- avctx->delay=0; //FIXME not sure, should check the spec
+ s->low_delay= 0; //s->max_b_frames ? 0 : 1;
+ avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
break;
case CODEC_ID_MJPEG:
s->out_format = FMT_MJPEG;
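
The biases chosen earlier in this hunk encode the rounding rule in QUANT_BIAS_SHIFT fixed point: 1<<(QUANT_BIAS_SHIFT-1) means "add x/2 before dividing" (round to nearest), 3<<(QUANT_BIAS_SHIFT-3) means "+3x/8", and -(1<<(QUANT_BIAS_SHIFT-2)) means "-x/4". A sketch of the effect on a single coefficient, with QUANT_BIAS_SHIFT=8 and QMAT_SHIFT=22 assumed for the demo (the real values live in mpegvideo.h):

#include <stdio.h>
#include <stdint.h>

#define QUANT_BIAS_SHIFT 8   /* assumed */
#define QMAT_SHIFT       22  /* assumed */

int main(void)
{
    int qscale = 8, matrix = 16, coeff = 100;
    int qmat = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * matrix));

    /* MJPEG-style bias: 1<<(QUANT_BIAS_SHIFT-1) is "+x/2", i.e. round to nearest */
    int bias  = (1 << (QUANT_BIAS_SHIFT - 1)) << (QMAT_SHIFT - QUANT_BIAS_SHIFT);
    int level = (bias + coeff * qmat) >> QMAT_SHIFT;

    printf("truncating: %d   with +x/2 bias: %d   exact: %f\n",
           (coeff * qmat) >> QMAT_SHIFT, level, coeff / (double)(qscale * matrix));
    return 0;
}
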
@@ -637,12 +601,12 @@ int MPV_encode_init(AVCodecContext *avctx)
case CODEC_ID_H263P:
s->out_format = FMT_H263;
s->h263_plus = 1;
- s->unrestricted_mv = 1;
- s->h263_aic = 1;
-
+ /* Fx */
+ s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
+ s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
+ /* /Fx */
/* These are just to be sure */
- s->umvplus = 0;
- s->umvplus_dec = 0;
+ s->umvplus = 1;
avctx->delay=0;
s->low_delay=1;
break;
@@ -715,9 +679,9 @@ int MPV_encode_init(AVCodecContext *avctx)
int i;
done=1;
- default_mv_penalty= av_mallocz( sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1) );
- memset(default_mv_penalty, 0, sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1));
- memset(default_fcode_tab , 0, sizeof(UINT8)*(2*MAX_MV+1));
+ default_mv_penalty= av_mallocz( sizeof(uint16_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
+ memset(default_mv_penalty, 0, sizeof(uint16_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
+ memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
for(i=-16; i<16; i++){
default_fcode_tab[i + MAX_MV]= 1;
@@ -754,7 +718,7 @@ int MPV_encode_init(AVCodecContext *avctx)
/* init default q matrix */
for(i=0;i<64;i++) {
- int j= s->idct_permutation[i];
+ int j= s->dsp.idct_permutation[i];
#ifdef CONFIG_RISKY
if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
@@ -809,10 +773,12 @@ int MPV_encode_end(AVCodecContext *avctx)
return 0;
}
+#endif //CONFIG_ENCODERS
+
void init_rl(RLTable *rl)
{
- INT8 max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
- UINT8 index_run[MAX_RUN+1];
+ int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
+ uint8_t index_run[MAX_RUN+1];
int last, run, level, start, end, i;
/* compute max_level[], max_run[] and index_run[] */
@@ -849,9 +815,9 @@ void init_rl(RLTable *rl)
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
-static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w)
+static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
{
- UINT8 *ptr, *last_line;
+ uint8_t *ptr, *last_line;
int i;
last_line = buf + (height - 1) * wrap;
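
draw_edges() pads the picture with replicated border pixels so unrestricted motion vectors may reference samples just outside the coded area. A toy version of the bottom-edge part only, on a small buffer:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    enum { WIDTH = 4, HEIGHT = 2, EDGE = 2, WRAP = WIDTH };
    uint8_t buf[(HEIGHT + EDGE) * WRAP];            /* room for EDGE extra lines */
    uint8_t *last_line = buf + (HEIGHT - 1) * WRAP;
    int i;

    for (i = 0; i < HEIGHT * WRAP; i++)
        buf[i] = (uint8_t)i;

    for (i = 1; i <= EDGE; i++)                     /* replicate the last picture line */
        memcpy(last_line + i * WRAP, last_line, WIDTH);

    for (i = 0; i < HEIGHT + EDGE; i++)
        printf("line %d starts with %d\n", i, buf[i * WRAP]);
    return 0;
}
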
@@ -892,7 +858,7 @@ static int find_unused_picture(MpegEncContext *s, int shared){
}
}
- XINE_ASSERT(i<MAX_PICTURE_COUNT,"value 'i' is >= MAX_PICTURE_COUNT: %d >= %d", i, MAX_PICTURE_COUNT);
+ assert(i<MAX_PICTURE_COUNT);
return i;
}
@@ -900,57 +866,66 @@ static int find_unused_picture(MpegEncContext *s, int shared){
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
int i;
- AVFrame *pic = NULL;
+ AVFrame *pic;
s->mb_skiped = 0;
/* mark&release old frames */
- if (s->pict_type != B_TYPE && s->last_picture.data[0]) {
- for(i=0; i<MAX_PICTURE_COUNT; i++){
-//printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]);
- if(s->picture[i].data[0] == s->last_picture.data[0]){
-// s->picture[i].reference=0;
- avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
- break;
- }
- }
- XINE_ASSERT(i<MAX_PICTURE_COUNT,"value 'i' is >= MAX_PICTURE_COUNT: %d >= %d", i, MAX_PICTURE_COUNT);
+ if (s->pict_type != B_TYPE && s->last_picture_ptr) {
+ avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
/* release forgotten pictures */
/* if(mpeg124/h263) */
if(!s->encoding){
for(i=0; i<MAX_PICTURE_COUNT; i++){
- if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){
+ if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
fprintf(stderr, "releasing zombie picture\n");
avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
}
}
}
}
-
+
+alloc:
if(!s->encoding){
i= find_unused_picture(s, 0);
pic= (AVFrame*)&s->picture[i];
pic->reference= s->pict_type != B_TYPE;
- pic->coded_picture_number= s->current_picture.coded_picture_number+1;
+
+ if(s->current_picture_ptr)
+ pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
alloc_picture(s, (Picture*)pic, 0);
- s->current_picture= s->picture[i];
+ s->current_picture_ptr= &s->picture[i];
}
if (s->pict_type != B_TYPE) {
- s->last_picture= s->next_picture;
- s->next_picture= s->current_picture;
+ s->last_picture_ptr= s->next_picture_ptr;
+ s->next_picture_ptr= s->current_picture_ptr;
+ }
+ s->current_picture= *s->current_picture_ptr;
+ if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
+ if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
+ if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;
+
+ if(s->picture_structure!=PICT_FRAME){
+ int i;
+ for(i=0; i<4; i++){
+ if(s->picture_structure == PICT_BOTTOM_FIELD){
+ s->current_picture.data[i] += s->current_picture.linesize[i];
+ }
+ s->current_picture.linesize[i] *= 2;
+ s->last_picture.linesize[i] *=2;
+ s->next_picture.linesize[i] *=2;
+ }
}
- if(s->pict_type != I_TYPE && s->last_picture.data[0]==NULL){
+ if(s->pict_type != I_TYPE && s->last_picture_ptr==NULL){
fprintf(stderr, "warning: first frame is no keyframe\n");
- XINE_ASSERT(s->pict_type != B_TYPE, "These should have been dropped if we dont have a reference");
- free_picture(s, (Picture*) pic);
- s->last_picture.data[0] = s->next_picture.data[0] = NULL;
- return -1;
+ assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
+ goto alloc;
}
s->hurry_up= s->avctx->hurry_up;
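
For field pictures, the hunk above makes current_picture address only every other line: the linesize is doubled and the bottom field additionally starts one line down. The same idea on a toy buffer:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    enum { W = 8, H = 4 };
    uint8_t frame[H][W];
    int linesize = W, y, x;

    for (y = 0; y < H; y++)
        for (x = 0; x < W; x++)
            frame[y][x] = (uint8_t)(y * 10 + x);

    uint8_t *field = &frame[0][0] + linesize;   /* bottom field: skip line 0 */
    int field_linesize = linesize * 2;          /* step over the other field's lines */

    for (y = 0; y < H / 2; y++)
        printf("field row %d starts with %d\n", y, field[y * field_linesize]);
    return 0;
}
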
@@ -987,19 +962,19 @@ void MPV_frame_end(MpegEncContext *s)
if(s->pict_type!=B_TYPE){
s->last_non_b_pict_type= s->pict_type;
}
-
- s->current_picture.quality= s->qscale; //FIXME get average of qscale_table
- s->current_picture.pict_type= s->pict_type;
- s->current_picture.key_frame= s->pict_type == I_TYPE;
-
- /* copy back current_picture variables */
+#if 0
+ /* copy back current_picture variables */
for(i=0; i<MAX_PICTURE_COUNT; i++){
if(s->picture[i].data[0] == s->current_picture.data[0]){
s->picture[i]= s->current_picture;
break;
}
}
- XINE_ASSERT(i<MAX_PICTURE_COUNT,"value 'i' is >= MAX_PICTURE_COUNT: %d >= %d", i, MAX_PICTURE_COUNT);
+ assert(i<MAX_PICTURE_COUNT);
+#endif
+ s->current_picture_ptr->quality= s->qscale; //FIXME get average of qscale_table
+ s->current_picture_ptr->pict_type= s->pict_type;
+ s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
/* release non refernce frames */
for(i=0; i<MAX_PICTURE_COUNT; i++){
@@ -1018,8 +993,17 @@ void MPV_frame_end(MpegEncContext *s)
}
printf("pict type: %d\n", s->pict_type);
}
+
+ // clear copies, to avoid confusion
+#if 0
+ memset(&s->last_picture, 0, sizeof(Picture));
+ memset(&s->next_picture, 0, sizeof(Picture));
+ memset(&s->current_picture, 0, sizeof(Picture));
+#endif
}
+#ifdef CONFIG_ENCODERS
+
static int get_sae(uint8_t *src, int ref, int stride){
int x,y;
int acc=0;
@@ -1085,6 +1069,12 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
pic->reference= 1;
alloc_picture(s, (Picture*)pic, 0);
+ for(i=0; i<4; i++){
+ /* the input will be 16 pixels to the right relative to the actual buffer start
+ * and the current_pic, so the buffer can be reused, yes its not beatifull
+ */
+ pic->data[i]+= 16;
+ }
if( pic->data[0] == pic_arg->data[0]
&& pic->data[1] == pic_arg->data[1]
@@ -1135,7 +1125,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
static void select_input_picture(MpegEncContext *s){
int i;
-
+ const int encoding_delay= s->max_b_frames;
int coded_pic_num=0;
if(s->reordered_input_picture[0])
@@ -1147,7 +1137,7 @@ static void select_input_picture(MpegEncContext *s){
/* set next picture types & ordering */
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
- if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture.data[0]==NULL || s->intra_only){
+ if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
s->reordered_input_picture[0]= s->input_picture[0];
s->reordered_input_picture[0]->pict_type= I_TYPE;
s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
@@ -1221,19 +1211,22 @@ static void select_input_picture(MpegEncContext *s){
}
if(s->reordered_input_picture[0]){
- s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
+ s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE;
+
+ s->new_picture= *s->reordered_input_picture[0];
if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
+ // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
+
int i= find_unused_picture(s, 0);
Picture *pic= &s->picture[i];
- s->new_picture= *s->reordered_input_picture[0];
-
/* mark us unused / free shared pic */
for(i=0; i<4; i++)
s->reordered_input_picture[0]->data[i]= NULL;
s->reordered_input_picture[0]->type= 0;
+ //FIXME bad, copy * except
pic->pict_type = s->reordered_input_picture[0]->pict_type;
pic->quality = s->reordered_input_picture[0]->quality;
pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
@@ -1241,21 +1234,20 @@ static void select_input_picture(MpegEncContext *s){
alloc_picture(s, pic, 0);
- s->current_picture= *pic;
+ s->current_picture_ptr= pic;
}else{
- XINE_ASSERT(
- (s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
- || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL),
- "s->reordered_input_picture[0]->type is incorrect: %d",
- s->reordered_input_picture[0]->type);
-
- s->new_picture= *s->reordered_input_picture[0];
+ // input is not a shared pix -> reuse buffer for current_pix
+ assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
+ || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
+
+ s->current_picture_ptr= s->reordered_input_picture[0];
for(i=0; i<4; i++){
- s->reordered_input_picture[0]->data[i]-=16; //FIXME dirty
+ //reverse the +16 we did before storing the input
+ s->current_picture_ptr->data[i]-=16;
}
- s->current_picture= *s->reordered_input_picture[0];
}
+ s->current_picture= *s->current_picture_ptr;
s->picture_number= s->new_picture.display_picture_number;
//printf("dpn:%d\n", s->picture_number);
@@ -1285,7 +1277,7 @@ int MPV_encode_picture(AVCodecContext *avctx,
s->pict_type= s->new_picture.pict_type;
if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we dont touch it for CBR */
s->qscale= (int)(s->new_picture.quality+0.5);
- XINE_ASSERT(s->qscale,"s->qscale is NULL");
+ assert(s->qscale);
}
//emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
@@ -1310,6 +1302,10 @@ int MPV_encode_picture(AVCodecContext *avctx,
if(s->flags&CODEC_FLAG_PASS1)
ff_write_pass1_stats(s);
+
+ for(i=0; i<4; i++){
+ avctx->error[i] += s->current_picture_ptr->error[i];
+ }
}
s->input_picture_number++;
@@ -1319,20 +1315,18 @@ int MPV_encode_picture(AVCodecContext *avctx,
s->total_bits += s->frame_bits;
avctx->frame_bits = s->frame_bits;
-
- for(i=0; i<4; i++){
- avctx->error[i] += s->current_picture.error[i];
- }
return pbBufPtr(&s->pb) - s->pb.buf;
}
+#endif //CONFIG_ENCODERS
+
static inline void gmc1_motion(MpegEncContext *s,
- UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int dest_offset,
- UINT8 **ref_picture, int src_offset)
+ uint8_t **ref_picture, int src_offset)
{
- UINT8 *ptr;
+ uint8_t *ptr;
int offset, src_x, src_y, linesize, uvlinesize;
int motion_x, motion_y;
int emu=0;
@@ -1416,11 +1410,11 @@ static inline void gmc1_motion(MpegEncContext *s,
}
static inline void gmc_motion(MpegEncContext *s,
- UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int dest_offset,
- UINT8 **ref_picture, int src_offset)
+ uint8_t **ref_picture, int src_offset)
{
- UINT8 *ptr;
+ uint8_t *ptr;
int linesize, uvlinesize;
const int a= s->sprite_warping_accuracy;
int ox, oy;
@@ -1479,11 +1473,11 @@ static inline void gmc_motion(MpegEncContext *s,
}
-void ff_emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h,
+void ff_emulated_edge_mc(MpegEncContext *s, uint8_t *src, int linesize, int block_w, int block_h,
int src_x, int src_y, int w, int h){
int x, y;
int start_y, start_x, end_y, end_x;
- UINT8 *buf= s->edge_emu_buffer;
+ uint8_t *buf= s->edge_emu_buffer;
if(src_y>= h){
src+= (h-1-src_y)*linesize;
@@ -1542,13 +1536,13 @@ void ff_emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_
/* apply one mpeg motion vector to the three components */
static inline void mpeg_motion(MpegEncContext *s,
- UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int dest_offset,
- UINT8 **ref_picture, int src_offset,
+ uint8_t **ref_picture, int src_offset,
int field_based, op_pixels_func (*pix_op)[4],
int motion_x, int motion_y, int h)
{
- UINT8 *ptr;
+ uint8_t *ptr;
int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
int emu=0;
#if 0
@@ -1571,15 +1565,15 @@ if(s->quarter_sample)
src_y = clip(src_y, -16, height);
if (src_y == height)
dxy &= ~2;
- linesize = s->linesize << field_based;
- uvlinesize = s->uvlinesize << field_based;
+ linesize = s->current_picture.linesize[0] << field_based;
+ uvlinesize = s->current_picture.linesize[1] << field_based;
ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
dest_y += dest_offset;
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
|| src_y + (motion_y&1) + h > v_edge_pos){
- ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based,
+ ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer + src_offset;
emu=1;
@@ -1632,14 +1626,14 @@ if(s->quarter_sample)
}
static inline void qpel_motion(MpegEncContext *s,
- UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int dest_offset,
- UINT8 **ref_picture, int src_offset,
+ uint8_t **ref_picture, int src_offset,
int field_based, op_pixels_func (*pix_op)[4],
qpel_mc_func (*qpix_op)[16],
int motion_x, int motion_y, int h)
{
- UINT8 *ptr;
+ uint8_t *ptr;
int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
int emu=0;
@@ -1697,6 +1691,7 @@ static inline void qpel_motion(MpegEncContext *s,
}
mx= (mx>>1)|(mx&1);
my= (my>>1)|(my&1);
+
dxy= (mx&1) | ((my&1)<<1);
mx>>=1;
my>>=1;
@@ -1738,13 +1733,13 @@ inline int ff_h263_round_chroma(int x){
}
static inline void MPV_motion(MpegEncContext *s,
- UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
- int dir, UINT8 **ref_picture,
+ uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
+ int dir, uint8_t **ref_picture,
op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
{
int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
int mb_x, mb_y, i;
- UINT8 *ptr, *dest;
+ uint8_t *ptr, *dest;
int emu=0;
mb_x = s->mb_x;
@@ -1909,8 +1904,18 @@ static inline void MPV_motion(MpegEncContext *s,
s->mv[dir][1][0], s->mv[dir][1][1], 8);
}
} else {
-
+ int offset;
+ if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
+ offset= s->field_select[dir][0] ? s->linesize : 0;
+ }else{
+ ref_picture= s->current_picture.data;
+ offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
+ }
+ mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
+ ref_picture, offset,
+ 0, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 16);
}
break;
}
@@ -1919,28 +1924,28 @@ static inline void MPV_motion(MpegEncContext *s,
/* put block[] to dest[] */
static inline void put_dct(MpegEncContext *s,
- DCTELEM *block, int i, UINT8 *dest, int line_size)
+ DCTELEM *block, int i, uint8_t *dest, int line_size)
{
s->dct_unquantize(s, block, i, s->qscale);
- s->idct_put (dest, line_size, block);
+ s->dsp.idct_put (dest, line_size, block);
}
/* add block[] to dest[] */
static inline void add_dct(MpegEncContext *s,
- DCTELEM *block, int i, UINT8 *dest, int line_size)
+ DCTELEM *block, int i, uint8_t *dest, int line_size)
{
if (s->block_last_index[i] >= 0) {
- s->idct_add (dest, line_size, block);
+ s->dsp.idct_add (dest, line_size, block);
}
}
static inline void add_dequant_dct(MpegEncContext *s,
- DCTELEM *block, int i, UINT8 *dest, int line_size)
+ DCTELEM *block, int i, uint8_t *dest, int line_size)
{
if (s->block_last_index[i] >= 0) {
s->dct_unquantize(s, block, i, s->qscale);
- s->idct_add (dest, line_size, block);
+ s->dsp.idct_add (dest, line_size, block);
}
}
@@ -1957,8 +1962,8 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
s->dc_val[0][xy + wrap] =
s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred */
- memset(s->ac_val[0][xy ], 0, 32 * sizeof(INT16));
- memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(INT16));
+ memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
+ memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
if (s->msmpeg4_version>=3) {
s->coded_block[xy ] =
s->coded_block[xy + 1 ] =
@@ -1971,8 +1976,8 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
s->dc_val[1][xy] =
s->dc_val[2][xy] = 1024;
/* ac pred */
- memset(s->ac_val[1][xy], 0, 16 * sizeof(INT16));
- memset(s->ac_val[2][xy], 0, 16 * sizeof(INT16));
+ memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
+ memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0;
}
@@ -2056,10 +2061,12 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
}
if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
- UINT8 *dest_y, *dest_cb, *dest_cr;
+ uint8_t *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset;
op_pixels_func (*op_pix)[4];
qpel_mc_func (*op_qpix)[16];
+ const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
+ const int uvlinesize= s->current_picture.linesize[1];
/* avoid copy if macroblock skipped in last frame too */
if (s->pict_type != B_TYPE) {
@@ -2068,14 +2075,14 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
/* skip only during decoding as we might trash the buffers during encoding a bit */
if(!s->encoding){
- UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy];
+ uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
const int age= s->current_picture.age;
- XINE_ASSERT(age, "value 'age' is NULL");
+ assert(age);
if (s->mb_skiped) {
s->mb_skiped= 0;
- XINE_ASSERT(s->pict_type!=I_TYPE, "s->pict_type (%d) != I_TYPE (%d)", s->pict_type, I_TYPE);
+ assert(s->pict_type!=I_TYPE);
(*mbskip_ptr) ++; /* indicate that this time we skiped it */
if(*mbskip_ptr >99) *mbskip_ptr= 99;
@@ -2093,22 +2100,22 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
}else
s->mb_skiped= 0;
- if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band){
+ if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){ //FIXME precalc
dest_y = s->current_picture.data[0] + mb_x * 16;
dest_cb = s->current_picture.data[1] + mb_x * 8;
dest_cr = s->current_picture.data[2] + mb_x * 8;
}else{
- dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
- dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
- dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
+ dest_y = s->current_picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
+ dest_cb = s->current_picture.data[1] + (mb_y * 8 * uvlinesize) + mb_x * 8;
+ dest_cr = s->current_picture.data[2] + (mb_y * 8 * uvlinesize) + mb_x * 8;
}
if (s->interlaced_dct) {
- dct_linesize = s->linesize * 2;
- dct_offset = s->linesize;
+ dct_linesize = linesize * 2;
+ dct_offset = linesize;
} else {
- dct_linesize = s->linesize;
- dct_offset = s->linesize * 8;
+ dct_linesize = linesize;
+ dct_offset = linesize * 8;
}
if (!s->mb_intra) {
@@ -2145,8 +2152,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
if(!(s->flags&CODEC_FLAG_GRAY)){
- add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize);
- add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize);
+ add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
+ add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
}
} else if(s->codec_id != CODEC_ID_WMV2){
add_dct(s, block[0], 0, dest_y, dct_linesize);
@@ -2155,8 +2162,8 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
if(!(s->flags&CODEC_FLAG_GRAY)){
- add_dct(s, block[4], 4, dest_cb, s->uvlinesize);
- add_dct(s, block[5], 5, dest_cr, s->uvlinesize);
+ add_dct(s, block[4], 4, dest_cb, uvlinesize);
+ add_dct(s, block[5], 5, dest_cr, uvlinesize);
}
}
#ifdef CONFIG_RISKY
@@ -2173,24 +2180,26 @@ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
if(!(s->flags&CODEC_FLAG_GRAY)){
- put_dct(s, block[4], 4, dest_cb, s->uvlinesize);
- put_dct(s, block[5], 5, dest_cr, s->uvlinesize);
+ put_dct(s, block[4], 4, dest_cb, uvlinesize);
+ put_dct(s, block[5], 5, dest_cr, uvlinesize);
}
}else{
- s->idct_put(dest_y , dct_linesize, block[0]);
- s->idct_put(dest_y + 8, dct_linesize, block[1]);
- s->idct_put(dest_y + dct_offset , dct_linesize, block[2]);
- s->idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
+ s->dsp.idct_put(dest_y , dct_linesize, block[0]);
+ s->dsp.idct_put(dest_y + 8, dct_linesize, block[1]);
+ s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
+ s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
if(!(s->flags&CODEC_FLAG_GRAY)){
- s->idct_put(dest_cb, s->uvlinesize, block[4]);
- s->idct_put(dest_cr, s->uvlinesize, block[5]);
+ s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
+ s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
}
}
}
}
}
+#ifdef CONFIG_ENCODERS
+
static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
{
static const char tab[64]=
@@ -2257,80 +2266,13 @@ static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index
if (level>maxlevel) level=maxlevel;
else if(level<minlevel) level=minlevel;
- block[j]= level;
- }
-}
-
-static inline void requantize_coeffs(MpegEncContext *s, DCTELEM block[64], int oldq, int newq, int n)
-{
- int i;
- if(s->mb_intra){
- i=1; //skip clipping of intra dc
- //FIXME requantize, note (mpeg1/h263/h263p-aic dont need it,...)
- }else
- i=0;
-
- for(;i<=s->block_last_index[n]; i++){
- const int j = s->intra_scantable.permutated[i];
- int level = block[j];
-
- block[j]= ROUNDED_DIV(level*oldq, newq);
- }
-
- for(i=s->block_last_index[n]; i>=0; i--){
- const int j = s->intra_scantable.permutated[i];
- if(block[j]) break;
+ block[j]= level;
}
- s->block_last_index[n]= i;
}
-static inline void auto_requantize_coeffs(MpegEncContext *s, DCTELEM block[6][64])
-{
- int i,n, newq;
- const int maxlevel= s->max_qcoeff;
- const int minlevel= s->min_qcoeff;
- int largest=0, smallest=0;
-
- XINE_ASSERT(s->adaptive_quant, "s->adaptive_quant is NULL");
-
- for(n=0; n<6; n++){
- if(s->mb_intra){
- i=1; //skip clipping of intra dc
- //FIXME requantize, note (mpeg1/h263/h263p-aic dont need it,...)
- }else
- i=0;
-
- for(;i<=s->block_last_index[n]; i++){
- const int j = s->intra_scantable.permutated[i];
- int level = block[n][j];
- if(largest < level) largest = level;
- if(smallest > level) smallest= level;
- }
- }
-
- for(newq=s->qscale+1; newq<32; newq++){
- if( ROUNDED_DIV(smallest*s->qscale, newq) >= minlevel
- && ROUNDED_DIV(largest *s->qscale, newq) <= maxlevel)
- break;
- }
-
- if(s->out_format==FMT_H263){
- /* h263 like formats cannot change qscale by more than 2 easiely */
- if(s->avctx->qmin + 2 < newq)
- newq= s->avctx->qmin + 2;
- }
-
- for(n=0; n<6; n++){
- requantize_coeffs(s, block[n], s->qscale, newq, n);
- clip_coeffs(s, block[n], s->block_last_index[n]);
- }
-
- s->dquant+= newq - s->qscale;
- s->qscale= newq;
-}
#if 0
-static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
+static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
int score=0;
int x,y;
@@ -2345,7 +2287,7 @@ static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimiz
return score;
}
-static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
+static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
int score=0;
int x,y;
@@ -2362,7 +2304,7 @@ static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to d
#else
#define SQ(a) ((a)*(a))
-static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
+static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
int score=0;
int x,y;
@@ -2377,7 +2319,7 @@ static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimiz
return score;
}
-static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
+static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
int score=0;
int x,y;
@@ -2394,17 +2336,20 @@ static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to d
#endif
-void ff_draw_horiz_band(MpegEncContext *s){
+#endif //CONFIG_ENCODERS
+
+/**
+ *
+ * @param h is the normal height, this will be reduced automatically if needed for the last row
+ */
+void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
if ( s->avctx->draw_horiz_band
- && (s->last_picture.data[0] || s->low_delay) ) {
- UINT8 *src_ptr[3];
- int y, h, offset;
- y = s->mb_y * 16;
- h = s->height - y;
- if (h > 16)
- h = 16;
-
- if(s->pict_type==B_TYPE)
+ && (s->last_picture_ptr || s->low_delay) ) {
+ uint8_t *src_ptr[3];
+ int offset;
+ h= FFMIN(h, s->height - y);
+
+ if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME)
offset = 0;
else
offset = y * s->linesize;
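
The new signature takes the nominal slice height and trims it at the picture border, as the @param comment above says. A tiny sketch of that clamping for a height that is not a multiple of 16:

#include <stdio.h>

#define FFMIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    int height = 288 + 8;               /* not a multiple of 16 */
    int y;

    for (y = 0; y < height; y += 16) {
        int h = FFMIN(16, height - y);  /* full bands stay 16, the last one shrinks */
        if (h < 16)
            printf("last band: y=%d h=%d\n", y, h);
    }
    return 0;
}
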
@@ -2425,6 +2370,8 @@ void ff_draw_horiz_band(MpegEncContext *s){
}
}
+#ifdef CONFIG_ENCODERS
+
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
{
const int mb_x= s->mb_x;
@@ -2445,7 +2392,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
if(s->codec_id==CODEC_ID_MPEG4){
if(!s->mb_intra){
- XINE_ASSERT((s->dquant==0 || s->mv_type!=MV_TYPE_8X8), "?");
+ assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
if(s->mv_dir&MV_DIRECT)
s->dquant=0;
@@ -2457,7 +2404,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
}
if (s->mb_intra) {
- UINT8 *ptr;
+ uint8_t *ptr;
int wrap_y;
int emu=0;
@@ -2512,8 +2459,8 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
}else{
op_pixels_func (*op_pix)[4];
qpel_mc_func (*op_qpix)[16];
- UINT8 *dest_y, *dest_cb, *dest_cr;
- UINT8 *ptr_y, *ptr_cb, *ptr_cr;
+ uint8_t *dest_y, *dest_cb, *dest_cr;
+ uint8_t *ptr_y, *ptr_cb, *ptr_cr;
int wrap_y, wrap_c;
int emu=0;
@@ -2661,7 +2608,6 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
}
-#ifdef CONFIG_ENCODERS
/* huffman encode */
switch(s->codec_id){ //FIXME funct ptr could be slightly faster
case CODEC_ID_MPEG1VIDEO:
@@ -2683,11 +2629,12 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
case CODEC_ID_MJPEG:
mjpeg_encode_mb(s, s->block); break;
default:
- XINE_ASSERT(0, "We have no default case. So if program control reaches here something is really wrong");
+ assert(0);
}
-#endif
}
+#endif //CONFIG_ENCODERS
+
/**
* combines the (truncated) bitstream to a complete frame
* @returns -1 if no complete frame could be created
@@ -2717,7 +2664,8 @@ int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size)
return 0;
}
-void ff_copy_bits(PutBitContext *pb, UINT8 *src, int length)
+#ifdef CONFIG_ENCODERS
+void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
{
int bytes= length>>4;
int bits= length&15;
@@ -2838,7 +2786,7 @@ static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, in
}
}
- XINE_ASSERT(acc>=0,"value 'acc' is < 0: %d", acc);
+ assert(acc>=0);
return acc;
}
@@ -2849,9 +2797,9 @@ static void encode_picture(MpegEncContext *s, int picture_number)
int i;
int bits;
MpegEncContext best_s, backup_s;
- UINT8 bit_buf[2][3000];
- UINT8 bit_buf2[2][3000];
- UINT8 bit_buf_tex[2][3000];
+ uint8_t bit_buf[2][3000];
+ uint8_t bit_buf2[2][3000];
+ uint8_t bit_buf_tex[2][3000];
PutBitContext pb[2], pb2[2], tex_pb[2];
for(i=0; i<2; i++){
@@ -2875,7 +2823,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
#ifdef CONFIG_RISKY
/* we need to initialize some time vars before we can encode b-frames */
- if (s->h263_pred && !s->h263_msmpeg4)
+ // RAL: Condition added for MPEG1VIDEO
+ if (s->codec_id == CODEC_ID_MPEG1VIDEO || (s->h263_pred && !s->h263_msmpeg4))
ff_set_mpeg4_time(s, s->picture_number);
#endif
@@ -2883,17 +2832,14 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration
- if(s->msmpeg4_version){
- if(s->pict_type==I_TYPE)
- s->no_rounding=1;
- else if(s->flipflop_rounding)
- s->no_rounding ^= 1;
- }else if(s->out_format == FMT_H263){
- if(s->pict_type==I_TYPE)
- s->no_rounding=0;
- else if(s->pict_type!=B_TYPE)
+ if(s->pict_type==I_TYPE){
+ if(s->msmpeg4_version) s->no_rounding=1;
+ else s->no_rounding=0;
+ }else if(s->pict_type!=B_TYPE){
+ if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
s->no_rounding ^= 1;
}
+
/* Estimate motion for every MB */
s->mb_intra=0; //for the rate distoration & bit compare functions
if(s->pict_type != I_TYPE){
@@ -2937,9 +2883,9 @@ static void encode_picture(MpegEncContext *s, int picture_number)
}else /* if(s->pict_type == I_TYPE) */{
/* I-Frame */
//FIXME do we need to zero them?
- memset(s->motion_val[0], 0, sizeof(INT16)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
- memset(s->p_mv_table , 0, sizeof(INT16)*(s->mb_width+2)*(s->mb_height+2)*2);
- memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
+ memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
+ memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_width+2)*(s->mb_height+2)*2);
+ memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_width*s->mb_height);
if(!s->fixed_qscale){
/* finding spatial complexity for I-frame rate control */
@@ -2964,21 +2910,33 @@ static void encode_picture(MpegEncContext *s, int picture_number)
if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
s->pict_type= I_TYPE;
- memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
+ memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_width*s->mb_height);
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
}
- if(s->pict_type==P_TYPE || s->pict_type==S_TYPE)
- s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
- ff_fix_long_p_mvs(s);
- if(s->pict_type==B_TYPE){
- s->f_code= ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
- s->b_code= ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
+ if(!s->umvplus){
+ if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
+ s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
+
+ ff_fix_long_p_mvs(s);
+ }
+
+ if(s->pict_type==B_TYPE){
+ int a, b;
+
+ a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
+ b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR);
+ s->f_code = FFMAX(a, b);
- ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
- ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
- ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
- ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
+ a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
+ b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR);
+ s->b_code = FFMAX(a, b);
+
+ ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
+ ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
+ ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
+ ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
+ }
}
if (s->fixed_qscale)
@@ -3007,7 +2965,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
/* for mjpeg, we do include qscale in the matrix */
s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
for(i=1;i<64;i++){
- int j= s->idct_permutation[i];
+ int j= s->dsp.idct_permutation[i];
s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
}
@@ -3062,11 +3020,15 @@ static void encode_picture(MpegEncContext *s, int picture_number)
/* note: quant matrix value (8) is implied here */
s->last_dc[i] = 128;
- s->current_picture.error[i] = 0;
+ s->current_picture_ptr->error[i] = 0;
}
s->mb_incr = 1;
s->last_mv[0][0][0] = 0;
s->last_mv[0][0][1] = 0;
+ s->last_mv[1][0][0] = 0;
+ s->last_mv[1][0][1] = 0;
+
+ s->last_mv_dir = 0;
#ifdef CONFIG_RISKY
if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)
@@ -3230,7 +3192,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
&dmin, &next_block, mx, my);
}
if(mb_type&MB_TYPE_INTRA){
- s->mv_dir = MV_DIR_FORWARD;
+ s->mv_dir = 0;
s->mv_type = MV_TYPE_16X16;
s->mb_intra= 1;
s->mv[0][0][0] = 0;
@@ -3351,7 +3313,7 @@ static void encode_picture(MpegEncContext *s, int picture_number)
switch(mb_type){
case MB_TYPE_INTRA:
- s->mv_dir = MV_DIR_FORWARD;
+ s->mv_dir = 0;
s->mb_intra= 1;
motion_x= s->mv[0][0][0] = 0;
motion_y= s->mv[0][0][1] = 0;
@@ -3408,8 +3370,13 @@ static void encode_picture(MpegEncContext *s, int picture_number)
motion_x=motion_y=0; //gcc warning fix
printf("illegal MB type\n");
}
+
encode_mb(s, motion_x, motion_y);
+
+ // RAL: Update last macrobloc type
+ s->last_mv_dir = s->mv_dir;
}
+
/* clean the MV table in IPS frames for direct mode in B frames */
if(s->mb_intra /* && I,P,S_TYPE */){
s->p_mv_table[xy][0]=0;
@@ -3425,17 +3392,17 @@ static void encode_picture(MpegEncContext *s, int picture_number)
if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
- s->current_picture.error[0] += sse(
+ s->current_picture_ptr->error[0] += sse(
s,
s->new_picture .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
w, h, s->linesize);
- s->current_picture.error[1] += sse(
+ s->current_picture_ptr->error[1] += sse(
s,
s->new_picture .data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
s->current_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
w>>1, h>>1, s->uvlinesize);
- s->current_picture.error[2] += sse(
+ s->current_picture_ptr->error[2] += sse(
s,
s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
@@ -3487,7 +3454,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
DCTELEM *block, int n,
int qscale, int *overflow){
const int *qmat;
- const UINT8 *scantable= s->intra_scantable.scantable;
+ const uint8_t *scantable= s->intra_scantable.scantable;
int max=0;
unsigned int threshold1, threshold2;
int bias=0;
@@ -3507,7 +3474,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
int score_limit=0;
int left_limit= 0;
- s->fdct (block);
+ s->dsp.fdct (block);
qmul= qscale*16;
qadd= ((qscale-1)|1)*8;
@@ -3597,7 +3564,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
int level= coeff[level_index][i];
int unquant_coeff;
- XINE_ASSERT(level, "value 'level' is NULL");
+ assert(level);
if(s->out_format == FMT_H263){
if(level>0){
@@ -3606,7 +3573,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
unquant_coeff= level*qmul - qadd;
}
}else{ //MPEG1
- j= s->idct_permutation[ scantable[i + start_i] ]; //FIXME optimize
+ j= s->dsp.idct_permutation[ scantable[i + start_i] ]; //FIXME optimize
if(s->mb_intra){
if (level < 0) {
unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3;
@@ -3716,16 +3683,16 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
return last_non_zero;
i= last_i;
- XINE_ASSERT(last_level, "value 'last_level' is NULL");
+ assert(last_level);
//FIXME use permutated scantable
- block[ s->idct_permutation[ scantable[last_non_zero] ] ]= last_level;
+ block[ s->dsp.idct_permutation[ scantable[last_non_zero] ] ]= last_level;
i -= last_run + 1;
for(;i>0 ; i -= run_tab[i] + 1){
- const int j= s->idct_permutation[ scantable[i - 1 + start_i] ];
+ const int j= s->dsp.idct_permutation[ scantable[i - 1 + start_i] ];
block[j]= level_tab[i];
- XINE_ASSERT(block[j], "value 'block[j]' is NULL");
+ assert(block[j]);
}
return last_non_zero;
@@ -3737,12 +3704,12 @@ static int dct_quantize_c(MpegEncContext *s,
{
int i, j, level, last_non_zero, q;
const int *qmat;
- const UINT8 *scantable= s->intra_scantable.scantable;
+ const uint8_t *scantable= s->intra_scantable.scantable;
int bias;
int max=0;
unsigned int threshold1, threshold2;
- s->fdct (block);
+ s->dsp.fdct (block);
if (s->mb_intra) {
if (!s->h263_aic) {
@@ -3794,17 +3761,19 @@ static int dct_quantize_c(MpegEncContext *s,
*overflow= s->max_qcoeff < max; //overflow might have happend
/* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
- if (s->idct_permutation_type != FF_NO_IDCT_PERM)
- ff_block_permute(block, s->idct_permutation, scantable, last_non_zero);
+ if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
+ ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
return last_non_zero;
}
+#endif //CONFIG_ENCODERS
+
static void dct_unquantize_mpeg1_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale)
{
int i, level, nCoeffs;
- const UINT16 *quant_matrix;
+ const uint16_t *quant_matrix;
nCoeffs= s->block_last_index[n];
@@ -3867,7 +3836,7 @@ static void dct_unquantize_mpeg2_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale)
{
int i, level, nCoeffs;
- const UINT16 *quant_matrix;
+ const uint16_t *quant_matrix;
if(s->alternate_scan) nCoeffs= 63;
else nCoeffs= s->block_last_index[n];
@@ -3932,7 +3901,7 @@ static void dct_unquantize_h263_c(MpegEncContext *s,
int i, level, qmul, qadd;
int nCoeffs;
- XINE_ASSERT(s->block_last_index[n]>=0 , "s->block_last_index[%d] is < 0: %d", n, s->block_last_index[n]);
+ assert(s->block_last_index[n]>=0);
qadd = (qscale - 1) | 1;
qmul = qscale << 1;
@@ -3949,7 +3918,7 @@ static void dct_unquantize_h263_c(MpegEncContext *s,
nCoeffs= 63; //does not allways use zigzag table
} else {
i = 0;
- nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
+ nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
}
for(;i<=nCoeffs;i++) {
@@ -3969,6 +3938,7 @@ static void dct_unquantize_h263_c(MpegEncContext *s,
}
}
+
char ff_get_pict_type_char(int pict_type){
switch(pict_type){
case I_TYPE: return 'I';
@@ -3979,6 +3949,83 @@ char ff_get_pict_type_char(int pict_type){
}
}
+static const AVOption mpeg4_options[] =
+{
+ AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
+ AVOPTION_CODEC_FLAG("vhq", "very high quality", flags, CODEC_FLAG_HQ, 0),
+ AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
+ "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
+ bit_rate_tolerance, 4, 240000000, 8000),
+ AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
+ AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
+ AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
+ rc_eq, "tex^qComp,option1,options2", 0),
+ AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
+ rc_min_rate, 4, 24000000, 0),
+ AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
+ rc_max_rate, 4, 24000000, 0),
+ AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity",
+ rc_buffer_aggressivity, 4, 24000000, 0),
+ AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
+ rc_initial_cplx, 0., 9999999., 0),
+ AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
+ i_quant_factor, 0., 0., 0),
+ AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
+ i_quant_factor, -999999., 999999., 0),
+ AVOPTION_CODEC_INT("dct_algo", "dct alghorithm",
+ dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
+ AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
+ lumi_masking, 0., 999999., 0),
+ AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking",
+ temporal_cplx_masking, 0., 999999., 0),
+ AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
+ spatial_cplx_masking, 0., 999999., 0),
+ AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
+ p_masking, 0., 999999., 0),
+ AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
+ dark_masking, 0., 999999., 0),
+ AVOPTION_CODEC_INT("idct_algo", "idct alghorithm",
+ idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"
+
+ AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
+ mb_qmin, 0, 8, 0),
+ AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
+ mb_qmin, 0, 8, 0),
+
+ AVOPTION_CODEC_INT("me_cmp", "ME compare function",
+ me_cmp, 0, 24000000, 0),
+ AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
+ me_sub_cmp, 0, 24000000, 0),
+
+
+ AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
+ dia_size, 0, 24000000, 0),
+ AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
+ last_predictor_count, 0, 24000000, 0),
+
+ AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
+ pre_me, 0, 24000000, 0),
+ AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
+ me_pre_cmp, 0, 24000000, 0),
+
+ AVOPTION_CODEC_INT("me_range", "maximum ME search range",
+ me_range, 0, 24000000, 0),
+ AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamod size & shape",
+ pre_dia_size, 0, 24000000, 0),
+ AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
+ me_subpel_quality, 0, 24000000, 0),
+ AVOPTION_CODEC_INT("me_range", "maximum ME search range",
+ me_range, 0, 24000000, 0),
+ AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
+ flags, CODEC_FLAG_PSNR, 0),
+ AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
+ rc_override),
+ AVOPTION_SUB(avoptions_common),
+ AVOPTION_END()
+};
+
+#ifdef CONFIG_ENCODERS
+
AVCodec mpeg1video_encoder = {
"mpeg1video",
CODEC_TYPE_VIDEO,
@@ -4029,6 +4076,7 @@ AVCodec mpeg4_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .options = mpeg4_options,
};
AVCodec msmpeg4v1_encoder = {
@@ -4039,6 +4087,7 @@ AVCodec msmpeg4v1_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .options = mpeg4_options,
};
AVCodec msmpeg4v2_encoder = {
@@ -4049,6 +4098,7 @@ AVCodec msmpeg4v2_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .options = mpeg4_options,
};
AVCodec msmpeg4v3_encoder = {
@@ -4059,6 +4109,7 @@ AVCodec msmpeg4v3_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .options = mpeg4_options,
};
AVCodec wmv1_encoder = {
@@ -4069,6 +4120,7 @@ AVCodec wmv1_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .options = mpeg4_options,
};
#endif
@@ -4082,3 +4134,6 @@ AVCodec mjpeg_encoder = {
MPV_encode_picture,
MPV_encode_end,
};
+
+#endif //CONFIG_ENCODERS
+