From 79fe3a7e9b9d4e22123a788d44458d17eaaeb36f Mon Sep 17 00:00:00 2001
From: Petri Hintukainen
Date: Thu, 17 Jan 2013 12:50:54 +0200
Subject: Pass frame as parameter to ff_convert_frame()

(merged from https://github.com/huceke/xine-lib-vaapi)
---
 src/combined/ffmpeg/ff_video_decoder.c | 66 +++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 33 deletions(-)

(limited to 'src')

diff --git a/src/combined/ffmpeg/ff_video_decoder.c b/src/combined/ffmpeg/ff_video_decoder.c
index 2e8b80efe..c15e3cfd1 100644
--- a/src/combined/ffmpeg/ff_video_decoder.c
+++ b/src/combined/ffmpeg/ff_video_decoder.c
@@ -608,7 +608,7 @@ static int ff_handle_mpeg_sequence(ff_video_decoder_t *this, mpeg_parser_t *pars
   return 1;
 }
 
-static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
+static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img, AVFrame *av_frame) {
   int y;
   uint8_t *dy, *du, *dv, *sy, *su, *sv;
 
@@ -622,9 +622,9 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
   dy = img->base[0];
   du = img->base[1];
   dv = img->base[2];
-  sy = this->av_frame->data[0];
-  su = this->av_frame->data[1];
-  sv = this->av_frame->data[2];
+  sy = av_frame->data[0];
+  su = av_frame->data[1];
+  sv = av_frame->data[2];
 
   /* Some segfaults & heap corruption have been observed with img->height,
    * so we use this->bih.biHeight instead (which is the displayed height)
@@ -634,18 +634,18 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
 
     yuv9_to_yv12(
       /* Y */
-      this->av_frame->data[0],
-      this->av_frame->linesize[0],
+      av_frame->data[0],
+      av_frame->linesize[0],
       img->base[0],
       img->pitches[0],
       /* U */
-      this->av_frame->data[1],
-      this->av_frame->linesize[1],
+      av_frame->data[1],
+      av_frame->linesize[1],
       img->base[1],
       img->pitches[1],
       /* V */
-      this->av_frame->data[2],
-      this->av_frame->linesize[2],
+      av_frame->data[2],
+      av_frame->linesize[2],
       img->base[2],
       img->pitches[2],
       /* width x height */
@@ -656,18 +656,18 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
 
     yuv411_to_yv12(
       /* Y */
-      this->av_frame->data[0],
-      this->av_frame->linesize[0],
+      av_frame->data[0],
+      av_frame->linesize[0],
       img->base[0],
       img->pitches[0],
       /* U */
-      this->av_frame->data[1],
-      this->av_frame->linesize[1],
+      av_frame->data[1],
+      av_frame->linesize[1],
       img->base[1],
       img->pitches[1],
       /* V */
-      this->av_frame->data[2],
-      this->av_frame->linesize[2],
+      av_frame->data[2],
+      av_frame->linesize[2],
       img->base[2],
       img->pitches[2],
       /* width x height */
@@ -697,7 +697,7 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
         this->yuv.v[plane_ptr] = COMPUTE_V(r, g, b);
         plane_ptr++;
       }
-      sy += this->av_frame->linesize[0];
+      sy += av_frame->linesize[0];
     }
 
     yuv444_to_yuy2(&this->yuv, img->base[0], img->pitches[0]);
@@ -726,7 +726,7 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
         this->yuv.v[plane_ptr] = COMPUTE_V(r, g, b);
         plane_ptr++;
       }
-      sy += this->av_frame->linesize[0];
+      sy += av_frame->linesize[0];
     }
 
     yuv444_to_yuy2(&this->yuv, img->base[0], img->pitches[0]);
@@ -755,7 +755,7 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
         this->yuv.v[plane_ptr] = COMPUTE_V(r, g, b);
         plane_ptr++;
       }
-      sy += this->av_frame->linesize[0];
+      sy += av_frame->linesize[0];
     }
 
     yuv444_to_yuy2(&this->yuv, img->base[0], img->pitches[0]);
@@ -779,7 +779,7 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
         this->yuv.v[plane_ptr] = COMPUTE_V(r, g, b);
         plane_ptr++;
       }
-      sy += this->av_frame->linesize[0];
+      sy += av_frame->linesize[0];
     }
 
     yuv444_to_yuy2(&this->yuv, img->base[0], img->pitches[0]);
@@ -803,7 +803,7 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
        this->yuv.v[plane_ptr] = COMPUTE_V(r, g, b);
        plane_ptr++;
      }
-      sy += this->av_frame->linesize[0];
+      sy += av_frame->linesize[0];
     }
 
     yuv444_to_yuy2(&this->yuv, img->base[0], img->pitches[0]);
@@ -842,7 +842,7 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
         this->yuv.v[plane_ptr] = v_palette[pixel];
         plane_ptr++;
       }
-      sy += this->av_frame->linesize[0];
+      sy += av_frame->linesize[0];
     }
 
     yuv444_to_yuy2(&this->yuv, img->base[0], img->pitches[0]);
@@ -866,7 +866,7 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
       q = dy;
       for (x = img->width; x > 0; x--) *q++ = ytab[*p++];
       dy += img->pitches[0];
-      sy += this->av_frame->linesize[0];
+      sy += av_frame->linesize[0];
     }
 
     for (y = 0; y < this->bih.biHeight / 2; y++) {
@@ -884,11 +884,11 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
       du += img->pitches[1];
      dv += img->pitches[2];
       if (subsampv) {
-        su += 2 * this->av_frame->linesize[1];
-        sv += 2 * this->av_frame->linesize[2];
+        su += 2 * av_frame->linesize[1];
+        sv += 2 * av_frame->linesize[2];
       } else {
-        su += this->av_frame->linesize[1];
-        sv += this->av_frame->linesize[2];
+        su += av_frame->linesize[1];
+        sv += av_frame->linesize[2];
       }
     }
 
@@ -897,7 +897,7 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
     for (y = 0; y < this->bih.biHeight; y++) {
       xine_fast_memcpy (dy, sy, img->width);
       dy += img->pitches[0];
-      sy += this->av_frame->linesize[0];
+      sy += av_frame->linesize[0];
     }
 
     for (y = 0; y < this->bih.biHeight / 2; y++) {
@@ -926,11 +926,11 @@ static void ff_convert_frame(ff_video_decoder_t *this, vo_frame_t *img) {
       du += img->pitches[1];
       dv += img->pitches[2];
       if (subsampv) {
-        su += 2*this->av_frame->linesize[1];
-        sv += 2*this->av_frame->linesize[2];
+        su += 2*av_frame->linesize[1];
+        sv += 2*av_frame->linesize[2];
       } else {
-        su += this->av_frame->linesize[1];
-        sv += this->av_frame->linesize[2];
+        su += av_frame->linesize[1];
+        sv += av_frame->linesize[2];
       }
     }
 
@@ -1600,7 +1600,7 @@ static void ff_handle_buffer (ff_video_decoder_t *this, buf_element_t *buf) {
 
       } else if (!this->av_frame->opaque) {
         /* colorspace conversion or copy */
-        ff_convert_frame(this, img);
+        ff_convert_frame(this, img, this->av_frame);
       }
 
       img->pts = ff_untag_pts(this, this->av_frame->reordered_opaque);
-- 
cgit v1.2.3