Diffstat (limited to 'src/video_dec/libvdpau')
-rw-r--r--  src/video_dec/libvdpau/Makefile.am              |   42
-rw-r--r--  src/video_dec/libvdpau/alterh264_bits_reader.h  |  127
-rw-r--r--  src/video_dec/libvdpau/alterh264_decode.c       | 2448
-rw-r--r--  src/video_dec/libvdpau/alterh264_decode.h       |  339
-rw-r--r--  src/video_dec/libvdpau/bits_reader.h            |   82
-rw-r--r--  src/video_dec/libvdpau/cpb.c                    |   49
-rw-r--r--  src/video_dec/libvdpau/cpb.h                    |   67
-rw-r--r--  src/video_dec/libvdpau/dpb.c                    |  622
-rw-r--r--  src/video_dec/libvdpau/dpb.h                    |  117
-rw-r--r--  src/video_dec/libvdpau/h264_parser.c            | 2038
-rw-r--r--  src/video_dec/libvdpau/h264_parser.h            |  110
-rw-r--r--  src/video_dec/libvdpau/nal.c                    |  213
-rw-r--r--  src/video_dec/libvdpau/nal.h                    |  501
-rw-r--r--  src/video_dec/libvdpau/vdpau_h264.c             | 1014
-rw-r--r--  src/video_dec/libvdpau/vdpau_mpeg12.c           | 1101
-rw-r--r--  src/video_dec/libvdpau/vdpau_mpeg4.c            | 1194
-rw-r--r--  src/video_dec/libvdpau/vdpau_vc1.c              | 1176
17 files changed, 11240 insertions, 0 deletions
diff --git a/src/video_dec/libvdpau/Makefile.am b/src/video_dec/libvdpau/Makefile.am
new file mode 100644
index 000000000..781001a04
--- /dev/null
+++ b/src/video_dec/libvdpau/Makefile.am
@@ -0,0 +1,42 @@
+include $(top_srcdir)/misc/Makefile.quiet
+include $(top_srcdir)/misc/Makefile.common
+
+AM_CFLAGS = $(DEFAULT_OCFLAGS) $(VISIBILITY_FLAG)
+AM_LDFLAGS = $(xineplug_ldflags)
+
+noinst_HEADERS = alterh264_decode.h alterh264_bits_reader.h bits_reader.h dpb.h cpb.h h264_parser.h nal.h
+
+if ENABLE_VDPAU
+vdpau_h264_module = xineplug_decode_vdpau_h264.la
+VDPAU_CFLAGS += -D_ISOC99_SOURCE
+
+vdpau_h264_alter_module = xineplug_decode_vdpau_h264_alter.la
+
+vdpau_mpeg12_module = xineplug_decode_vdpau_mpeg12.la
+
+vdpau_vc1_module = xineplug_decode_vdpau_vc1.la
+
+vdpau_mpeg4_module = xineplug_decode_vdpau_mpeg4.la
+endif
+
+xineplug_LTLIBRARIES = $(vdpau_h264_module) $(vdpau_h264_alter_module) $(vdpau_mpeg12_module) $(vdpau_vc1_module) $(vdpau_mpeg4_module)
+
+xineplug_decode_vdpau_h264_alter_la_SOURCES = alterh264_decode.c
+xineplug_decode_vdpau_h264_alter_la_CFLAGS = $(AM_CFLAGS) -fno-strict-aliasing
+xineplug_decode_vdpau_h264_alter_la_LIBADD = $(XINE_LIB) $(DYNAMIC_LD_LIBS)
+
+xineplug_decode_vdpau_h264_la_SOURCES = nal.c dpb.c cpb.c h264_parser.c vdpau_h264.c
+xineplug_decode_vdpau_h264_la_CFLAGS = $(AM_CFLAGS) $(VDPAU_CFLAGS) -fno-strict-aliasing
+xineplug_decode_vdpau_h264_la_LIBADD = $(XINE_LIB) $(DYNAMIC_LD_LIBS) -lm
+
+xineplug_decode_vdpau_mpeg12_la_SOURCES = vdpau_mpeg12.c
+xineplug_decode_vdpau_mpeg12_la_CFLAGS = $(AM_CFLAGS) -fno-strict-aliasing
+xineplug_decode_vdpau_mpeg12_la_LIBADD = $(XINE_LIB) $(DYNAMIC_LD_LIBS)
+
+xineplug_decode_vdpau_vc1_la_SOURCES = vdpau_vc1.c
+xineplug_decode_vdpau_vc1_la_CFLAGS = $(AM_CFLAGS) -fno-strict-aliasing
+xineplug_decode_vdpau_vc1_la_LIBADD = $(XINE_LIB) $(DYNAMIC_LD_LIBS)
+
+xineplug_decode_vdpau_mpeg4_la_SOURCES = vdpau_mpeg4.c
+xineplug_decode_vdpau_mpeg4_la_CFLAGS = $(AM_CFLAGS) -fno-strict-aliasing
+xineplug_decode_vdpau_mpeg4_la_LIBADD = $(XINE_LIB) $(DYNAMIC_LD_LIBS)
diff --git a/src/video_dec/libvdpau/alterh264_bits_reader.h b/src/video_dec/libvdpau/alterh264_bits_reader.h
new file mode 100644
index 000000000..47a26aca1
--- /dev/null
+++ b/src/video_dec/libvdpau/alterh264_bits_reader.h
@@ -0,0 +1,127 @@
+/* kate: tab-indent on; indent-width 4; mixedindent off; indent-mode cstyle; remove-trailing-space on; */
+#ifndef ALTERH264_BITS_READER_H
+#define ALTERH264_BITS_READER_H
+#include <sys/types.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+
+
+typedef struct {
+ uint8_t *buffer, *start;
+ int offbits, length, oflow;
+} bits_reader_t;
+
+
+
+static void
+bits_reader_set (bits_reader_t * br, uint8_t * buf, int len)
+{
+ br->buffer = br->start = buf;
+ br->offbits = 0;
+ br->length = len;
+ br->oflow = 0;
+}
+
+
+
+static inline uint32_t
+more_rbsp_data (bits_reader_t * br)
+{
+ uint8_t val[8] = { 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 };
+ uint8_t *buf = br->start + br->length;
+ int bit;
+
+ while (--buf >= br->buffer)
+ {
+ for (bit = 7; bit > -1; bit--)
+ if (*buf & val[bit])
+ return ((buf - br->buffer) * 8) - br->offbits + bit;
+ }
+ return 0;
+}
+
+
+
+static inline uint8_t
+bits_reader_shift (bits_reader_t * br)
+{
+ br->offbits = 0;
+ if ((br->buffer + 1) > (br->start + br->length - 1))
+ {
+ br->oflow = 1;
+ //printf("!!!!! buffer overflow !!!!!\n");
+ return 0;
+ }
+ ++br->buffer;
+ if ((*(br->buffer) == 3) && ((br->buffer - br->start) > 2)
+ && (*(br->buffer - 2) == 0) && (*(br->buffer - 1) == 0))
+ {
+ if ((br->buffer + 1) > (br->start + br->length - 1))
+ {
+ br->oflow = 1;
+ //printf("!!!!! buffer overflow !!!!!\n");
+ return 0;
+ }
+ ++br->buffer;
+ }
+ return 1;
+}
+
+
+
+static inline uint32_t
+read_bits (bits_reader_t * br, int nbits)
+{
+ uint8_t val[8] = { 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 };
+ uint32_t res = 0;
+
+ while (nbits)
+ {
+ res = (res << 1) + ((*br->buffer & val[br->offbits]) ? 1 : 0);
+ --nbits;
+ ++br->offbits;
+ if (br->offbits > 7)
+ if (!bits_reader_shift (br))
+ return 1;
+ }
+ return res;
+}
+
+
+
+static inline void
+skip_bits (bits_reader_t * br, int nbits)
+{
+ while (nbits)
+ {
+ --nbits;
+ ++br->offbits;
+ if (br->offbits > 7)
+ bits_reader_shift (br);
+ }
+}
+
+
+
+static inline uint32_t
+read_exp_ue (bits_reader_t * br)
+{
+ int leading = -1;
+ uint8_t b;
+
+ for (b = 0; !b; leading++)
+ b = read_bits (br, 1);
+
+ return (1 << leading) - 1 + read_bits (br, leading);
+}
+
+
+
+static inline int32_t
+read_exp_se (bits_reader_t * br)
+{
+ uint32_t res = read_exp_ue (br);
+ return (res & 0x01) ? (res + 1) / 2 : -(res / 2);
+}
+#endif /* ALTERH264_BITS_READER_H */
diff --git a/src/video_dec/libvdpau/alterh264_decode.c b/src/video_dec/libvdpau/alterh264_decode.c
new file mode 100644
index 000000000..f11162f3e
--- /dev/null
+++ b/src/video_dec/libvdpau/alterh264_decode.c
@@ -0,0 +1,2448 @@
+/* kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; remove-trailing-space on;
+ * Copyright (C) 2008 the xine project
+ * Copyright (C) 2008 Christophe Thommeret <hftom@free.fr>
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * alterh264_decode.c, an H264 video stream parser using the VDPAU hardware decoder
+ *
+ */
+
+#include "alterh264_decode.h"
+
+
+#define MAX_DPB_SIZE 16
+#define MIN_BUFFER_SIZE 10000
+#define MAX_BUFFER_SIZE 3145728
+
+#define NAL_UNSPECIFIED 0
+#define NAL_SLICE_NO_IDR 1
+#define NAL_SLICE_IDR 5
+#define NAL_SEI 6
+#define NAL_SEQUENCE 7
+#define NAL_PICTURE 8
+#define NAL_ACCES 9
+#define NAL_END_SEQUENCE 10
+#define NAL_END_STREAM 11
+#define NAL_SEQUENCE_EXT 13
+
+#define SLICE_TYPE_P 0
+#define SLICE_TYPE_B 1
+#define SLICE_TYPE_I 2
+#define SLICE_TYPE_SP 3
+#define SLICE_TYPE_SI 4
+
+#define START_IDR_FLAG 1000
+
+#define MAX_POC 2147483647
+
+#define DPB_DRAW_CLEAR 1
+#define DPB_DRAW_REFS 2
+#define DPB_DRAW_CURRENT 3
+
+//#define MAKE_DAT /* do NOT define this unless you know what you are doing */
+#ifdef MAKE_DAT
+static int nframes;
+static FILE *outfile;
+#endif
+
+
+/*-------- DPB -------------------------------------------*/
+static void
+dpb_print (sequence_t * sequence)
+{
+ int i;
+ dpb_frame_t *frame;
+ uint32_t sf;
+
+ for (i = 0; i < MAX_DPB_SIZE; i++)
+ {
+ frame = sequence->dpb[i];
+ if (!frame->used)
+ break;
+ vo_frame_t *vo = (vo_frame_t *) frame->videoSurface;
+ vdpau_accel_t *accel;
+ if (vo)
+ accel = (vdpau_accel_t *) vo->accel_data;
+ sf = (vo) ? accel->surface : -1;
+ fprintf (stderr,
+ "{ i:%d u:%d c:%d pn:%d-%d ir:%d-%d tpoc:%d bpoc:%d sf:%u }\n",
+ i, frame->used, frame->completed, frame->PicNum[0],
+ frame->PicNum[1], frame->is_reference[0], frame->is_reference[1],
+ frame->TopFieldOrderCnt, frame->BottomFieldOrderCnt, sf);
+ }
+}
+
+
+
+static void
+dpb_clear_all_pts (sequence_t * sequence)
+{
+ int i;
+
+ for (i = 0; i < MAX_DPB_SIZE; i++)
+ {
+ if (!sequence->dpb[i]->used)
+ break;
+ sequence->dpb[i]->pts = 0;
+ }
+ sequence->cur_pic.pts = 0;
+ sequence->cur_pic.drop_pts = 1;
+}
+
+
+static void
+dpb_reset (sequence_t * sequence)
+{
+ int i;
+
+ for (i = 0; i < MAX_DPB_SIZE; i++)
+ {
+ if (sequence->dpb[i]->videoSurface)
+ sequence->dpb[i]->videoSurface->free (sequence->dpb[i]->videoSurface);
+ memset (sequence->dpb[i], 0, sizeof (dpb_frame_t));
+ }
+ if (sequence->cur_pic.videoSurface && !sequence->cur_pic.is_reference[0]
+ && !sequence->cur_pic.is_reference[1])
+ {
+ //fprintf(stderr, "freeing cur_pic\n");
+ sequence->cur_pic.videoSurface->free (sequence->cur_pic.videoSurface);
+ }
+}
+
+
+
+static void
+dpb_remove (sequence_t * sequence, int index)
+{
+ lprintf ("|||||||||||||||||||||||||||||||||||||||| dbp_remove\n");
+ int i;
+
+ dpb_frame_t *frame = sequence->dpb[index];
+ if (frame->videoSurface)
+ frame->videoSurface->free (frame->videoSurface);
+ memset (frame, 0, sizeof (dpb_frame_t));
+ for (i = index; i < (MAX_DPB_SIZE - 1); i++)
+ {
+ sequence->dpb[i] = sequence->dpb[i + 1];
+ if (!sequence->dpb[i]->used)
+ {
+ sequence->dpb[i + 1] = frame;
+ break;
+ }
+ }
+ if (i == (MAX_DPB_SIZE - 1))
+ sequence->dpb[i] = frame;
+}
+
+
+
+static dpb_frame_t *
+dpb_get_prev_ref (sequence_t * sequence)
+{
+ int i = MAX_DPB_SIZE - 1;
+
+ while (i > -1)
+ {
+ if (sequence->dpb[i]->used)
+ return sequence->dpb[i];
+ --i;
+ }
+
+ return NULL;
+}
+
+
+
+static void
+dpb_draw_frames (vdpau_h264_alter_decoder_t * this_gen, int32_t curpoc,
+ int draw_mode)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ int i, index = 0;
+ int32_t poc, tpoc;
+ dpb_frame_t *frame;
+
+ while (index > -1)
+ {
+ index = -1;
+ poc = curpoc;
+ for (i = 0; i < MAX_DPB_SIZE; i++)
+ {
+ frame = seq->dpb[i];
+ if (!frame->used)
+ break;
+ tpoc =
+ (frame->TopFieldOrderCnt >
+ frame->BottomFieldOrderCnt) ? frame->TopFieldOrderCnt : frame->
+ BottomFieldOrderCnt;
+ if (!frame->videoSurface->drawn && (tpoc <= poc))
+ {
+ poc = tpoc;
+ index = i;
+ }
+ }
+ if ((index > -1) && (poc <= curpoc))
+ {
+ //fprintf(stderr,"|||||||||||||||||||||||||||||||||||||||| dpb_draw_frame = %d\n", poc);
+ frame = seq->dpb[index];
+ frame->videoSurface->pts = frame->pts;
+ //fprintf(stderr,"H264 PTS = %llu\n", frame->pts);
+ frame->videoSurface->top_field_first = frame->top_field_first;
+ frame->videoSurface->draw (frame->videoSurface, this_gen->stream);
+ frame->videoSurface->drawn++;
+ if ((draw_mode != DPB_DRAW_CLEAR) && !frame->is_reference[0]
+ && !frame->is_reference[1])
+ dpb_remove (seq, index);
+ }
+ else
+ index = -1;
+ }
+
+ if (draw_mode == DPB_DRAW_CURRENT)
+ {
+ //fprintf(stderr,"|||||||||||||||||||||||||||||||||||||||| dpb_draw_frame = %d\n", curpoc);
+ frame = &seq->cur_pic;
+ frame->videoSurface->pts = frame->pts;
+ //fprintf(stderr,"H264 PTS = %llu\n", frame->pts);
+ frame->videoSurface->top_field_first = frame->top_field_first;
+ frame->videoSurface->draw (frame->videoSurface, this_gen->stream);
+ frame->videoSurface->free (frame->videoSurface);
+ }
+ else if (draw_mode == DPB_DRAW_CLEAR)
+ dpb_reset (seq);
+}
+
+
+
+static dpb_frame_t *
+dpb_get_PicNum (sequence_t * sequence, int32_t pic_num, int *index)
+{
+ dpb_frame_t *frame;
+ int i = 0;
+
+ for (i = 0; i < MAX_DPB_SIZE; i++)
+ {
+ frame = sequence->dpb[i];
+ if (!frame->used)
+ break;
+ if ((frame->PicNum[0] == pic_num) || (frame->PicNum[1] == pic_num))
+ {
+ *index = i;
+ return frame;
+ }
+ }
+ return 0;
+}
+
+
+
+static void
+dpb_mmc1 (vdpau_h264_alter_decoder_t * this_gen, int32_t picnum)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ int index;
+
+ lprintf ("dpb_mmc1\n");
+
+ dpb_frame_t *frame = dpb_get_PicNum (seq, picnum, &index);
+
+ if (frame)
+ {
+ frame->is_reference[0] = frame->is_reference[1] = 0;
+ if (frame->videoSurface->drawn)
+ dpb_remove (seq, index);
+ else
+ dpb_draw_frames (this_gen,
+ (frame->TopFieldOrderCnt >
+ frame->BottomFieldOrderCnt) ? frame->
+ TopFieldOrderCnt : frame->BottomFieldOrderCnt,
+ DPB_DRAW_REFS);
+ }
+}
+
+
+
+static void
+dbp_append (vdpau_h264_alter_decoder_t * this_gen, int second_field)
+{
+ sequence_t *sequence = (sequence_t *) & this_gen->sequence;
+ int i, index = 0, refs = 0;
+ int32_t fnw = MAX_POC;
+ slice_param_t *sl = &sequence->slice_param;
+ pic_param_t *pic = sequence->pic_param[sl->pic_parameter_set_id];
+ seq_param_t *sp = sequence->seq_param[pic->seq_parameter_set_id];
+ dpb_frame_t *tmp = 0, *cur_pic = &sequence->cur_pic;
+ int max = sp->num_ref_frames ? sp->num_ref_frames : 1;
+ max = (max > MAX_DPB_SIZE) ? MAX_DPB_SIZE : max;
+
+ vo_frame_t *vo = (vo_frame_t *) cur_pic->videoSurface;
+ vdpau_accel_t *accel = (vdpau_accel_t *) vo->accel_data;
+ lprintf
+ ("|||||||||||||||||||||||||||||||||||||||| dbp_append surface = %d\n",
+ accel->surface);
+
+ if (second_field)
+ {
+ tmp = dpb_get_prev_ref (sequence);
+ if (tmp)
+ {
+ memcpy (tmp, cur_pic, sizeof (dpb_frame_t));
+ cur_pic->videoSurface = NULL;
+ }
+ else
+ fprintf (stderr, "OOPS, no frame to store the second field ?!\n");
+ return;
+ }
+
+ for (i = 0; i < MAX_DPB_SIZE; i++)
+ {
+ if (!sequence->dpb[i]->used)
+ break;
+ if (sequence->dpb[i]->FrameNumWrap < fnw)
+ {
+ fnw = sequence->dpb[i]->FrameNumWrap;
+ index = i;
+ }
+ refs++;
+ }
+
+ if (refs >= max)
+ {
+ lprintf ("sliding window\n");
+ tmp = sequence->dpb[index],
+ tmp->is_reference[0] = tmp->is_reference[1] = 0;
+ if (tmp->videoSurface->drawn)
+ dpb_remove (sequence, index);
+ else
+ dpb_draw_frames (this_gen,
+ (tmp->TopFieldOrderCnt >
+ tmp->BottomFieldOrderCnt) ? tmp->
+ TopFieldOrderCnt : tmp->BottomFieldOrderCnt,
+ DPB_DRAW_REFS);
+
+ for (i = 0; i < MAX_DPB_SIZE; i++)
+ {
+ if (!sequence->dpb[i]->used)
+ break;
+ }
+ }
+
+ if (i < MAX_DPB_SIZE)
+ {
+ memcpy (sequence->dpb[i], cur_pic, sizeof (dpb_frame_t));
+ if (!cur_pic->field_pic_flag)
+ cur_pic->videoSurface = NULL;
+ }
+}
+
+/*--------------------------------------------------------*/
+
+
+
+static void
+reset_slices (sequence_t * sequence)
+{
+ sequence->slices_count = 0;
+ sequence->slice_mode = 0;
+}
+
+
+
+static void
+reset_sequence (sequence_t * sequence)
+{
+ sequence->prevFrameNum = 0;
+ sequence->prevFrameNumOffset = 0;
+ sequence->prevMMC5 = 0;
+
+ sequence->startup_frame = 0;
+ sequence->reset = 0;
+ sequence->chroma = 0;
+ sequence->pic_pts = 0;
+ sequence->bufpos = 0;
+ sequence->bufseek = 0;
+ sequence->start = -1;
+ reset_slices (sequence);
+ dpb_reset (sequence);
+ memset (&sequence->cur_pic, 0, sizeof (dpb_frame_t));
+ sequence->reset = VO_NEW_SEQUENCE_FLAG;
+ sequence->color_standard = VDP_COLOR_STANDARD_ITUR_BT_601;
+}
+
+
+
+static void
+set_ratio (sequence_t * seq, seq_param_t * sp)
+{
+ if (seq->mode_frame && seq->ratio)
+ return;
+ if (!seq->coded_height)
+ seq->coded_height = 1;
+ seq->ratio = (double) seq->coded_width / (double) seq->coded_height;
+ if (sp->vui.aspect_ratio_info)
+ {
+ switch (sp->vui.aspect_ratio_idc)
+ {
+ case ASPECT_1_1:
+ seq->ratio = 1 * seq->ratio;
+ break;
+ case ASPECT_12_11:
+ seq->ratio *= 12.0 / 11.0;
+ break;
+ case ASPECT_10_11:
+ seq->ratio *= 10.0 / 11.0;
+ break;
+ case ASPECT_16_11:
+ seq->ratio *= 16.0 / 11.0;
+ break;
+ case ASPECT_40_33:
+ seq->ratio *= 40.0 / 33.0;
+ break;
+ case ASPECT_24_11:
+ seq->ratio *= 24.0 / 11.0;
+ break;
+ case ASPECT_20_11:
+ seq->ratio *= 20.0 / 11.0;
+ break;
+ case ASPECT_32_11:
+ seq->ratio *= 32.0 / 11.0;
+ break;
+ case ASPECT_80_33:
+ seq->ratio *= 80.0 / 33.0;
+ break;
+ case ASPECT_18_11:
+ seq->ratio *= 18.0 / 11.0;
+ break;
+ case ASPECT_15_11:
+ seq->ratio *= 15.0 / 11.0;
+ break;
+ case ASPECT_64_33:
+ seq->ratio *= 64.0 / 33.0;
+ break;
+ case ASPECT_160_99:
+ seq->ratio *= 160.0 / 99.0;
+ break;
+ case ASPECT_4_3:
+ seq->ratio *= 4.0 / 3.0;
+ break;
+ case ASPECT_3_2:
+ seq->ratio *= 3.0 / 2.0;
+ break;
+ case ASPECT_2_1:
+ seq->ratio *= 2.0 / 1.0;
+ break;
+ case ASPECT_EXTENDED_SAR:
+ if (sp->vui.sar_height)
+ seq->ratio *= (double) sp->vui.sar_width / sp->vui.sar_height;
+ break;
+ }
+ }
+}
+
+
+
+static void
+parse_scaling_list (bits_reader_t * br, uint8_t * scaling_list, int len,
+ int index)
+{
+ int last_scale = 8;
+ int next_scale = 8;
+ int32_t delta_scale;
+ uint8_t use_default_scaling_matrix_flag = 0;
+ int i;
+
+ const uint8_t *zigzag = (len == 64) ? zigzag_8x8 : zigzag_4x4;
+
+ for (i = 0; i < len; i++)
+ {
+ if (next_scale != 0)
+ {
+ delta_scale = read_exp_se (br);
+ next_scale = (last_scale + delta_scale + 256) % 256;
+ if (i == 0 && next_scale == 0)
+ {
+ use_default_scaling_matrix_flag = 1;
+ break;
+ }
+ }
+ scaling_list[zigzag[i]] = last_scale =
+ (next_scale == 0) ? last_scale : next_scale;
+ }
+
+ if (use_default_scaling_matrix_flag)
+ {
+ switch (index)
+ {
+ case 0:
+ case 1:
+ case 2:
+ {
+ for (i = 0; i < sizeof (default_4x4_intra); i++)
+ scaling_list[zigzag_4x4[i]] = default_4x4_intra[i];
+ break;
+ }
+ case 3:
+ case 4:
+ case 5:
+ {
+ for (i = 0; i < sizeof (default_4x4_inter); i++)
+ scaling_list[zigzag_4x4[i]] = default_4x4_inter[i];
+ break;
+ }
+ case 6:
+ {
+ for (i = 0; i < sizeof (default_8x8_intra); i++)
+ scaling_list[zigzag_8x8[i]] = default_8x8_intra[i];
+ break;
+ }
+ case 7:
+ {
+ for (i = 0; i < sizeof (default_8x8_inter); i++)
+ scaling_list[zigzag_8x8[i]] = default_8x8_inter[i];
+ break;
+ }
+ }
+ }
+}
+
+
+
+static void
+scaling_list_fallback_A (uint8_t * scaling_lists_4x4,
+ uint8_t * scaling_lists_8x8, int i)
+{
+ int j;
+ switch (i)
+ {
+ case 0:
+ {
+ for (j = 0; j < sizeof (default_4x4_intra); j++)
+ scaling_lists_4x4[(i * 16) + zigzag_4x4[j]] = default_4x4_intra[j];
+ break;
+ }
+ case 3:
+ {
+ for (j = 0; j < sizeof (default_4x4_inter); j++)
+ scaling_lists_4x4[(i * 16) + zigzag_4x4[j]] = default_4x4_inter[j];
+ break;
+ }
+ case 1:
+ case 2:
+ case 4:
+ case 5:
+ memcpy (&scaling_lists_4x4[i * 16], &scaling_lists_4x4[(i - 1) * 16],
+ 6 * 16);
+ break;
+ case 6:
+ {
+ for (j = 0; j < sizeof (default_8x8_intra); j++)
+ scaling_lists_8x8[(i - 6) * 64 + zigzag_8x8[j]] =
+ default_8x8_intra[j];
+ break;
+ }
+ case 7:
+ {
+ for (j = 0; j < sizeof (default_8x8_inter); j++)
+ scaling_lists_8x8[(i - 6) * 64 + zigzag_8x8[j]] =
+ default_8x8_inter[j];
+ break;
+ }
+
+ }
+}
+
+
+
+static void
+scaling_list_fallback_B (seq_param_t * sp, pic_param_t * pic, int i)
+{
+ switch (i)
+ {
+ case 0:
+ case 3:
+ memcpy (pic->scaling_lists_4x4[i], sp->scaling_lists_4x4[i],
+ sizeof (pic->scaling_lists_4x4[i]));
+ break;
+ case 1:
+ case 2:
+ case 4:
+ case 5:
+ memcpy (pic->scaling_lists_4x4[i], pic->scaling_lists_4x4[i - 1],
+ sizeof (pic->scaling_lists_4x4[i]));
+ break;
+ case 6:
+ case 7:
+ memcpy (pic->scaling_lists_8x8[i - 6], sp->scaling_lists_8x8[i - 6],
+ sizeof (pic->scaling_lists_8x8[i - 6]));
+ break;
+ }
+}
+
+
+
+static void
+vui_parameters (sequence_t * seq, vui_param_t * vui)
+{
+ bits_reader_t *br = &seq->br;
+
+ vui->aspect_ratio_info = read_bits (br, 1);
+ lprintf ("aspect_ratio_info_present_flag = %d\n", vui->aspect_ratio_info);
+ if (vui->aspect_ratio_info)
+ {
+ vui->aspect_ratio_idc = read_bits (br, 8);
+ lprintf ("aspect_ratio_idc = %d\n", vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == 255)
+ {
+ vui->sar_width = read_bits (br, 16);
+ lprintf ("sar_width = %d\n", vui->sar_width);
+ vui->sar_height = read_bits (br, 16);
+ lprintf ("sar_height = %d\n", vui->sar_height);
+ }
+ }
+ if (read_bits (br, 1)) /* overscan_info_present_flag */
+ skip_bits (br, 1); /* overscan_appropriate_flag */
+ if (read_bits (br, 1))
+ { /* video_signal_type_present_flag */
+ skip_bits (br, 3); /* video_format */
+ skip_bits (br, 1); /* video_full_range_flag */
+ vui->colour_desc = read_bits (br, 1);
+ lprintf ("colour_desc = %d\n", vui->colour_desc);
+ if (vui->colour_desc)
+ {
+ vui->colour_primaries = read_bits (br, 8);
+ lprintf ("colour_primaries = %d\n", vui->colour_primaries);
+ skip_bits (br, 8); /* transfer_characteristics */
+ skip_bits (br, 8); /* matrix_coefficients */
+ switch (vui->colour_primaries)
+ {
+ case 1:
+ seq->color_standard = VDP_COLOR_STANDARD_ITUR_BT_709;
+ break;
+ case 6:
+ case 7:
+ seq->color_standard = VDP_COLOR_STANDARD_SMPTE_240M;
+ break;
+ }
+ }
+ }
+ if (read_bits (br, 1))
+ { /* chroma_loc_info_present_flag */
+ read_exp_ue (br); /* chroma_sample_loc_type_top_field */
+ read_exp_ue (br); /* chroma_sample_loc_type_bottom_field */
+ }
+ vui->timing_info = read_bits (br, 1);
+ lprintf ("timing_info = %d\n", vui->timing_info);
+ if (vui->timing_info)
+ {
+ vui->num_units_in_tick = read_bits (br, 32);
+ lprintf ("num_units_in_tick = %u\n", vui->num_units_in_tick);
+ vui->time_scale = read_bits (br, 32);
+ lprintf ("time_scale = %u\n", vui->time_scale);
+ if (vui->time_scale > 0)
+ seq->video_step =
+ 180000. * (double) vui->num_units_in_tick / (double) vui->time_scale;
+ }
+}
+
+
+
+static void
+seq_parameter_set_data (vdpau_h264_alter_decoder_t * this_gen)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ seq_param_t *sp;
+ int i;
+
+ uint8_t profile_idc = read_bits (&seq->br, 8);
+ lprintf ("profile_idc = %d\n", profile_idc);
+ uint8_t constraint_set0_flag = read_bits (&seq->br, 1);
+ lprintf ("constraint_set0_flag = %d\n", constraint_set0_flag);
+ uint8_t constraint_set1_flag = read_bits (&seq->br, 1);
+ lprintf ("constraint_set1_flag = %d\n", constraint_set1_flag);
+ uint8_t constraint_set2_flag = read_bits (&seq->br, 1);
+ lprintf ("constraint_set2_flag = %d\n", constraint_set2_flag);
+ uint8_t constraint_set3_flag = read_bits (&seq->br, 1);
+ lprintf ("constraint_set3_flag = %d\n", constraint_set3_flag);
+ skip_bits (&seq->br, 4);
+ uint8_t level_idc = read_bits (&seq->br, 8);
+ lprintf ("level_idc = %d\n", level_idc);
+
+ uint8_t seq_parameter_set_id = read_exp_ue (&seq->br);
+ lprintf ("seq_parameter_set_id = %d\n", seq_parameter_set_id);
+ if (seq_parameter_set_id > 31)
+ {
+ lprintf ("OOPS : seq_parameter_set_id > 31 !!\n");
+ return;
+ }
+
+ if (!seq->seq_param[seq_parameter_set_id])
+ seq->seq_param[seq_parameter_set_id] =
+ (seq_param_t *) calloc (1, sizeof (seq_param_t));
+ if (!seq->seq_param[seq_parameter_set_id])
+ {
+ lprintf ("OOPS : can't allocate SPS %d !!\n", seq_parameter_set_id);
+ return;
+ }
+
+ sp = seq->seq_param[seq_parameter_set_id];
+ sp->profile_idc = profile_idc;
+ switch (profile_idc)
+ {
+ case 100:
+ seq->profile = VDP_DECODER_PROFILE_H264_HIGH;
+ break;
+ case 77:
+ seq->profile = VDP_DECODER_PROFILE_H264_MAIN;
+ break;
+ case 66: /* nvidia's vdpau doesn't support baseline, force main */
+ default:
+ seq->profile = VDP_DECODER_PROFILE_H264_MAIN;
+ }
+ sp->constraint_set0_flag = constraint_set0_flag;
+ sp->constraint_set1_flag = constraint_set1_flag;
+ sp->constraint_set2_flag = constraint_set2_flag;
+ sp->constraint_set3_flag = constraint_set3_flag;
+ sp->level_idc = level_idc;
+
+ memset (&sp->scaling_lists_4x4, 16, sizeof (sp->scaling_lists_4x4));
+ memset (&sp->scaling_lists_8x8, 16, sizeof (sp->scaling_lists_8x8));
+
+ sp->chroma_format_idc = 1;
+ sp->separate_colour_plane_flag = 0;
+ if (sp->profile_idc == 100 || sp->profile_idc == 110
+ || sp->profile_idc == 122 || sp->profile_idc == 244
+ || sp->profile_idc == 44 || sp->profile_idc == 83
+ || sp->profile_idc == 86)
+ {
+ sp->chroma_format_idc = read_exp_ue (&seq->br);
+ lprintf ("chroma_format_idc = %u\n", sp->chroma_format_idc);
+ if (sp->chroma_format_idc == 3)
+ {
+ sp->separate_colour_plane_flag = read_bits (&seq->br, 1);
+ lprintf ("separate_colour_plane_flag = %d\n",
+ sp->separate_colour_plane_flag);
+ }
+ sp->bit_depth_luma_minus8 = read_exp_ue (&seq->br);
+ lprintf ("bit_depth_luma_minus8 = %u\n", sp->bit_depth_luma_minus8);
+ sp->bit_depth_chroma_minus8 = read_exp_ue (&seq->br);
+ lprintf ("bit_depth_chroma_minus8 = %u\n", sp->bit_depth_chroma_minus8);
+ sp->qpprime_y_zero_transform_bypass_flag = read_bits (&seq->br, 1);
+ lprintf ("qpprime_y_zero_transform_bypass_flag = %u\n",
+ sp->qpprime_y_zero_transform_bypass_flag);
+ sp->seq_scaling_matrix_present_flag = read_bits (&seq->br, 1);
+ lprintf ("seq_scaling_matrix_present_flag = %u\n",
+ sp->seq_scaling_matrix_present_flag);
+ if (sp->seq_scaling_matrix_present_flag)
+ {
+ for (i = 0; i < 8; i++)
+ {
+ int scaling_flag = read_bits (&seq->br, 1);
+ if (scaling_flag)
+ {
+ if (i < 6)
+ parse_scaling_list (&seq->br, &sp->scaling_lists_4x4[i][0], 16,
+ i);
+ else
+ parse_scaling_list (&seq->br, &sp->scaling_lists_8x8[i - 6][0],
+ 64, i);
+ }
+ else
+ scaling_list_fallback_A ((uint8_t *) sp->scaling_lists_4x4,
+ (uint8_t *) sp->scaling_lists_8x8, i);
+ }
+ }
+ }
+ sp->log2_max_frame_num_minus4 = read_exp_ue (&seq->br);
+ lprintf ("log2_max_frame_num_minus4 = %u\n", sp->log2_max_frame_num_minus4);
+ sp->pic_order_cnt_type = read_exp_ue (&seq->br);
+ lprintf ("pic_order_cnt_type = %u\n", sp->pic_order_cnt_type);
+ if (sp->pic_order_cnt_type == 0)
+ {
+ sp->log2_max_pic_order_cnt_lsb_minus4 = read_exp_ue (&seq->br);
+ lprintf ("log2_max_pic_order_cnt_lsb_minus4 = %u\n",
+ sp->log2_max_pic_order_cnt_lsb_minus4);
+ }
+ else if (sp->pic_order_cnt_type == 1)
+ {
+ sp->delta_pic_order_always_zero_flag = read_bits (&seq->br, 1);
+ lprintf ("delta_pic_order_always_zero_flag = %u\n",
+ sp->delta_pic_order_always_zero_flag);
+ sp->offset_for_non_ref_pic = read_exp_se (&seq->br);
+ lprintf ("offset_for_non_ref_pic = %d\n", sp->offset_for_non_ref_pic);
+ sp->offset_for_top_to_bottom_field = read_exp_se (&seq->br);
+ lprintf ("offset_for_top_to_bottom_field = %d\n",
+ sp->offset_for_top_to_bottom_field);
+ sp->num_ref_frames_in_pic_order_cnt_cycle = read_exp_ue (&seq->br);
+ lprintf ("num_ref_frames_in_pic_order_cnt_cycle = %u\n",
+ sp->num_ref_frames_in_pic_order_cnt_cycle);
+ for (i = 0; i < sp->num_ref_frames_in_pic_order_cnt_cycle; i++)
+ {
+ sp->offset_for_ref_frame[i] = read_exp_se (&seq->br);
+ lprintf ("offset_for_ref_frame[%d] = %d\n", i,
+ sp->offset_for_ref_frame[i]);
+ }
+ }
+ sp->num_ref_frames = read_exp_ue (&seq->br);
+ if (sp->num_ref_frames > 16)
+ sp->num_ref_frames = 16;
+ lprintf ("num_ref_frames = %u\n", sp->num_ref_frames);
+ sp->gaps_in_frame_num_value_allowed_flag = read_bits (&seq->br, 1);
+ lprintf ("gaps_in_frame_num_value_allowed_flag = %u\n",
+ sp->gaps_in_frame_num_value_allowed_flag);
+ sp->pic_width_in_mbs_minus1 = read_exp_ue (&seq->br);
+ lprintf ("pic_width_in_mbs_minus1 = %u\n", sp->pic_width_in_mbs_minus1);
+ sp->pic_height_in_map_units_minus1 = read_exp_ue (&seq->br);
+ lprintf ("pic_height_in_map_units_minus1 = %u\n",
+ sp->pic_height_in_map_units_minus1);
+ sp->frame_mbs_only_flag = read_bits (&seq->br, 1);
+ lprintf ("frame_mbs_only_flag = %u\n", sp->frame_mbs_only_flag);
+
+ seq->coded_width = (sp->pic_width_in_mbs_minus1 + 1) * 16;
+ seq->coded_height =
+ (2 - sp->frame_mbs_only_flag) * (sp->pic_height_in_map_units_minus1 +
+ 1) * 16;
+
+ if (!sp->frame_mbs_only_flag)
+ {
+ sp->mb_adaptive_frame_field_flag = read_bits (&seq->br, 1);
+ lprintf ("mb_adaptive_frame_field_flag = %u\n",
+ sp->mb_adaptive_frame_field_flag);
+ }
+ sp->direct_8x8_inference_flag = read_bits (&seq->br, 1);
+ lprintf ("direct_8x8_inference_flag = %u\n", sp->direct_8x8_inference_flag);
+ sp->frame_cropping_flag = read_bits (&seq->br, 1);
+ lprintf ("frame_cropping_flag = %u\n", sp->frame_cropping_flag);
+ if (sp->frame_cropping_flag)
+ {
+ sp->frame_crop_left_offset = read_exp_ue (&seq->br);
+ lprintf ("frame_crop_left_offset = %u\n", sp->frame_crop_left_offset);
+ sp->frame_crop_right_offset = read_exp_ue (&seq->br);
+ lprintf ("frame_crop_right_offset = %u\n", sp->frame_crop_right_offset);
+ sp->frame_crop_top_offset = read_exp_ue (&seq->br);
+ lprintf ("frame_crop_top_offset = %u\n", sp->frame_crop_top_offset);
+ sp->frame_crop_bottom_offset = read_exp_ue (&seq->br);
+ lprintf ("frame_crop_bottom_offset = %u\n", sp->frame_crop_bottom_offset);
+ seq->coded_height -=
+ (2 - sp->frame_mbs_only_flag) * 2 * sp->frame_crop_bottom_offset;
+ }
+ if (seq->coded_height == 1088)
+ seq->coded_height = 1080;
+ sp->vui_parameters_present_flag = read_bits (&seq->br, 1);
+ lprintf ("vui_parameters_present_flag = %u\n",
+ sp->vui_parameters_present_flag);
+ if (sp->vui_parameters_present_flag)
+ vui_parameters (seq, &sp->vui);
+ set_ratio (seq, sp);
+}
+
+
+
+static void
+pic_parameter_set (vdpau_h264_alter_decoder_t * this_gen)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ pic_param_t *pic;
+ seq_param_t *sp;
+ int i;
+
+ uint8_t pic_parameter_set_id = read_exp_ue (&seq->br);
+ lprintf ("pic_parameter_set_id = %u\n", pic_parameter_set_id);
+ if (!seq->pic_param[pic_parameter_set_id])
+ seq->pic_param[pic_parameter_set_id] =
+ (pic_param_t *) calloc (1, sizeof (pic_param_t));
+ if (!seq->pic_param[pic_parameter_set_id])
+ {
+ lprintf ("OOPS : can't allocate PPS %d !!\n", pic_parameter_set_id);
+ return;
+ }
+ pic = seq->pic_param[pic_parameter_set_id];
+
+ uint8_t seq_parameter_set_id = read_exp_ue (&seq->br);
+ lprintf ("seq_parameter_set_id = %u\n", seq_parameter_set_id);
+ if (seq_parameter_set_id > 31)
+ {
+ lprintf ("OOPS : referenced SPS (%d) does not exist !!\n",
+ seq_parameter_set_id);
+ return;
+ }
+ if (!seq->seq_param[seq_parameter_set_id])
+ {
+ lprintf ("OOPS : referenced SPS (%d) does not exist !!\n",
+ seq_parameter_set_id);
+ return;
+ }
+
+ pic->seq_parameter_set_id = seq_parameter_set_id;
+ sp = seq->seq_param[pic->seq_parameter_set_id];
+ pic->entropy_coding_mode_flag = read_bits (&seq->br, 1);
+ lprintf ("entropy_coding_mode_flag = %u\n", pic->entropy_coding_mode_flag);
+ pic->pic_order_present_flag = read_bits (&seq->br, 1);
+ lprintf ("pic_order_present_flag = %u\n", pic->pic_order_present_flag);
+ uint8_t num_slice_groups_minus1 = read_exp_ue (&seq->br);
+ lprintf ("num_slice_groups_minus1 = %u\n", num_slice_groups_minus1);
+ if (num_slice_groups_minus1 > 0)
+ {
+ uint8_t slice_group_map_type = read_exp_ue (&seq->br);
+ lprintf ("slice_group_map_type = %u\n", slice_group_map_type);
+ if (!slice_group_map_type)
+ {
+ for (i = 0; i < num_slice_groups_minus1; i++)
+ read_exp_ue (&seq->br);
+ }
+ else if (slice_group_map_type == 2)
+ {
+ for (i = 0; i < num_slice_groups_minus1; i++)
+ {
+ read_exp_ue (&seq->br);
+ read_exp_ue (&seq->br);
+ }
+ }
+ else if (slice_group_map_type == 3 || slice_group_map_type == 4
+ || slice_group_map_type == 5)
+ {
+ read_bits (&seq->br, 1);
+ read_exp_ue (&seq->br);
+ }
+ else if (slice_group_map_type == 6)
+ {
+ read_exp_ue (&seq->br);
+ }
+ }
+ pic->num_ref_idx_l0_active_minus1 = read_exp_ue (&seq->br);
+ lprintf ("num_ref_idx_l0_active_minus1 = %u\n",
+ pic->num_ref_idx_l0_active_minus1);
+ pic->num_ref_idx_l1_active_minus1 = read_exp_ue (&seq->br);
+ lprintf ("num_ref_idx_l1_active_minus1 = %u\n",
+ pic->num_ref_idx_l1_active_minus1);
+ pic->weighted_pred_flag = read_bits (&seq->br, 1);
+ lprintf ("weighted_pred_flag = %u\n", pic->weighted_pred_flag);
+ pic->weighted_bipred_idc = read_bits (&seq->br, 2);
+ lprintf ("weighted_bipred_idc = %u\n", pic->weighted_bipred_idc);
+ pic->pic_init_qp_minus26 = read_exp_se (&seq->br);
+ lprintf ("pic_init_qp_minus26 = %d\n", pic->pic_init_qp_minus26);
+ pic->pic_init_qs_minus26 = read_exp_se (&seq->br);
+ lprintf ("pic_init_qs_minus26 = %d\n", pic->pic_init_qs_minus26);
+ pic->chroma_qp_index_offset = read_exp_se (&seq->br);
+ lprintf ("chroma_qp_index_offset = %d\n", pic->chroma_qp_index_offset);
+ pic->deblocking_filter_control_present_flag = read_bits (&seq->br, 1);
+ lprintf ("deblocking_filter_control_present_flag = %u\n",
+ pic->deblocking_filter_control_present_flag);
+ pic->constrained_intra_pred_flag = read_bits (&seq->br, 1);
+ lprintf ("constrained_intra_pred_flag = %u\n",
+ pic->constrained_intra_pred_flag);
+ pic->redundant_pic_cnt_present_flag = read_bits (&seq->br, 1);
+ lprintf ("redundant_pic_cnt_present_flag = %u\n",
+ pic->redundant_pic_cnt_present_flag);
+
+ uint32_t more = more_rbsp_data (&seq->br);
+ lprintf ("more bits = %u (buflen = %d) (still = %d)\n", more,
+ seq->br.length, seq->br.start + seq->br.length - seq->br.buffer);
+ if (more)
+ {
+ pic->transform_8x8_mode_flag = read_bits (&seq->br, 1);
+ lprintf ("transform_8x8_mode_flag = %u\n", pic->transform_8x8_mode_flag);
+ pic->pic_scaling_matrix_present_flag = read_bits (&seq->br, 1);
+ lprintf ("pic_scaling_matrix_present_flag = %u\n",
+ pic->pic_scaling_matrix_present_flag);
+ if (pic->pic_scaling_matrix_present_flag)
+ {
+ for (i = 0; i < 8; i++)
+ {
+ if (i < 6 || pic->transform_8x8_mode_flag)
+ pic->pic_scaling_list_present_flag[i] = read_bits (&seq->br, 1);
+ else
+ pic->pic_scaling_list_present_flag[i] = 0;
+
+ if (pic->pic_scaling_list_present_flag[i])
+ {
+ if (i < 6)
+ parse_scaling_list (&seq->br, &pic->scaling_lists_4x4[i][0], 16,
+ i);
+ else
+ parse_scaling_list (&seq->br, &pic->scaling_lists_8x8[i - 6][0],
+ 64, i);
+ }
+ else
+ {
+ if (!sp->seq_scaling_matrix_present_flag)
+ scaling_list_fallback_A ((uint8_t *) pic->scaling_lists_4x4,
+ (uint8_t *) pic->scaling_lists_8x8, i);
+ else
+ scaling_list_fallback_B (sp, pic, i);
+ }
+ }
+ }
+ pic->second_chroma_qp_index_offset = read_exp_se (&seq->br);
+ lprintf ("second_chroma_qp_index_offset = %d\n",
+ pic->second_chroma_qp_index_offset);
+ }
+ else
+ {
+ pic->transform_8x8_mode_flag = 0;
+ pic->pic_scaling_matrix_present_flag = 0;
+ pic->second_chroma_qp_index_offset = pic->chroma_qp_index_offset;
+ }
+}
+
+
+
+static void
+pred_weight_table (vdpau_h264_alter_decoder_t * this_gen, uint8_t slice_type,
+ uint8_t ChromaArrayType, uint8_t l0, uint8_t l1)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ int i;
+
+ read_exp_ue (&seq->br);
+ if (ChromaArrayType)
+ read_exp_ue (&seq->br);
+ for (i = 0; i <= l0; i++)
+ {
+ if (read_bits (&seq->br, 1))
+ {
+ read_exp_se (&seq->br);
+ read_exp_se (&seq->br);
+ }
+ if (ChromaArrayType && read_bits (&seq->br, 1))
+ {
+ read_exp_se (&seq->br);
+ read_exp_se (&seq->br);
+ read_exp_se (&seq->br);
+ read_exp_se (&seq->br);
+ }
+ }
+ if (slice_type == SLICE_TYPE_B)
+ {
+ for (i = 0; i <= l1; i++)
+ {
+ if (read_bits (&seq->br, 1))
+ {
+ read_exp_se (&seq->br);
+ read_exp_se (&seq->br);
+ }
+ if (ChromaArrayType)
+ {
+ if (read_bits (&seq->br, 1))
+ {
+ read_exp_se (&seq->br);
+ read_exp_se (&seq->br);
+ read_exp_se (&seq->br);
+ read_exp_se (&seq->br);
+ }
+ }
+ }
+ }
+}
+
+
+
+static void
+ref_pic_list_reordering (vdpau_h264_alter_decoder_t * this_gen)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ slice_param_t *sl = &seq->slice_param;
+
+ if ((sl->slice_type != SLICE_TYPE_I) && (sl->slice_type != SLICE_TYPE_SI))
+ {
+ if (read_bits (&seq->br, 1))
+ {
+ uint32_t tmp, diff;
+ do
+ {
+ tmp = read_exp_ue (&seq->br);
+ if (tmp == 0 || tmp == 1)
+ diff = read_exp_ue (&seq->br);
+ else if (tmp == 2)
+ diff = read_exp_ue (&seq->br);
+ }
+ while (tmp != 3 && !seq->br.oflow);
+ }
+ }
+ if (sl->slice_type == SLICE_TYPE_B)
+ {
+ if (read_bits (&seq->br, 1))
+ {
+ uint32_t tmp2, diff2;
+ do
+ {
+ tmp2 = read_exp_ue (&seq->br);
+ if (tmp2 == 0 || tmp2 == 1)
+ diff2 = read_exp_ue (&seq->br);
+ else if (tmp2 == 2)
+ diff2 = read_exp_ue (&seq->br);
+ }
+ while (tmp2 != 3 && !seq->br.oflow);
+ }
+ }
+}
+
+
+
+static void
+dec_ref_pic_marking (vdpau_h264_alter_decoder_t * this_gen, uint8_t idr)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ int32_t pic_num;
+
+ if (idr)
+ {
+ uint8_t no_output_of_prior_pics_flag = read_bits (&seq->br, 1);
+ lprintf ("no_output_of_prior_pics_flag = %u\n",
+ no_output_of_prior_pics_flag);
+ uint8_t long_term_reference_flag = read_bits (&seq->br, 1);
+ lprintf ("long_term_reference_flag = %u\n", long_term_reference_flag);
+ }
+ else
+ {
+ uint8_t adaptive_ref_pic_marking_mode_flag = read_bits (&seq->br, 1);
+ lprintf ("adaptive_ref_pic_marking_mode_flag = %u\n",
+ adaptive_ref_pic_marking_mode_flag);
+ if (!adaptive_ref_pic_marking_mode_flag)
+ {
+ if (seq->cur_pic.field_pic_flag
+ && (seq->cur_pic.completed == PICTURE_DONE)
+ && (seq->cur_pic.is_reference[0] || seq->cur_pic.is_reference[1]))
+ {
+ seq->cur_pic.is_reference[0] = seq->cur_pic.is_reference[1] =
+ SHORT_TERM_REF;
+ lprintf ("short_ref marking\n");
+ }
+ // sliding window is always performed in dpb_append()
+ }
+ else
+ {
+ uint8_t memory_management_control_operation;
+ do
+ {
+ memory_management_control_operation = read_exp_ue (&seq->br);
+ lprintf ("memory_management_control_operation = %u\n",
+ memory_management_control_operation);
+ if (memory_management_control_operation == 1
+ || memory_management_control_operation == 3)
+ {
+ uint32_t difference_of_pic_nums_minus1 = read_exp_ue (&seq->br);
+ lprintf ("difference_of_pic_nums_minus1 = %u\n",
+ difference_of_pic_nums_minus1);
+ pic_num =
+ seq->cur_pic.PicNum[0] - (difference_of_pic_nums_minus1 + 1);
+ dpb_mmc1 (this_gen, pic_num);
+ }
+ if (memory_management_control_operation == 2)
+ {
+ uint32_t long_term_pic_num = read_exp_ue (&seq->br);
+ lprintf ("long_term_pic_num = %u\n", long_term_pic_num);
+ }
+ if (memory_management_control_operation == 3
+ || memory_management_control_operation == 6)
+ {
+ uint32_t long_term_frame_idx = read_exp_ue (&seq->br);
+ lprintf ("long_term_frame_idx = %u\n", long_term_frame_idx);
+ }
+ if (memory_management_control_operation == 4)
+ {
+ uint32_t max_long_term_frame_idx_plus1 = read_exp_ue (&seq->br);
+ lprintf ("max_long_term_frame_idx_plus1 = %u\n",
+ max_long_term_frame_idx_plus1);
+ }
+ }
+ while (memory_management_control_operation && !seq->br.oflow);
+ }
+ }
+}
+
+
+
+static void
+slice_header (vdpau_h264_alter_decoder_t * this_gen, uint8_t nal_ref_idc,
+ uint8_t nal_unit_type)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ slice_param_t *sl = &seq->slice_param;
+ pic_param_t *pic;
+ seq_param_t *sp;
+
+ sl->nal_ref_idc = nal_ref_idc;
+ sl->nal_unit_type = nal_unit_type;
+
+ read_exp_ue (&seq->br); /* first_mb_in_slice */
+ sl->slice_type = read_exp_ue (&seq->br) % 5;
+ lprintf ("slice_type = %u\n", sl->slice_type);
+ sl->pic_parameter_set_id = read_exp_ue (&seq->br);
+ lprintf ("pic_parameter_set_id = %u\n", sl->pic_parameter_set_id);
+ if (!seq->pic_param[sl->pic_parameter_set_id])
+ {
+ lprintf ("OOPS : referenced PPS (%d) does not exist !!\n",
+ sl->pic_parameter_set_id);
+ seq->cur_pic.missing_header = 1;
+ return;
+ }
+ pic = seq->pic_param[sl->pic_parameter_set_id];
+ if (!seq->seq_param[pic->seq_parameter_set_id])
+ {
+ lprintf ("OOPS : referenced SPS (%d) does not exist !!\n",
+ pic->seq_parameter_set_id);
+ seq->cur_pic.missing_header = 1;
+ return;
+ }
+
+ if (!seq->startup_frame && (sl->slice_type == SLICE_TYPE_I)
+ && !seq->cur_pic.completed)
+ seq->startup_frame = 1;
+
+ sp = seq->seq_param[pic->seq_parameter_set_id];
+ if (sp->separate_colour_plane_flag)
+ read_bits (&seq->br, 2); /* colour_plane_id */
+ sl->frame_num = read_bits (&seq->br, sp->log2_max_frame_num_minus4 + 4);
+ lprintf ("frame_num = %u\n", sl->frame_num);
+ sl->MaxFrameNum = 1 << (sp->log2_max_frame_num_minus4 + 4);
+
+ sl->field_pic_flag = sl->bottom_field_flag =
+ sl->delta_pic_order_cnt_bottom = 0;
+ sl->delta_pic_order_cnt[0] = sl->delta_pic_order_cnt[1] = 0;
+
+ if (!sp->frame_mbs_only_flag)
+ {
+ sl->field_pic_flag = read_bits (&seq->br, 1);
+ lprintf ("field_pic_flag = %u\n", sl->field_pic_flag);
+ if (sl->field_pic_flag)
+ {
+ sl->bottom_field_flag = read_bits (&seq->br, 1);
+ lprintf ("bottom_field_flag = %u\n", sl->bottom_field_flag);
+ }
+ }
+ if (nal_unit_type == NAL_SLICE_IDR)
+ {
+ sl->idr_pic_id = read_exp_ue (&seq->br);
+ lprintf ("idr_pic_id = %u\n", sl->idr_pic_id);
+ }
+ if (sp->pic_order_cnt_type == 0)
+ {
+ sl->pic_order_cnt_lsb =
+ read_bits (&seq->br, sp->log2_max_pic_order_cnt_lsb_minus4 + 4);
+ lprintf ("pic_order_cnt_lsb = %u\n", sl->pic_order_cnt_lsb);
+ if (pic->pic_order_present_flag && !sl->field_pic_flag)
+ {
+ sl->delta_pic_order_cnt_bottom = read_exp_se (&seq->br);
+ lprintf ("delta_pic_order_cnt_bottom = %d\n",
+ sl->delta_pic_order_cnt_bottom);
+ }
+ }
+ if (sp->pic_order_cnt_type == 1 && !sp->delta_pic_order_always_zero_flag)
+ {
+ sl->delta_pic_order_cnt[0] = read_exp_se (&seq->br);
+ lprintf ("delta_pic_order_cnt[0] = %d\n", sl->delta_pic_order_cnt[0]);
+ if (pic->pic_order_present_flag && !sl->field_pic_flag)
+ {
+ sl->delta_pic_order_cnt[1] = read_exp_se (&seq->br);
+ lprintf ("delta_pic_order_cnt[1] = %d\n", sl->delta_pic_order_cnt[1]);
+ }
+ }
+ if (pic->redundant_pic_cnt_present_flag)
+ {
+ sl->redundant_pic_cnt = read_exp_ue (&seq->br);
+ lprintf ("redundant_pic_cnt = %u\n", sl->redundant_pic_cnt);
+ }
+ if (sl->slice_type == SLICE_TYPE_B)
+ skip_bits (&seq->br, 1); /* direct_spatial_mv_pred_flag */
+
+ sl->num_ref_idx_l0_active_minus1 = pic->num_ref_idx_l0_active_minus1;
+ sl->num_ref_idx_l1_active_minus1 = pic->num_ref_idx_l1_active_minus1;
+
+ if (sl->slice_type == SLICE_TYPE_P || sl->slice_type == SLICE_TYPE_SP
+ || sl->slice_type == SLICE_TYPE_B)
+ {
+ if (read_bits (&seq->br, 1))
+ {
+ lprintf ("num_ref_idx_active_override_flag = 1\n");
+ sl->num_ref_idx_l0_active_minus1 = read_exp_ue (&seq->br);
+ if (sl->slice_type == SLICE_TYPE_B)
+ sl->num_ref_idx_l1_active_minus1 = read_exp_ue (&seq->br);
+ lprintf ("num_ref_idx_l0_active_minus1 = %u\n",
+ sl->num_ref_idx_l0_active_minus1);
+ lprintf ("num_ref_idx_l1_active_minus1 = %u\n",
+ sl->num_ref_idx_l1_active_minus1);
+ }
+ }
+}
+
+
+
+static void
+slice_header_post (vdpau_h264_alter_decoder_t * this_gen)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ slice_param_t *sl = &seq->slice_param;
+
+ if (!sl->nal_ref_idc)
+ return;
+
+ pic_param_t *pic = seq->pic_param[sl->pic_parameter_set_id];
+ seq_param_t *sp = seq->seq_param[pic->seq_parameter_set_id];
+
+ if ((pic->weighted_pred_flag
+ && ((sl->slice_type == SLICE_TYPE_P)
+ || (sl->slice_type == SLICE_TYPE_SP)))
+ || ((pic->weighted_bipred_idc == 1)
+ && (sl->slice_type == SLICE_TYPE_B)))
+ {
+ uint8_t chroma =
+ (sp->separate_colour_plane_flag) ? 0 : sp->chroma_format_idc;
+ pred_weight_table (this_gen, sl->slice_type, chroma,
+ sl->num_ref_idx_l0_active_minus1,
+ sl->num_ref_idx_l1_active_minus1);
+ }
+
+ dec_ref_pic_marking (this_gen, (sl->nal_unit_type == 5) ? 1 : 0);
+}
+
+
+
+static void
+decode_poc (vdpau_h264_alter_decoder_t * this_gen)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ slice_param_t *sl = &seq->slice_param;
+ pic_param_t *pic = seq->pic_param[sl->pic_parameter_set_id];
+ seq_param_t *sp = seq->seq_param[pic->seq_parameter_set_id];
+ int parity = sl->bottom_field_flag ? 1 : 0;
+
+ seq->cur_pic.used = 1;
+ seq->cur_pic.FrameNum = sl->frame_num;
+ seq->cur_pic.is_reference[parity] = sl->nal_ref_idc;
+ seq->cur_pic.field_pic_flag = sl->field_pic_flag;
+
+ if (sl->field_pic_flag)
+ {
+ if (!seq->cur_pic.completed)
+ seq->cur_pic.top_field_first = !parity;
+ seq->cur_pic.completed |=
+ (parity ? PICTURE_BOTTOM_DONE : PICTURE_TOP_DONE);
+ }
+ else
+ {
+ seq->cur_pic.is_reference[!parity] = seq->cur_pic.is_reference[parity];
+ seq->cur_pic.completed = PICTURE_DONE;
+ }
+
+ if (sp->pic_order_cnt_type == 0)
+ {
+ dpb_frame_t *prev_pic = dpb_get_prev_ref (seq);
+ int32_t prevPicOrderCntMsb, prevPicOrderCntLsb;
+ uint32_t MaxPicOrderCntLsb =
+ 1 << (sp->log2_max_pic_order_cnt_lsb_minus4 + 4);
+
+ seq->cur_pic.pic_order_cnt_lsb = sl->pic_order_cnt_lsb;
+ seq->cur_pic.top_field_first =
+ (sl->delta_pic_order_cnt_bottom < 0) ? 0 : 1;
+
+ if (!prev_pic)
+ {
+ seq->cur_pic.PicOrderCntMsb = seq->cur_pic.TopFieldOrderCnt =
+ seq->cur_pic.BottomFieldOrderCnt = 0;
+ return;
+ }
+ if (sl->nal_unit_type == NAL_SLICE_IDR)
+ prevPicOrderCntMsb = prevPicOrderCntLsb = 0;
+ else if (prev_pic->mmc5)
+ {
+ if (!sl->bottom_field_flag)
+ {
+ prevPicOrderCntMsb = 0;
+ prevPicOrderCntLsb = prev_pic->TopFieldOrderCnt;
+ }
+ else
+ prevPicOrderCntMsb = prevPicOrderCntLsb = 0;
+ }
+ else
+ {
+ prevPicOrderCntMsb = prev_pic->PicOrderCntMsb;
+ prevPicOrderCntLsb = prev_pic->pic_order_cnt_lsb;
+ }
+
+ if ((sl->pic_order_cnt_lsb < prevPicOrderCntLsb)
+ && ((prevPicOrderCntLsb - sl->pic_order_cnt_lsb) >=
+ (MaxPicOrderCntLsb / 2)))
+ seq->cur_pic.PicOrderCntMsb = prevPicOrderCntMsb + MaxPicOrderCntLsb;
+ else if ((sl->pic_order_cnt_lsb > prevPicOrderCntLsb)
+ && ((sl->pic_order_cnt_lsb - prevPicOrderCntLsb) >
+ (MaxPicOrderCntLsb / 2)))
+ seq->cur_pic.PicOrderCntMsb = prevPicOrderCntMsb - MaxPicOrderCntLsb;
+ else
+ seq->cur_pic.PicOrderCntMsb = prevPicOrderCntMsb;
+
+ if (!sl->field_pic_flag)
+ {
+ seq->cur_pic.TopFieldOrderCnt =
+ seq->cur_pic.PicOrderCntMsb + sl->pic_order_cnt_lsb;
+ seq->cur_pic.BottomFieldOrderCnt =
+ seq->cur_pic.TopFieldOrderCnt + sl->delta_pic_order_cnt_bottom;
+ }
+ else
+ {
+ if (sl->bottom_field_flag)
+ seq->cur_pic.BottomFieldOrderCnt =
+ seq->cur_pic.PicOrderCntMsb + sl->pic_order_cnt_lsb;
+ else
+ seq->cur_pic.TopFieldOrderCnt =
+ seq->cur_pic.PicOrderCntMsb + sl->pic_order_cnt_lsb;
+ }
+ }
+ else
+ {
+ int16_t FrameNumOffset, prevFrameNumOffset;
+ uint16_t MaxFrameNum = 1 << (sp->log2_max_frame_num_minus4 + 4);
+
+ if (sl->nal_unit_type == NAL_SLICE_IDR)
+ {
+ FrameNumOffset = 0;
+ }
+ else
+ {
+ if (seq->prevMMC5)
+ prevFrameNumOffset = 0;
+ else
+ prevFrameNumOffset = seq->prevFrameNumOffset;
+
+ if (seq->prevFrameNum > sl->frame_num)
+ FrameNumOffset = prevFrameNumOffset + MaxFrameNum;
+ else
+ FrameNumOffset = prevFrameNumOffset;
+ }
+
+ if (sp->pic_order_cnt_type == 1)
+ {
+ int16_t absFrameNum = 0, picOrderCntCycleCnt =
+ 0, frameNumInPicOrderCntCycle = 0, expectedDeltaPerPicOrderCntCycle =
+ 0, expectedPicOrderCnt = 0;
+ int i;
+ if (sp->num_ref_frames_in_pic_order_cnt_cycle)
+ absFrameNum = FrameNumOffset + sl->frame_num;
+ if (!sl->nal_ref_idc && (absFrameNum > 0))
+ --absFrameNum;
+
+ for (i = 0; i < sp->num_ref_frames_in_pic_order_cnt_cycle; i++)
+ expectedDeltaPerPicOrderCntCycle += sp->offset_for_ref_frame[i];
+
+ if (absFrameNum > 0)
+ {
+ picOrderCntCycleCnt =
+ (absFrameNum - 1) / sp->num_ref_frames_in_pic_order_cnt_cycle;
+ frameNumInPicOrderCntCycle =
+ (absFrameNum - 1) % sp->num_ref_frames_in_pic_order_cnt_cycle;
+ expectedPicOrderCnt =
+ picOrderCntCycleCnt * expectedDeltaPerPicOrderCntCycle;
+ for (i = 0; i < frameNumInPicOrderCntCycle; i++)
+ expectedPicOrderCnt += sp->offset_for_ref_frame[i];
+ }
+ if (!sl->nal_ref_idc)
+ expectedPicOrderCnt += sp->offset_for_non_ref_pic;
+
+ if (!sl->field_pic_flag)
+ {
+ seq->cur_pic.TopFieldOrderCnt =
+ expectedPicOrderCnt + sl->delta_pic_order_cnt[0];
+ seq->cur_pic.BottomFieldOrderCnt =
+ seq->cur_pic.TopFieldOrderCnt + sp->offset_for_top_to_bottom_field +
+ sl->delta_pic_order_cnt[1];
+ }
+ else if (!sl->bottom_field_flag)
+ seq->cur_pic.TopFieldOrderCnt =
+ expectedPicOrderCnt + sl->delta_pic_order_cnt[0];
+ else
+ seq->cur_pic.BottomFieldOrderCnt =
+ expectedPicOrderCnt + sp->offset_for_top_to_bottom_field +
+ sl->delta_pic_order_cnt[1];
+ }
+ else
+ {
+ int32_t tmpPicOrderCnt;
+ if (sl->nal_unit_type == NAL_SLICE_IDR)
+ tmpPicOrderCnt = 0;
+ else if (!sl->nal_ref_idc)
+ tmpPicOrderCnt = 2 * (FrameNumOffset + sl->frame_num) - 1;
+ else
+ tmpPicOrderCnt = 2 * (FrameNumOffset + sl->frame_num);
+
+ if (!sl->field_pic_flag)
+ seq->cur_pic.TopFieldOrderCnt = seq->cur_pic.BottomFieldOrderCnt =
+ tmpPicOrderCnt;
+ else if (sl->bottom_field_flag)
+ seq->cur_pic.BottomFieldOrderCnt = tmpPicOrderCnt;
+ else
+ seq->cur_pic.TopFieldOrderCnt = tmpPicOrderCnt;
+ }
+ seq->prevFrameNum = seq->cur_pic.FrameNum;
+ seq->prevFrameNumOffset = FrameNumOffset;
+ }
+
+ if (seq->cur_pic.completed < PICTURE_DONE)
+ {
+ if (sl->bottom_field_flag)
+ seq->cur_pic.TopFieldOrderCnt = seq->cur_pic.BottomFieldOrderCnt;
+ else
+ seq->cur_pic.BottomFieldOrderCnt = seq->cur_pic.TopFieldOrderCnt;
+ }
+}
+
+
+
+static void
+decode_picnum (vdpau_h264_alter_decoder_t * this_gen)
+{
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ slice_param_t *sl = &seq->slice_param;
+ dpb_frame_t *frame;
+ int i = 0;
+
+ int parity = sl->bottom_field_flag ? 1 : 0;
+
+ if (!seq->cur_pic.field_pic_flag)
+ seq->cur_pic.PicNum[0] = seq->cur_pic.FrameNum;
+ else
+ seq->cur_pic.PicNum[parity] = 2 * seq->cur_pic.FrameNum + 1;
+
+ while (i < MAX_DPB_SIZE)
+ {
+ frame = seq->dpb[i];
+ if (!frame->used)
+ break;
+ if (frame->FrameNum > seq->cur_pic.FrameNum)
+ frame->FrameNumWrap = frame->FrameNum - sl->MaxFrameNum;
+ else
+ frame->FrameNumWrap = frame->FrameNum;
+
+ if (!sl->field_pic_flag)
+ {
+ frame->PicNum[0] = frame->PicNum[1] = frame->FrameNumWrap;
+ }
+ else
+ {
+ frame->PicNum[0] = 2 * frame->FrameNumWrap + (parity ? 0 : 1);
+ frame->PicNum[1] = 2 * frame->FrameNumWrap + (parity ? 1 : 0);
+ }
+ ++i;
+ }
+}
+
+
+
+static int
+check_ref_list (vdpau_h264_alter_decoder_t * this_gen)
+{
+ int i, j, bad_frame = 0;
+ dpb_frame_t *frame;
+ sequence_t *seq = (sequence_t *) & this_gen->sequence;
+ slice_param_t *sl = &seq->slice_param;
+ pic_param_t *pic = seq->pic_param[sl->pic_parameter_set_id];
+ seq_param_t *sp = seq->seq_param[pic->seq_parameter_set_id];
+ int prefs = 0;
+ int brefs = 0;
+ int poc, curpoc;
+
+ //int fps = (double)sp->vui.time_scale / (double)sp->vui.num_units_in_tick / ( 2 - sl->field_pic_flag );
+ int fps = (1 + sl->field_pic_flag) * 2 * sp->num_ref_frames;
+
+ if (seq->startup_frame >= fps)
+ return 0;
+
+ curpoc =
+ (seq->cur_pic.TopFieldOrderCnt >
+ seq->cur_pic.BottomFieldOrderCnt) ? seq->cur_pic.TopFieldOrderCnt : seq->
+ cur_pic.BottomFieldOrderCnt;
+
+ for (i = 15; i > -1; i--)
+ {
+ frame = seq->dpb[i];
+ if (!frame->used)
+ continue;
+ poc =
+ (frame->TopFieldOrderCnt >
+ frame->BottomFieldOrderCnt) ? frame->TopFieldOrderCnt : frame->
+ BottomFieldOrderCnt;
+ if (seq->cur_pic.field_pic_flag)
+ {
+ if (!frame->videoSurface->bad_frame)
+ {
+ for (j = 0; j < 2; j++)
+ {
+ if (frame->is_reference[j])
+ {
+ if (poc <= curpoc)
+ ++prefs;
+ else
+ ++brefs;
+ }
+ }
+ }
+ }
+ else
+ {
+ if (!frame->videoSurface->bad_frame)
+ {
+ if (poc <= curpoc)
+ ++prefs;
+ else
+ ++brefs;
+ }
+ }
+ }
+
+ if (sl->slice_type != SLICE_TYPE_I)
+ {
+ if (prefs < (sl->num_ref_idx_l0_active_minus1 + 1))
+ bad_frame = 1;
+ if (sl->slice_type == SLICE_TYPE_B)
+ {
+ if (brefs < (sl->num_ref_idx_l1_active_minus1 + 1))
+ bad_frame = 1;
+ }
+ }
+
+ if (bad_frame)
+ fprintf (stderr,
+ "******** Missing refframes, dropping. nrf=%d lo=%d prefs=%d l1=%d brefs=%d type=%d (%d fps)\n",
+ sp->num_ref_frames, sl->num_ref_idx_l0_active_minus1 + 1, prefs,
+ sl->num_ref_idx_l1_active_minus1 + 1, brefs, sl->slice_type,
+ fps);
+ //else
+ //fprintf(stderr,"******** GOOD ! nrf=%d lo=%d prefs=%d l1=%d brefs=%d type=%d (%d fps)\n", sp->num_ref_frames, sl->num_ref_idx_l0_active_minus1 + 1, prefs, sl->num_ref_idx_l1_active_minus1 + 1, brefs, sl->slice_type, fps );
+
+ if (seq->cur_pic.is_reference[0] || seq->cur_pic.is_reference[1])
+ ++seq->startup_frame;
+
+ return bad_frame;
+}
+
+
+
+static void
+decode_render (vdpau_h264_alter_decoder_t * vd, int bad_frame)
+{
+ int i, j;
+ VdpPictureInfoH264 info;
+ seq_param_t *sp;
+ pic_param_t *pic;
+ slice_param_t *sl;
+ sequence_t *seq = (sequence_t *) & vd->sequence;
+ vo_frame_t *img;
+
+ if (!seq->cur_pic.field_pic_flag || (seq->cur_pic.completed < PICTURE_DONE))
+ {
+ img =
+ vd->stream->video_out->get_frame (vd->stream->video_out,
+ seq->coded_width, seq->coded_height,
+ seq->ratio, XINE_IMGFMT_VDPAU,
+ VO_BOTH_FIELDS | seq->chroma | seq->
+ reset);
+ seq->reset = 0;
+ img->drawn = 0;
+ }
+ else
+ img = seq->cur_pic.videoSurface;
+
+ if (!img)
+ { /* should not happen */
+ fprintf (stderr,
+ "vdpau_h264_alter : !!!!!!!!!!!!!!!!!!!!!! No vo_frame_t !!!!!!!!!!!!!!!!!!!!!!!\n");
+ return;
+ }
+
+ vdpau_accel_t *accel = (vdpau_accel_t *) img->accel_data;
+ if (!seq->accel_vdpau)
+ seq->accel_vdpau = accel;
+
+ if (seq->vdp_runtime_nr != *(seq->accel_vdpau->current_vdp_runtime_nr))
+ vd->decoder = VDP_INVALID_HANDLE;
+
+ sl = &vd->sequence.slice_param;
+ pic = vd->sequence.pic_param[sl->pic_parameter_set_id];
+ sp = vd->sequence.seq_param[pic->seq_parameter_set_id];
+
+ VdpStatus st;
+ if (vd->decoder == VDP_INVALID_HANDLE || vd->decoder_profile != seq->profile
+ || vd->decoder_width != seq->coded_width
+ || vd->decoder_height != seq->coded_height)
+ {
+ if (vd->decoder != VDP_INVALID_HANDLE)
+ {
+ accel->vdp_decoder_destroy (vd->decoder);
+ vd->decoder = VDP_INVALID_HANDLE;
+ }
+ st =
+ accel->vdp_decoder_create (accel->vdp_device, seq->profile,
+ seq->coded_width, seq->coded_height,
+ sp->num_ref_frames, &vd->decoder);
+ if (st != VDP_STATUS_OK)
+ fprintf (stderr, "vdpau_h264_alter : failed to create decoder !! %s\n",
+ accel->vdp_get_error_string (st));
+ else
+ {
+ vd->decoder_profile = seq->profile;
+ vd->decoder_width = seq->coded_width;
+ vd->decoder_height = seq->coded_height;
+ seq->vdp_runtime_nr = seq->accel_vdpau->vdp_runtime_nr;
+ }
+ }
+
+ info.slice_count = seq->slices_count;
+ info.field_order_cnt[0] = seq->cur_pic.TopFieldOrderCnt;
+ info.field_order_cnt[1] = seq->cur_pic.BottomFieldOrderCnt;
+ info.is_reference = sl->nal_ref_idc ? VDP_TRUE : VDP_FALSE;
+ info.frame_num = sl->frame_num;
+ info.field_pic_flag = sl->field_pic_flag;
+ info.bottom_field_flag = sl->bottom_field_flag;
+ info.num_ref_frames = sp->num_ref_frames;
+ info.mb_adaptive_frame_field_flag = sp->mb_adaptive_frame_field_flag
+ && !sl->field_pic_flag;
+ info.constrained_intra_pred_flag = pic->constrained_intra_pred_flag;
+ info.weighted_pred_flag = pic->weighted_pred_flag;
+ info.weighted_bipred_idc = pic->weighted_bipred_idc;
+ info.frame_mbs_only_flag = sp->frame_mbs_only_flag;
+ info.transform_8x8_mode_flag = pic->transform_8x8_mode_flag;
+ info.chroma_qp_index_offset = pic->chroma_qp_index_offset;
+ info.second_chroma_qp_index_offset = pic->second_chroma_qp_index_offset;
+ info.pic_init_qp_minus26 = pic->pic_init_qp_minus26;
+ info.num_ref_idx_l0_active_minus1 = pic->num_ref_idx_l0_active_minus1;
+ info.num_ref_idx_l1_active_minus1 = pic->num_ref_idx_l1_active_minus1;
+ info.log2_max_frame_num_minus4 = sp->log2_max_frame_num_minus4;
+ info.pic_order_cnt_type = sp->pic_order_cnt_type;
+ info.log2_max_pic_order_cnt_lsb_minus4 =
+ sp->log2_max_pic_order_cnt_lsb_minus4;
+ info.delta_pic_order_always_zero_flag =
+ sp->delta_pic_order_always_zero_flag;
+ info.direct_8x8_inference_flag = sp->direct_8x8_inference_flag;
+ info.entropy_coding_mode_flag = pic->entropy_coding_mode_flag;
+ info.pic_order_present_flag = pic->pic_order_present_flag;
+ info.deblocking_filter_control_present_flag =
+ pic->deblocking_filter_control_present_flag;
+ info.redundant_pic_cnt_present_flag = pic->redundant_pic_cnt_present_flag;
+
+ if (!pic->pic_scaling_matrix_present_flag)
+ {
+ xine_fast_memcpy (info.scaling_lists_4x4, sp->scaling_lists_4x4,
+ sizeof (info.scaling_lists_4x4));
+ xine_fast_memcpy (info.scaling_lists_8x8, sp->scaling_lists_8x8,
+ sizeof (info.scaling_lists_8x8));
+ }
+ else
+ {
+ xine_fast_memcpy (info.scaling_lists_4x4, pic->scaling_lists_4x4,
+ sizeof (info.scaling_lists_4x4));
+ xine_fast_memcpy (info.scaling_lists_8x8, pic->scaling_lists_8x8,
+ sizeof (info.scaling_lists_8x8));
+ }
+
+ j = 0;
+ for (i = (MAX_DPB_SIZE - 1); i > -1; i--)
+ {
+ if (!seq->dpb[i]->used)
+ continue;
+ vdpau_accel_t *accel =
+ (vdpau_accel_t *) seq->dpb[i]->videoSurface->accel_data;
+ info.referenceFrames[j].surface = accel->surface;
+ info.referenceFrames[j].is_long_term = 0;
+ info.referenceFrames[j].frame_idx = seq->dpb[i]->FrameNum;
+ info.referenceFrames[j].top_is_reference =
+ seq->dpb[i]->is_reference[0] ? VDP_TRUE : VDP_FALSE;
+ info.referenceFrames[j].bottom_is_reference =
+ seq->dpb[i]->is_reference[1] ? VDP_TRUE : VDP_FALSE;
+ info.referenceFrames[j].field_order_cnt[0] =
+ seq->dpb[i]->TopFieldOrderCnt;
+ info.referenceFrames[j].field_order_cnt[1] =
+ seq->dpb[i]->BottomFieldOrderCnt;
+ ++j;
+ }
+ for (; j < MAX_DPB_SIZE; j++)
+ {
+ info.referenceFrames[j].surface = VDP_INVALID_HANDLE;
+ info.referenceFrames[j].is_long_term = 0;
+ info.referenceFrames[j].frame_idx = 0;
+ info.referenceFrames[j].top_is_reference = 0;
+ info.referenceFrames[j].bottom_is_reference = 0;
+ info.referenceFrames[j].field_order_cnt[0] = 0;
+ info.referenceFrames[j].field_order_cnt[1] = 0;
+ }
+
+ uint8_t sc[3] = { 0, 0, 1 };
+ VdpBitstreamBuffer vbits[seq->slices_count * 2];
+ for (i = 0; i < seq->slices_count; i++)
+ {
+ vbits[i * 2].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+ vbits[i * 2].bitstream = sc;
+ vbits[i * 2].bitstream_bytes = 3;
+ vbits[(i * 2) + 1].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+ vbits[(i * 2) + 1].bitstream = seq->buf + seq->slices[i].buf_offset;
+ vbits[(i * 2) + 1].bitstream_bytes = seq->slices[i].len;
+ }
+ st =
+ accel->vdp_decoder_render (vd->decoder, accel->surface,
+ (VdpPictureInfo *) & info,
+ seq->slices_count * 2, vbits);
+ if (st != VDP_STATUS_OK)
+ lprintf ("**********************DECODING failed! - surface = %d - %s\n",
+ accel->surface, accel->vdp_get_error_string (st));
+ else
+ lprintf ("**********************DECODING success! - surface = %d\n",
+ accel->surface);
+
+ if ((seq->ratio != seq->reported_ratio)
+ || (seq->coded_width != seq->reported_coded_width)
+ || (seq->coded_height != seq->reported_coded_height)
+ || (seq->video_step != seq->reported_video_step))
+ {
+ seq->reported_ratio = seq->ratio;
+ seq->reported_coded_width = seq->coded_width;
+ seq->reported_coded_height = seq->coded_height;
+ seq->reported_video_step = seq->video_step;
+ _x_stream_info_set (vd->stream, XINE_STREAM_INFO_VIDEO_WIDTH,
+ seq->coded_width);
+ _x_stream_info_set (vd->stream, XINE_STREAM_INFO_VIDEO_HEIGHT,
+ seq->coded_height);
+ _x_stream_info_set (vd->stream, XINE_STREAM_INFO_VIDEO_RATIO,
+ ((double) 10000 * seq->ratio));
+ _x_stream_info_set (vd->stream, XINE_STREAM_INFO_FRAME_DURATION,
+ seq->video_step);
+ _x_meta_info_set_utf8 (vd->stream, XINE_META_INFO_VIDEOCODEC,
+ "H264/AVC (vdpau_alter)");
+ xine_event_t event;
+ xine_format_change_data_t data;
+ event.type = XINE_EVENT_FRAME_FORMAT_CHANGE;
+ event.stream = vd->stream;
+ event.data = &data;
+ event.data_length = sizeof (data);
+ data.width = seq->coded_width;
+ data.height = seq->coded_height;
+ data.aspect = seq->ratio;
+ xine_event_send (vd->stream, &event);
+ }
+
+ accel->color_standard = seq->color_standard;
+
+ if (seq->cur_pic.completed == PICTURE_DONE)
+ {
+ seq->cur_pic.pts = seq->pic_pts;
+ seq->pic_pts = 0;
+ }
+ if (seq->cur_pic.drop_pts)
+ seq->cur_pic.pts = 0;
+ if (sp->frame_mbs_only_flag)
+ img->progressive_frame = -1;
+ img->bad_frame = bad_frame;
+ img->duration = seq->video_step;
+ seq->cur_pic.videoSurface = img;
+}
+
+
+
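+/* decode_picture: computes the picture order counts and picture numbers,
+ * reorders the reference lists, renders the current picture through VDPAU
+ * and updates the DPB (drawing and/or appending frames as needed) */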
+static void
+decode_picture (vdpau_h264_alter_decoder_t * decoder)
+{
+ if (decoder->sequence.cur_pic.missing_header
+ || !decoder->sequence.startup_frame)
+ {
+ memset (&decoder->sequence.cur_pic, 0, sizeof (dpb_frame_t));
+ lprintf ("MISSING_HEADER or !startup_frame\n\n");
+ return;
+ }
+
+ slice_param_t *sl = &decoder->sequence.slice_param;
+ dpb_frame_t *cur_pic = &decoder->sequence.cur_pic;
+
+ if (cur_pic->completed && cur_pic->field_pic_flag)
+ {
+ int wrong_field = 0;
+ if ((sl->frame_num != cur_pic->FrameNum)
+ || (sl->bottom_field_flag
+ && (cur_pic->completed == PICTURE_BOTTOM_DONE))
+ || (!sl->bottom_field_flag
+ && (cur_pic->completed == PICTURE_TOP_DONE))
+ || !sl->field_pic_flag)
+ {
+ wrong_field = 1;
+ }
+ if (wrong_field)
+ {
+ fprintf (stderr, "vdpau_h264_alter : Wrong field, skipping.\n");
+ memset (cur_pic, 0, sizeof (dpb_frame_t));
+ dpb_reset (&decoder->sequence);
+ cur_pic->missing_header = 1;
+ decoder->sequence.startup_frame = 0;
+ return;
+ }
+ }
+
+ /* picture decoding */
+ decode_poc (decoder);
+ lprintf ("TopFieldOrderCnt = %d - BottomFieldOrderCnt = %d\n",
+ cur_pic->TopFieldOrderCnt, cur_pic->BottomFieldOrderCnt);
+ if (sl->nal_unit_type == 5)
+ {
+ dpb_draw_frames (decoder, MAX_POC, DPB_DRAW_CLEAR);
+ decoder->sequence.startup_frame = START_IDR_FLAG;
+ }
+ decode_picnum (decoder);
+ ref_pic_list_reordering (decoder);
+ lprintf ("............................. slices_count = %d\n",
+ decoder->sequence.slices_count);
+
+ decode_render (decoder, check_ref_list (decoder));
+
+ /* dec_ref_pic_marking */
+ slice_header_post (decoder);
+
+ if (!cur_pic->is_reference[0] && !cur_pic->is_reference[1])
+ {
+ if (cur_pic->completed == PICTURE_DONE)
+ {
+ dpb_draw_frames (decoder,
+ (cur_pic->TopFieldOrderCnt >
+ cur_pic->BottomFieldOrderCnt) ? cur_pic->
+ TopFieldOrderCnt : cur_pic->BottomFieldOrderCnt,
+ DPB_DRAW_CURRENT);
+ }
+ }
+ else
+ {
+ if (decoder->sequence.
+ seq_param[decoder->sequence.pic_param[sl->pic_parameter_set_id]->
+ seq_parameter_set_id]->pic_order_cnt_type == 2)
+ dpb_draw_frames (decoder,
+ (cur_pic->TopFieldOrderCnt >
+ cur_pic->BottomFieldOrderCnt) ? cur_pic->
+ TopFieldOrderCnt : cur_pic->BottomFieldOrderCnt,
+ DPB_DRAW_REFS);
+
+ if (!sl->field_pic_flag || cur_pic->completed < PICTURE_DONE)
+ dbp_append (decoder, 0);
+ else
+ dbp_append (decoder, 1);
+ }
+
+ if (cur_pic->completed == PICTURE_DONE)
+ memset (cur_pic, 0, sizeof (dpb_frame_t));
+
+
+ lprintf
+ ("\n___________________________________________________________________________________________\n\n");
+}
+
+
+
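+/* parse_startcodes: reads the NAL unit header and dispatches to the SPS,
+ * PPS or slice header parsers; slice NALs are also recorded in the slice
+ * table so decode_render() can hand them to VDPAU later */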
+static int
+parse_startcodes (vdpau_h264_alter_decoder_t * this_gen, uint8_t * buf,
+ uint32_t len)
+{
+ sequence_t *sequence = (sequence_t *) & this_gen->sequence;
+ bits_reader_set (&sequence->br, buf, len);
+ int ret = 0;
+
+ skip_bits (&sequence->br, 1); /* forbidden_zero_bit */
+ uint8_t nal_ref_idc = read_bits (&sequence->br, 2);
+ uint8_t nal_unit_type = read_bits (&sequence->br, 5);
+ lprintf ("NAL size = %d, nal_ref_idc = %d, nal_unit_type = %d\n", len,
+ nal_ref_idc, nal_unit_type);
+
+ switch (nal_unit_type)
+ {
+ case NAL_END_SEQUENCE:
+ break;
+ case NAL_SEQUENCE:
+ seq_parameter_set_data (this_gen);
+ break;
+ case NAL_PICTURE:
+ pic_parameter_set (this_gen);
+ break;
+ case NAL_SLICE_IDR:
+ slice_header (this_gen, nal_ref_idc, nal_unit_type);
+ sequence->slices[sequence->slices_count].buf_offset = buf - sequence->buf;
+ sequence->slices[sequence->slices_count].len = len;
+ ++sequence->slices_count;
+ sequence->slice_mode = NAL_SLICE_IDR;
+ break;
+ case NAL_SLICE_NO_IDR:
+ slice_header (this_gen, nal_ref_idc, nal_unit_type);
+ sequence->slices[sequence->slices_count].buf_offset = buf - sequence->buf;
+ sequence->slices[sequence->slices_count].len = len;
+ ++sequence->slices_count;
+ sequence->slice_mode = NAL_SLICE_NO_IDR;
+ break;
+ }
+
+ return ret;
+}
+
+
+
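+/* parse_codec_private: parses avcC-style extradata (as found in MP4/MKV):
+ * the NAL length field size, then the embedded SPS and PPS sets; it also
+ * switches the decoder to length-prefixed (mode_frame) input */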
+static void
+parse_codec_private (vdpau_h264_alter_decoder_t * this_gen, uint8_t * buf,
+ uint32_t len)
+{
+ sequence_t *sequence = (sequence_t *) & this_gen->sequence;
+ bits_reader_set (&sequence->br, buf, len);
+ uint8_t *buffer = buf;
+ int i;
+
+ lprintf ("parse_codec_private\n");
+
+ sequence->mode_frame = 1;
+
+ // reserved
+ skip_bits (&sequence->br, 8);
+ skip_bits (&sequence->br, 8); /* profile_idc */
+ skip_bits (&sequence->br, 8);
+ skip_bits (&sequence->br, 8); /* level_idc */
+ skip_bits (&sequence->br, 6);
+
+ sequence->frame_header_size = read_bits (&sequence->br, 2) + 1;
+ //sequence->frame_header_size = 3;
+ skip_bits (&sequence->br, 3);
+ uint8_t count = read_bits (&sequence->br, 5);
+ buffer += 6;
+ for (i = 0; i < count; i++)
+ {
+ bits_reader_set (&sequence->br, buffer, len - (buffer - buf));
+ uint16_t sps_size = read_bits (&sequence->br, 16);
+ skip_bits (&sequence->br, 8);
+ seq_parameter_set_data (this_gen);
+ buffer += sps_size + 2;
+ }
+ count = buffer[0];
+ ++buffer;
+ for (i = 0; i < count; i++)
+ {
+ bits_reader_set (&sequence->br, buffer, len - (buffer - buf));
+ uint16_t pps_size = read_bits (&sequence->br, 16);
+ skip_bits (&sequence->br, 8);
+ pic_parameter_set (this_gen);
+ buffer += pps_size + 2;
+ }
+}
+
+
+
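+/* flush_buffer: drops the already consumed part of the accumulation buffer,
+ * moving the remaining bytes to the front (a fresh buffer is allocated when
+ * source and destination would overlap) */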
+static void
+flush_buffer (sequence_t * seq)
+{
+ if ((seq->bufpos - seq->bufseek) >= seq->bufseek)
+ {
+ seq->bufsize = (seq->bufpos - seq->bufseek) + MIN_BUFFER_SIZE;
+ lprintf ("buffer too short, have to allocate a new one.\n");
+ uint8_t *tmp = (uint8_t *) malloc (seq->bufsize);
+ xine_fast_memcpy (tmp, seq->buf + seq->bufseek,
+ seq->bufpos - seq->bufseek);
+ free (seq->buf);
+ seq->buf = tmp;
+ }
+ else
+ xine_fast_memcpy (seq->buf, seq->buf + seq->bufseek,
+ seq->bufpos - seq->bufseek);
+
+ seq->bufpos -= seq->bufseek;
+ seq->start = -1;
+ seq->bufseek = 0;
+ reset_slices (seq);
+}
+
+
+
+/*
+ * This function receives a buffer of data from the demuxer layer and
+ * figures out how to handle it based on its header flags.
+ */
+static void
+vdpau_h264_alter_decode_data (video_decoder_t * this_gen, buf_element_t * buf)
+{
+ vdpau_h264_alter_decoder_t *this = (vdpau_h264_alter_decoder_t *) this_gen;
+ sequence_t *seq = (sequence_t *) & this->sequence;
+
+#ifdef MAKE_DAT
+ fwrite (buf->content, 1, buf->size, outfile);
+#endif
+
+ if (buf->decoder_flags & BUF_FLAG_FRAMERATE)
+ {
+ lprintf ("BUF_FLAG_FRAMERATE\n");
+ seq->video_step = buf->decoder_info[0];
+ }
+
+ if (buf->decoder_flags & BUF_FLAG_ASPECT)
+ {
+ lprintf ("BUF_FLAG_ASPECT\n");
+ seq->ratio =
+ (double) buf->decoder_info[1] / (double) buf->decoder_info[2];
+ }
+
+ if (buf->decoder_flags & BUF_FLAG_STDHEADER)
+ {
+ lprintf ("BUF_FLAG_STDHEADER\n");
+ seq->flag_header = 1;
+ xine_bmiheader *bih = (xine_bmiheader *) buf->content;
+ seq->coded_width = bih->biWidth;
+ seq->coded_height = bih->biHeight;
+ uint8_t *codec_private = buf->content + sizeof (xine_bmiheader);
+ uint32_t codec_private_len = bih->biSize - sizeof (xine_bmiheader);
+ if (codec_private_len > 0)
+ parse_codec_private (this, codec_private, codec_private_len);
+ return;
+ }
+
+ if (buf->decoder_flags & BUF_FLAG_SPECIAL)
+ {
+ if (buf->decoder_info[1] == BUF_SPECIAL_DECODER_CONFIG)
+ {
+ lprintf ("BUF_SPECIAL_DECODER_CONFIG\n");
+ seq->flag_header = 1;
+ uint8_t *codec_private = buf->decoder_info_ptr[2];
+ uint32_t codec_private_len = buf->decoder_info[2];
+ if (codec_private_len > 0)
+ parse_codec_private (this, codec_private, codec_private_len);
+ }
+ return;
+ }
+
+ if (!buf->size)
+ return;
+
+ int size = seq->bufpos + buf->size;
+ if (seq->bufsize < size)
+ {
+ if (seq->bufsize > MAX_BUFFER_SIZE)
+ {
+ fprintf (stderr,
+ "vdpau_h264_alter : sorry, can't accumulate so much data, broken stream?\n");
+ reset_sequence (seq);
+ return;
+ }
+ seq->bufsize = size + MIN_BUFFER_SIZE;
+ seq->buf = (uint8_t *) realloc (seq->buf, seq->bufsize);
+ lprintf ("realloc new size = %d\n", seq->bufsize);
+ }
+ xine_fast_memcpy (seq->buf + seq->bufpos, buf->content, buf->size);
+ seq->bufpos += buf->size;
+
+ if (buf->decoder_flags & BUF_FLAG_FRAME_START)
+ seq->pic_pts = buf->pts;
+
+ int frame_end = buf->decoder_flags & BUF_FLAG_FRAME_END;
+
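+  /* length-prefixed mode (set up by parse_codec_private): each NAL unit is
+   * preceded by a frame_header_size byte length field, so no start code
+   * scanning is needed */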
+ if (seq->mode_frame)
+ {
+ if (!seq->pic_pts)
+ seq->pic_pts = buf->pts;
+ if (frame_end)
+ {
+ if (buf->pts)
+ seq->pic_pts = buf->pts;
+ lprintf ("frame_end && seq->mode_frame\n");
+ int fhs;
+ uint8_t tb;
+ uint32_t j = 0;
+ while (j < seq->bufpos)
+ {
+ uint32_t s = 0;
+ for (fhs = 0; fhs < seq->frame_header_size; fhs++)
+ s |= seq->buf[j + fhs] << (8 * (seq->frame_header_size - 1 - fhs));
+ tb = *(seq->buf + j + seq->frame_header_size) & 0x1F;
+ if (seq->slice_mode && (tb != seq->slice_mode))
+ {
+ decode_picture (this);
+ reset_slices (seq);
+ }
+ parse_startcodes (this, seq->buf + j + seq->frame_header_size, s);
+ j += seq->frame_header_size + s;
+ }
+ if (seq->slice_mode)
+ {
+ decode_picture (this);
+ reset_slices (seq);
+ }
+ seq->bufpos = 0;
+ }
+ return;
+ }
+
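+  /* Annex B mode: scan the accumulated buffer for 00 00 01 start codes and
+   * hand each complete NAL unit to parse_startcodes() */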
+ while (seq->bufseek <= seq->bufpos - 4)
+ {
+ uint8_t *buffer = seq->buf + seq->bufseek;
+ if (buffer[0] == 0 && buffer[1] == 0 && buffer[2] == 1)
+ {
+ if (seq->start < 0)
+ {
+ seq->start = seq->bufseek;
+ uint8_t tb = buffer[3] & 0x1F;
+ if (((tb == NAL_SLICE_NO_IDR) || (tb == NAL_SLICE_IDR))
+ && !seq->pic_pts)
+ seq->pic_pts = buf->pts;
+ if (seq->slice_mode && (tb != seq->slice_mode))
+ {
+ decode_picture (this);
+ flush_buffer (seq);
+ }
+ if ((tb & 0x1F) == NAL_END_SEQUENCE)
+ {
+ dpb_print (seq);
+ dpb_draw_frames (this, MAX_POC, DPB_DRAW_CLEAR);
+ lprintf ("NAL_END_SEQUENCE\n");
+ dpb_print (seq);
+ }
+ }
+ else
+ {
+ parse_startcodes (this, seq->buf + seq->start + 3,
+ seq->bufseek - seq->start - 3);
+ seq->start = -1;
+ --seq->bufseek;
+ }
+ }
+ ++seq->bufseek;
+ }
+
+ if (frame_end && seq->flag_header && (seq->start > -1)
+ && (seq->bufseek > seq->start))
+ {
+ lprintf ("frame_end && seq->start\n");
+ seq->bufseek = seq->bufpos;
+ parse_startcodes (this, seq->buf + seq->start + 3,
+ seq->bufseek - seq->start - 3);
+ if (seq->slice_mode)
+ decode_picture (this);
+ flush_buffer (seq);
+ }
+}
+
+
+/*
+ * This function is called when xine needs to flush the system.
+ */
+static void
+vdpau_h264_alter_flush (video_decoder_t * this_gen)
+{
+ vdpau_h264_alter_decoder_t *this = (vdpau_h264_alter_decoder_t *) this_gen;
+
+ printf ("vdpau_h264_alter_flush\n");
+ dpb_draw_frames (this, MAX_POC, DPB_DRAW_REFS);
+}
+
+
+/*
+ * This function resets the video decoder.
+ */
+static void
+vdpau_h264_alter_reset (video_decoder_t * this_gen)
+{
+ vdpau_h264_alter_decoder_t *this = (vdpau_h264_alter_decoder_t *) this_gen;
+
+ lprintf ("vdpau_h264_alter_reset\n");
+ reset_sequence (&this->sequence);
+}
+
+
+/*
+ * The decoder should forget any stored pts values here.
+ */
+static void
+vdpau_h264_alter_discontinuity (video_decoder_t * this_gen)
+{
+ vdpau_h264_alter_decoder_t *this = (vdpau_h264_alter_decoder_t *) this_gen;
+ printf ("vdpau_h264_alter_discontinuity\n");
+
+ dpb_clear_all_pts (&this->sequence);
+ this->sequence.reset = VO_NEW_SEQUENCE_FLAG;
+}
+
+
+/*
+ * This function frees the video decoder instance allocated to the decoder.
+ */
+static void
+vdpau_h264_alter_dispose (video_decoder_t * this_gen)
+{
+
+ vdpau_h264_alter_decoder_t *this = (vdpau_h264_alter_decoder_t *) this_gen;
+
+ lprintf ("vdpau_h264_alter_dispose\n");
+
+ if ((this->decoder != VDP_INVALID_HANDLE) && this->sequence.accel_vdpau)
+ {
+ this->sequence.accel_vdpau->vdp_decoder_destroy (this->decoder);
+ this->decoder = VDP_INVALID_HANDLE;
+ }
+ reset_sequence (&this->sequence);
+
+ int i;
+ for (i = 0; i < MAX_DPB_SIZE; i++)
+ free (this->sequence.dpb[i]);
+ for (i = 0; i < 32; i++)
+ if (this->sequence.seq_param[i])
+ free (this->sequence.seq_param[i]);
+ for (i = 0; i < 255; i++)
+ if (this->sequence.pic_param[i])
+ free (this->sequence.pic_param[i]);
+
+ this->stream->video_out->close (this->stream->video_out, this->stream);
+
+ free (this->sequence.buf);
+ free (this_gen);
+}
+
+
+/*
+ * This function allocates, initializes, and returns a private video
+ * decoder structure.
+ */
+static video_decoder_t *
+open_plugin (video_decoder_class_t * class_gen, xine_stream_t * stream)
+{
+
+ vdpau_h264_alter_decoder_t *this;
+
+ /* the videoout must be vdpau-capable to support this decoder */
+ if (!
+ (stream->video_driver->
+ get_capabilities (stream->video_driver) & VO_CAP_VDPAU_H264))
+ return NULL;
+
+ /* now check if vdpau has free decoder resource */
+ vo_frame_t *img =
+ stream->video_out->get_frame (stream->video_out, 1920, 1080, 1,
+ XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS);
+ vdpau_accel_t *accel = (vdpau_accel_t *) img->accel_data;
+ int runtime_nr = accel->vdp_runtime_nr;
+ img->free (img);
+ VdpDecoder decoder;
+ VdpStatus st =
+ accel->vdp_decoder_create (accel->vdp_device,
+ VDP_DECODER_PROFILE_H264_MAIN, 1920, 1080, 16,
+ &decoder);
+ if (st != VDP_STATUS_OK)
+ {
+ fprintf (stderr, "can't create vdpau decoder!\n");
+ return NULL;
+ }
+
+ accel->vdp_decoder_destroy (decoder);
+
+ this =
+ (vdpau_h264_alter_decoder_t *) calloc (1,
+ sizeof
+ (vdpau_h264_alter_decoder_t));
+
+ this->video_decoder.decode_data = vdpau_h264_alter_decode_data;
+ this->video_decoder.flush = vdpau_h264_alter_flush;
+ this->video_decoder.reset = vdpau_h264_alter_reset;
+ this->video_decoder.discontinuity = vdpau_h264_alter_discontinuity;
+ this->video_decoder.dispose = vdpau_h264_alter_dispose;
+
+ this->stream = stream;
+ this->class = (vdpau_h264_alter_class_t *) class_gen;
+
+ int i;
+ for (i = 0; i < 16; i++)
+ this->sequence.dpb[i] = (dpb_frame_t *) calloc (1, sizeof (dpb_frame_t));
+ this->sequence.bufsize = MIN_BUFFER_SIZE;
+ this->sequence.buf = (uint8_t *) malloc (this->sequence.bufsize);
+ this->sequence.vdp_runtime_nr = runtime_nr;
+  this->sequence.reset = VO_NEW_SEQUENCE_FLAG;
+ this->sequence.ratio = 0.0;
+ this->sequence.video_step = 3600;
+ this->sequence.coded_width = 1280;
+ this->sequence.coded_height = 720;
+ this->sequence.reported_ratio = 0.0;
+ this->sequence.reported_video_step = 0;
+ this->sequence.reported_coded_width = 0;
+ this->sequence.reported_coded_height = 0;
+ this->sequence.frame_header_size = 4;
+ this->sequence.flag_header = 0;
+ this->sequence.mode_frame = 0;
+ reset_sequence (&this->sequence);
+
+ this->decoder = VDP_INVALID_HANDLE;
+ this->sequence.accel_vdpau = NULL;
+
+ (stream->video_out->open) (stream->video_out, stream);
+
+#ifdef MAKE_DAT
+ outfile = fopen ("/tmp/h264.dat", "w");
+ nframes = 0;
+#endif
+
+ return &this->video_decoder;
+}
+
+
+/*
+ * This function allocates a private video decoder class and initializes
+ * the class's member functions.
+ */
+static void *
+init_plugin (xine_t * xine, void *data)
+{
+
+ vdpau_h264_alter_class_t *this;
+
+ this =
+ (vdpau_h264_alter_class_t *) calloc (1,
+ sizeof (vdpau_h264_alter_class_t));
+
+ this->decoder_class.open_plugin = open_plugin;
+ this->decoder_class.identifier = "vdpau_h264_alter";
+ this->decoder_class.description =
+ N_
+ ("vdpau_h264_alter: H264 decoder plugin using VDPAU hardware decoding.\n"
+ "Must be used along with video_out_vdpau.");
+ this->decoder_class.dispose = default_video_decoder_class_dispose;
+
+ return this;
+}
+
+
+/*
+ * This is a list of all of the internal xine video buffer types that
+ * this decoder is able to handle. Check src/xine-engine/buffer.h for a
+ * list of valid buffer types (and add a new one if the one you need does
+ * not exist). Terminate the list with a 0.
+ */
+static const uint32_t video_types[] = {
+ BUF_VIDEO_H264,
+ 0
+};
+
+
+/*
+ * This data structure combines the list of supported xine buffer types and
+ * the priority that the plugin should be given with respect to other
+ * plugins that handle the same buffer type. A plugin with priority (n+1)
+ * will be used instead of a plugin with priority (n).
+ */
+static const decoder_info_t dec_info_video = {
+ video_types, /* supported types */
+ 9 /* priority */
+};
+
+
+/*
+ * The plugin catalog entry. This is the only information that this plugin
+ * will export to the public.
+ */
+const plugin_info_t xine_plugin_info[] EXPORTED = {
+ /* { type, API, "name", version, special_info, init_function } */
+ {PLUGIN_VIDEO_DECODER, 19, "vdpau_h264_alter", XINE_VERSION_CODE,
+ &dec_info_video, init_plugin},
+ {PLUGIN_NONE, 0, "", 0, NULL, NULL}
+};
diff --git a/src/video_dec/libvdpau/alterh264_decode.h b/src/video_dec/libvdpau/alterh264_decode.h
new file mode 100644
index 000000000..88f5e638f
--- /dev/null
+++ b/src/video_dec/libvdpau/alterh264_decode.h
@@ -0,0 +1,339 @@
+/* kate: tab-indent on; indent-width 4; mixedindent off; indent-mode cstyle; remove-trailing-space on; */
+#ifndef ALTERH264_DECODE_H
+#define ALTERH264_DECODE_H
+
+//#define LOG
+#define LOG_MODULE "vdpau_h264"
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <xine/xine_internal.h>
+#include <xine/video_out.h>
+#include <xine/buffer.h>
+#include <xine/xineutils.h>
+#include "accel_vdpau.h"
+#include <vdpau/vdpau.h>
+
+#include "alterh264_bits_reader.h"
+
+
+
+enum aspect_ratio
+{
+ ASPECT_UNSPECIFIED = 0,
+ ASPECT_1_1,
+ ASPECT_12_11,
+ ASPECT_10_11,
+ ASPECT_16_11,
+ ASPECT_40_33,
+ ASPECT_24_11,
+ ASPECT_20_11,
+ ASPECT_32_11,
+ ASPECT_80_33,
+ ASPECT_18_11,
+ ASPECT_15_11,
+ ASPECT_64_33,
+ ASPECT_160_99,
+ ASPECT_4_3,
+ ASPECT_3_2,
+ ASPECT_2_1,
+ ASPECT_RESERVED,
+ ASPECT_EXTENDED_SAR = 255
+};
+
+
+
+static const uint8_t zigzag_4x4[16] = {
+ 0, 1, 4, 8,
+ 5, 2, 3, 6,
+ 9, 12, 13, 10,
+ 7, 11, 14, 15
+};
+
+static const uint8_t zigzag_8x8[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+static const uint8_t default_4x4_intra[16] = {
+ 6, 13, 13, 20,
+ 20, 20, 28, 28,
+ 28, 28, 32, 32,
+ 32, 37, 37, 42
+};
+
+static const uint8_t default_4x4_inter[16] = {
+ 10, 14, 14, 20,
+ 20, 20, 24, 24,
+ 24, 24, 27, 27,
+ 27, 30, 30, 34
+};
+
+static const uint8_t default_8x8_intra[64] = {
+ 6, 10, 10, 13, 11, 13, 16, 16,
+ 16, 16, 18, 18, 18, 18, 18, 23,
+ 23, 23, 23, 23, 23, 25, 25, 25,
+ 25, 25, 25, 25, 27, 27, 27, 27,
+ 27, 27, 27, 27, 29, 29, 29, 29,
+ 29, 29, 29, 31, 31, 31, 31, 31,
+ 31, 33, 33, 33, 33, 33, 36, 36,
+ 36, 36, 38, 38, 38, 40, 40, 42
+};
+
+static const uint8_t default_8x8_inter[64] = {
+ 9, 13, 13, 15, 13, 15, 17, 17,
+ 17, 17, 19, 19, 19, 19, 19, 21,
+ 21, 21, 21, 21, 21, 22, 22, 22,
+ 22, 22, 22, 22, 24, 24, 24, 24,
+ 24, 24, 24, 24, 25, 25, 25, 25,
+ 25, 25, 25, 27, 27, 27, 27, 27,
+ 27, 28, 28, 28, 28, 28, 30, 30,
+ 30, 30, 32, 32, 32, 33, 33, 35
+};
+
+
+
+typedef struct
+{
+ uint8_t aspect_ratio_info;
+ uint8_t aspect_ratio_idc;
+ uint16_t sar_width;
+ uint16_t sar_height;
+ uint8_t colour_desc;
+ uint8_t colour_primaries;
+ uint8_t timing_info;
+ uint32_t num_units_in_tick;
+ uint32_t time_scale;
+} vui_param_t;
+
+
+
+typedef struct
+{
+ uint8_t profile_idc;
+ uint8_t level_idc;
+ uint8_t seq_parameter_set_id;
+ uint8_t constraint_set0_flag;
+ uint8_t constraint_set1_flag;
+ uint8_t constraint_set2_flag;
+ uint8_t constraint_set3_flag;
+ uint8_t chroma_format_idc;
+ uint8_t separate_colour_plane_flag;
+ uint8_t bit_depth_luma_minus8;
+ uint8_t bit_depth_chroma_minus8;
+ uint8_t qpprime_y_zero_transform_bypass_flag;
+ uint8_t seq_scaling_matrix_present_flag;
+ uint8_t scaling_lists_4x4[6][16];
+ uint8_t scaling_lists_8x8[2][64];
+ uint8_t log2_max_frame_num_minus4;
+ uint8_t pic_order_cnt_type;
+ uint8_t log2_max_pic_order_cnt_lsb_minus4;
+ uint8_t delta_pic_order_always_zero_flag;
+ int32_t offset_for_non_ref_pic;
+ int32_t offset_for_top_to_bottom_field;
+ uint8_t num_ref_frames_in_pic_order_cnt_cycle;
+ int32_t offset_for_ref_frame[256];
+ uint8_t num_ref_frames;
+ uint8_t gaps_in_frame_num_value_allowed_flag;
+ uint8_t pic_width_in_mbs_minus1;
+ uint8_t pic_height_in_map_units_minus1;
+ uint8_t frame_mbs_only_flag;
+ uint8_t mb_adaptive_frame_field_flag;
+ uint8_t direct_8x8_inference_flag;
+ uint8_t frame_cropping_flag;
+ uint16_t frame_crop_left_offset;
+ uint16_t frame_crop_right_offset;
+ uint16_t frame_crop_top_offset;
+ uint16_t frame_crop_bottom_offset;
+ uint8_t vui_parameters_present_flag;
+ vui_param_t vui;
+} seq_param_t;
+
+
+
+typedef struct
+{
+ uint8_t pic_parameter_set_id;
+ uint8_t seq_parameter_set_id;
+ uint8_t entropy_coding_mode_flag;
+ uint8_t pic_order_present_flag;
+ /*uint8_t num_slice_groups_minus1;
+ uint8_t slice_group_map_type;
+ uint16_t run_length_minus1[64];
+ uint16_t top_left[64];
+ uint16_t bottom_right[64];
+ uint8_t slice_group_change_direction_flag;
+ uint16_t slice_group_change_rate_minus1;
+ uint16_t pic_size_in_map_units_minus1;
+ uint8_t slice_group_id[64]; */
+ uint8_t num_ref_idx_l0_active_minus1;
+ uint8_t num_ref_idx_l1_active_minus1;
+ uint8_t weighted_pred_flag;
+ uint8_t weighted_bipred_idc;
+ int8_t pic_init_qp_minus26;
+ int8_t pic_init_qs_minus26;
+ int8_t chroma_qp_index_offset;
+ uint8_t deblocking_filter_control_present_flag;
+ uint8_t constrained_intra_pred_flag;
+ uint8_t redundant_pic_cnt_present_flag;
+ uint8_t transform_8x8_mode_flag;
+ uint8_t pic_scaling_matrix_present_flag;
+ uint8_t pic_scaling_list_present_flag[8];
+ uint8_t scaling_lists_4x4[6][16];
+ uint8_t scaling_lists_8x8[2][64];
+ int8_t second_chroma_qp_index_offset;
+} pic_param_t;
+
+
+
+typedef struct
+{
+ uint8_t nal_ref_idc;
+ uint8_t nal_unit_type;
+ uint8_t slice_type;
+ uint8_t pic_parameter_set_id;
+ uint16_t frame_num;
+ uint32_t MaxFrameNum;
+ uint8_t field_pic_flag;
+ uint8_t bottom_field_flag;
+ uint16_t idr_pic_id;
+ uint16_t pic_order_cnt_lsb;
+ int32_t delta_pic_order_cnt_bottom;
+ int32_t delta_pic_order_cnt[2];
+ uint8_t redundant_pic_cnt;
+ uint8_t num_ref_idx_l0_active_minus1;
+ uint8_t num_ref_idx_l1_active_minus1;
+} slice_param_t;
+
+
+#define PICTURE_TOP_DONE 1
+#define PICTURE_BOTTOM_DONE 2
+#define PICTURE_DONE 3
+
+#define SHORT_TERM_REF 1
+#define LONG_TERM_REF 2
+
+typedef struct
+{
+ uint8_t used;
+ uint8_t missing_header;
+ int64_t pts;
+ uint8_t drop_pts;
+ uint8_t completed;
+ uint8_t top_field_first;
+ uint16_t FrameNum;
+ int32_t FrameNumWrap;
+ int32_t PicNum[2]; /* 0:top, 1:bottom */
+ uint8_t is_reference[2]; /* 0:top, 1:bottom, short or long term */
+ uint8_t field_pic_flag;
+ int32_t PicOrderCntMsb;
+ int32_t TopFieldOrderCnt;
+ int32_t BottomFieldOrderCnt;
+ uint16_t pic_order_cnt_lsb;
+ uint8_t mmc5;
+
+ vo_frame_t *videoSurface;
+} dpb_frame_t;
+
+
+
+typedef struct
+{
+ uint32_t buf_offset;
+ uint32_t len;
+} slice_t;
+
+
+
+typedef struct
+{
+ uint32_t coded_width;
+ uint32_t reported_coded_width;
+ uint32_t coded_height;
+ uint32_t reported_coded_height;
+ uint64_t video_step; /* frame duration in pts units */
+ uint64_t reported_video_step; /* frame duration in pts units */
+ double ratio;
+ double reported_ratio;
+
+ slice_t slices[68];
+ int slices_count;
+ int slice_mode;
+
+ seq_param_t *seq_param[32];
+ pic_param_t *pic_param[256];
+ slice_param_t slice_param;
+
+ dpb_frame_t *dpb[16];
+ dpb_frame_t cur_pic;
+ uint16_t prevFrameNum;
+ uint16_t prevFrameNumOffset;
+ uint8_t prevMMC5;
+
+ VdpColorStandard color_standard;
+ int chroma;
+ int top_field_first;
+ VdpDecoderProfile profile;
+
+ uint8_t *buf; /* accumulate data */
+ int bufseek;
+ uint32_t bufsize;
+ uint32_t bufpos;
+ int start;
+
+ int64_t pic_pts;
+
+ bits_reader_t br;
+
+ int vdp_runtime_nr;
+ vdpau_accel_t *accel_vdpau;
+
+ int reset;
+ int startup_frame;
+
+ uint8_t mode_frame;
+ uint8_t flag_header;
+ uint32_t frame_header_size;
+
+} sequence_t;
+
+
+
+typedef struct
+{
+ video_decoder_class_t decoder_class;
+} vdpau_h264_alter_class_t;
+
+
+
+typedef struct vdpau_h264_alter_decoder_s
+{
+ video_decoder_t video_decoder; /* parent video decoder structure */
+
+ vdpau_h264_alter_class_t *class;
+ xine_stream_t *stream;
+
+ sequence_t sequence;
+
+ VdpDecoder decoder;
+ VdpDecoderProfile decoder_profile;
+ uint32_t decoder_width;
+ uint32_t decoder_height;
+
+} vdpau_h264_alter_decoder_t;
+
+#endif /* ALTERH264_DECODE_H */
diff --git a/src/video_dec/libvdpau/bits_reader.h b/src/video_dec/libvdpau/bits_reader.h
new file mode 100644
index 000000000..db7cdfc7e
--- /dev/null
+++ b/src/video_dec/libvdpau/bits_reader.h
@@ -0,0 +1,82 @@
+#include <sys/types.h>
+
+
+
+typedef struct {
+ uint8_t *buffer, *start;
+ int offbits, length, oflow;
+} bits_reader_t;
+
+
+
+static void bits_reader_set( bits_reader_t *br, uint8_t *buf, int len )
+{
+ br->buffer = br->start = buf;
+ br->offbits = 0;
+ br->length = len;
+ br->oflow = 0;
+}
+
+
+
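+/* reads nbits (most significant bit first) and advances the read position;
+ * sets oflow and returns 0 if the request runs past the end of the buffer */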
+static uint32_t read_bits( bits_reader_t *br, int nbits )
+{
+ int i, nbytes;
+ uint32_t ret = 0;
+ uint8_t *buf;
+
+ buf = br->buffer;
+ nbytes = (br->offbits + nbits)/8;
+ if ( ((br->offbits + nbits) %8 ) > 0 )
+ nbytes++;
+ if ( (buf + nbytes) > (br->start + br->length) ) {
+ br->oflow = 1;
+ return 0;
+ }
+ for ( i=0; i<nbytes; i++ )
+ ret += buf[i]<<((nbytes-i-1)*8);
+ i = (4-nbytes)*8+br->offbits;
+ ret = ((ret<<i)>>i)>>((nbytes*8)-nbits-br->offbits);
+
+ br->offbits += nbits;
+ br->buffer += br->offbits / 8;
+ br->offbits %= 8;
+
+ return ret;
+}
+
+
+
+static void skip_bits( bits_reader_t *br, int nbits )
+{
+ br->offbits += nbits;
+ br->buffer += br->offbits / 8;
+ br->offbits %= 8;
+ if ( br->buffer > (br->start + br->length) ) {
+ br->oflow = 1;
+ }
+}
+
+
+
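+/* same as read_bits() but does not advance the read position */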
+static uint32_t get_bits( bits_reader_t *br, int nbits )
+{
+ int i, nbytes;
+ uint32_t ret = 0;
+ uint8_t *buf;
+
+ buf = br->buffer;
+ nbytes = (br->offbits + nbits)/8;
+ if ( ((br->offbits + nbits) %8 ) > 0 )
+ nbytes++;
+ if ( (buf + nbytes) > (br->start + br->length) ) {
+ br->oflow = 1;
+ return 0;
+ }
+ for ( i=0; i<nbytes; i++ )
+ ret += buf[i]<<((nbytes-i-1)*8);
+ i = (4-nbytes)*8+br->offbits;
+ ret = ((ret<<i)>>i)>>((nbytes*8)-nbits-br->offbits);
+
+ return ret;
+}
diff --git a/src/video_dec/libvdpau/cpb.c b/src/video_dec/libvdpau/cpb.c
new file mode 100644
index 000000000..d06238e5c
--- /dev/null
+++ b/src/video_dec/libvdpau/cpb.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2009 Julian Scheel
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * cpb.c: Coded Picture Buffer
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "cpb.h"
+
+#include <stdlib.h>
+
+struct coded_picture* create_coded_picture()
+{
+ struct coded_picture* pic = calloc(1, sizeof(struct coded_picture));
+ return pic;
+}
+
+void free_coded_picture(struct coded_picture *pic)
+{
+ if(!pic)
+ return;
+
+ release_nal_unit(pic->sei_nal);
+ release_nal_unit(pic->sps_nal);
+ release_nal_unit(pic->pps_nal);
+ release_nal_unit(pic->slc_nal);
+
+ free(pic);
+}
+
diff --git a/src/video_dec/libvdpau/cpb.h b/src/video_dec/libvdpau/cpb.h
new file mode 100644
index 000000000..37dbd94cf
--- /dev/null
+++ b/src/video_dec/libvdpau/cpb.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2009 Julian Scheel
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * cpb.h: Coded Picture Buffer
+ */
+
+#ifndef CPB_H_
+#define CPB_H_
+
+#include "nal.h"
+
+enum picture_flags {
+ IDR_PIC = 0x01,
+ REFERENCE = 0x02,
+ NOT_EXISTING = 0x04,
+ INTERLACED = 0x08
+};
+
+struct coded_picture
+{
+ uint32_t flag_mask;
+
+ uint32_t max_pic_num;
+ int32_t pic_num;
+
+ uint8_t used_for_long_term_ref;
+ uint32_t long_term_pic_num;
+ uint32_t long_term_frame_idx;
+
+ int32_t top_field_order_cnt;
+ int32_t bottom_field_order_cnt;
+
+ uint8_t repeat_pic;
+
+  /* number of slices that make up this picture and
+   * are passed to the decoder
+   */
+ uint32_t slice_cnt;
+
+ int64_t pts;
+
+ struct nal_unit *sei_nal;
+ struct nal_unit *sps_nal;
+ struct nal_unit *pps_nal;
+ struct nal_unit *slc_nal;
+};
+
+struct coded_picture* create_coded_picture(void);
+void free_coded_picture(struct coded_picture *pic);
+
+#endif /* CPB_H_ */
diff --git a/src/video_dec/libvdpau/dpb.c b/src/video_dec/libvdpau/dpb.c
new file mode 100644
index 000000000..c2afd42ba
--- /dev/null
+++ b/src/video_dec/libvdpau/dpb.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright (C) 2008 Julian Scheel
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * dpb.c: Implementing Decoded Picture Buffer
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cpb.h"
+#include "dpb.h"
+#include "nal.h"
+
+#include "h264_parser.h"
+
+#include "accel_vdpau.h"
+
+#include <xine/video_out.h>
+
+//#define DEBUG_DPB
+
+int dp_top_field_first(struct decoded_picture *decoded_pic)
+{
+ int top_field_first = 1;
+
+
+ if (decoded_pic->coded_pic[1] != NULL) {
+ if (!decoded_pic->coded_pic[0]->slc_nal->slc.bottom_field_flag &&
+ decoded_pic->coded_pic[1]->slc_nal->slc.bottom_field_flag &&
+ decoded_pic->coded_pic[0]->top_field_order_cnt !=
+ decoded_pic->coded_pic[1]->bottom_field_order_cnt) {
+ top_field_first = decoded_pic->coded_pic[0]->top_field_order_cnt < decoded_pic->coded_pic[1]->bottom_field_order_cnt;
+ } else if (decoded_pic->coded_pic[0]->slc_nal->slc.bottom_field_flag &&
+ !decoded_pic->coded_pic[1]->slc_nal->slc.bottom_field_flag &&
+ decoded_pic->coded_pic[0]->bottom_field_order_cnt !=
+ decoded_pic->coded_pic[1]->top_field_order_cnt) {
+ top_field_first = decoded_pic->coded_pic[0]->bottom_field_order_cnt > decoded_pic->coded_pic[1]->top_field_order_cnt;
+ }
+ }
+
+ if (decoded_pic->coded_pic[0]->flag_mask & PIC_STRUCT_PRESENT && decoded_pic->coded_pic[0]->sei_nal != NULL) {
+ uint8_t pic_struct = decoded_pic->coded_pic[0]->sei_nal->sei.pic_timing.pic_struct;
+ if(pic_struct == DISP_TOP_BOTTOM ||
+ pic_struct == DISP_TOP_BOTTOM_TOP) {
+ top_field_first = 1;
+ } else if (pic_struct == DISP_BOTTOM_TOP ||
+ pic_struct == DISP_BOTTOM_TOP_BOTTOM) {
+ top_field_first = 0;
+ } else if (pic_struct == DISP_FRAME) {
+ top_field_first = 1;
+ }
+ }
+
+ return top_field_first;
+}
+
+/**
+ * ----------------------------------------------------------------------------
+ * decoded picture
+ * ----------------------------------------------------------------------------
+ */
+
+void free_decoded_picture(struct decoded_picture *pic);
+
+struct decoded_picture* init_decoded_picture(struct coded_picture *cpic, vo_frame_t *img)
+{
+ struct decoded_picture *pic = calloc(1, sizeof(struct decoded_picture));
+
+ pic->coded_pic[0] = cpic;
+
+ decoded_pic_check_reference(pic);
+ pic->img = img;
+ pic->lock_counter = 1;
+
+ return pic;
+}
+
+void decoded_pic_check_reference(struct decoded_picture *pic)
+{
+ int i;
+ for(i = 0; i < 2; i++) {
+ struct coded_picture *cpic = pic->coded_pic[i];
+ if(cpic && (cpic->flag_mask & REFERENCE)) {
+ // FIXME: this assumes Top Field First!
+ if(i == 0) {
+ pic->top_is_reference = cpic->slc_nal->slc.field_pic_flag
+ ? (cpic->slc_nal->slc.bottom_field_flag ? 0 : 1) : 1;
+ }
+
+ pic->bottom_is_reference = cpic->slc_nal->slc.field_pic_flag
+ ? (cpic->slc_nal->slc.bottom_field_flag ? 1 : 0) : 1;
+ }
+ }
+}
+
+void decoded_pic_add_field(struct decoded_picture *pic,
+ struct coded_picture *cpic)
+{
+ pic->coded_pic[1] = cpic;
+
+ decoded_pic_check_reference(pic);
+}
+
+void release_decoded_picture(struct decoded_picture *pic)
+{
+ if(!pic)
+ return;
+
+ pic->lock_counter--;
+ //printf("release decoded picture: %p (%d)\n", pic, pic->lock_counter);
+
+ if(pic->lock_counter <= 0) {
+ free_decoded_picture(pic);
+ }
+}
+
+void lock_decoded_picture(struct decoded_picture *pic)
+{
+ if(!pic)
+ return;
+
+ pic->lock_counter++;
+ //printf("lock decoded picture: %p (%d)\n", pic, pic->lock_counter);
+}
+
+void free_decoded_picture(struct decoded_picture *pic)
+{
+ if(!pic)
+ return;
+
+ if(pic->img != NULL) {
+ pic->img->free(pic->img);
+ }
+
+ free_coded_picture(pic->coded_pic[1]);
+ free_coded_picture(pic->coded_pic[0]);
+ pic->coded_pic[0] = NULL;
+ pic->coded_pic[1] = NULL;
+ free(pic);
+}
+
+
+
+
+/**
+ * ----------------------------------------------------------------------------
+ * dpb code starting here
+ * ----------------------------------------------------------------------------
+ */
+
+struct dpb* create_dpb(void)
+{
+ struct dpb *dpb = calloc(1, sizeof(struct dpb));
+
+ dpb->output_list = xine_list_new();
+ dpb->reference_list = xine_list_new();
+
+ dpb->max_reorder_frames = MAX_DPB_COUNT;
+ dpb->max_dpb_frames = MAX_DPB_COUNT;
+
+ return dpb;
+}
+
+int dpb_total_frames(struct dpb *dpb)
+{
+ int num_frames = xine_list_size(dpb->output_list);
+
+ xine_list_iterator_t ite = xine_list_front(dpb->reference_list);
+ while(ite) {
+ struct decoded_picture *pic = xine_list_get_value(dpb->reference_list, ite);
+ if (xine_list_find(dpb->output_list, pic) == NULL) {
+ num_frames++;
+ }
+
+ ite = xine_list_next(dpb->reference_list, ite);
+ }
+
+ return num_frames;
+}
+
+void release_dpb(struct dpb *dpb)
+{
+ if(!dpb)
+ return;
+
+ dpb_free_all(dpb);
+
+ xine_list_delete(dpb->output_list);
+ xine_list_delete(dpb->reference_list);
+
+ free(dpb);
+}
+
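+/* selects the next picture in output order (lowest field order counts, with
+ * special cases for POC wraps and IDR pictures); returns NULL unless do_flush
+ * is set or the reorder/dpb limits are reached */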
+struct decoded_picture* dpb_get_next_out_picture(struct dpb *dpb, int do_flush)
+{
+  struct decoded_picture *pic = NULL;
+ struct decoded_picture *outpic = NULL;
+
+ if(!do_flush &&
+ xine_list_size(dpb->output_list) < dpb->max_reorder_frames &&
+ dpb_total_frames(dpb) < dpb->max_dpb_frames) {
+ return NULL;
+ }
+
+ xine_list_iterator_t ite = xine_list_back(dpb->output_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->output_list, ite);
+
+ int32_t out_top_field_order_cnt = outpic != NULL ?
+ outpic->coded_pic[0]->top_field_order_cnt : 0;
+ int32_t top_field_order_cnt = pic->coded_pic[0]->top_field_order_cnt;
+
+ int32_t out_bottom_field_order_cnt = outpic != NULL ?
+ (outpic->coded_pic[1] != NULL ?
+ outpic->coded_pic[1]->bottom_field_order_cnt :
+ outpic->coded_pic[0]->top_field_order_cnt) : 0;
+ int32_t bottom_field_order_cnt = pic->coded_pic[1] != NULL ?
+ pic->coded_pic[1]->bottom_field_order_cnt :
+ pic->coded_pic[0]->top_field_order_cnt;
+
+ if (outpic == NULL ||
+ (top_field_order_cnt <= out_top_field_order_cnt &&
+ bottom_field_order_cnt <= out_bottom_field_order_cnt) ||
+ (out_top_field_order_cnt <= 0 && top_field_order_cnt > 0 &&
+ out_bottom_field_order_cnt <= 0 && bottom_field_order_cnt > 0) ||
+ outpic->coded_pic[0]->flag_mask & IDR_PIC) {
+ outpic = pic;
+ }
+
+ ite = xine_list_prev(dpb->output_list, ite);
+ }
+
+ return outpic;
+}
+
+struct decoded_picture* dpb_get_picture(struct dpb *dpb, uint32_t picnum)
+{
+ struct decoded_picture *pic = NULL;
+
+ xine_list_iterator_t ite = xine_list_front(dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->reference_list, ite);
+
+ if ((pic->coded_pic[0]->pic_num == picnum ||
+ (pic->coded_pic[1] != NULL &&
+ pic->coded_pic[1]->pic_num == picnum))) {
+ return pic;
+ }
+
+ ite = xine_list_next(dpb->reference_list, ite);
+ }
+
+ return NULL;
+}
+
+struct decoded_picture* dpb_get_picture_by_ltpn(struct dpb *dpb,
+ uint32_t longterm_picnum)
+{
+ struct decoded_picture *pic = NULL;
+
+ xine_list_iterator_t ite = xine_list_front(dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->reference_list, ite);
+
+ if (pic->coded_pic[0]->long_term_pic_num == longterm_picnum ||
+ (pic->coded_pic[1] != NULL &&
+ pic->coded_pic[1]->long_term_pic_num == longterm_picnum)) {
+ return pic;
+ }
+
+ ite = xine_list_next(dpb->reference_list, ite);
+ }
+
+ return NULL;
+}
+
+struct decoded_picture* dpb_get_picture_by_ltidx(struct dpb *dpb,
+ uint32_t longterm_idx)
+{
+ struct decoded_picture *pic = NULL;
+
+ xine_list_iterator_t ite = xine_list_front(dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->reference_list, ite);
+
+ if (pic->coded_pic[0]->long_term_frame_idx == longterm_idx ||
+ (pic->coded_pic[1] != NULL &&
+ pic->coded_pic[1]->long_term_frame_idx == longterm_idx)) {
+ return pic;
+ }
+
+ ite = xine_list_next(dpb->reference_list, ite);
+ }
+
+ return NULL;
+}
+
+int dpb_set_unused_ref_picture_byltpn(struct dpb *dpb, uint32_t longterm_picnum)
+{
+ struct decoded_picture *pic = NULL;
+
+ xine_list_iterator_t ite = xine_list_front(dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->reference_list, ite);
+
+ uint8_t found = 0;
+
+ if (pic->coded_pic[0]->long_term_pic_num == longterm_picnum) {
+ pic->coded_pic[0]->used_for_long_term_ref = 0;
+ found = 1;
+ }
+
+ if ((pic->coded_pic[1] != NULL &&
+ pic->coded_pic[1]->long_term_pic_num == longterm_picnum)) {
+ pic->coded_pic[1]->used_for_long_term_ref = 0;
+ found = 1;
+ }
+
+ if(found && !pic->coded_pic[0]->used_for_long_term_ref &&
+ (pic->coded_pic[1] == NULL ||
+ !pic->coded_pic[1]->used_for_long_term_ref)) {
+ dpb_unmark_reference_picture(dpb, pic);
+ }
+
+ if (found)
+ return 0;
+
+ ite = xine_list_next(dpb->reference_list, ite);
+ }
+
+ return -1;
+}
+
+int dpb_set_unused_ref_picture_bylidx(struct dpb *dpb, uint32_t longterm_idx)
+{
+ struct decoded_picture *pic = NULL;
+
+ xine_list_iterator_t ite = xine_list_front(dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->reference_list, ite);
+
+ uint8_t found = 0;
+
+ if (pic->coded_pic[0]->long_term_frame_idx == longterm_idx) {
+ pic->coded_pic[0]->used_for_long_term_ref = 0;
+ found = 1;
+ }
+
+ if ((pic->coded_pic[1] != NULL &&
+ pic->coded_pic[1]->long_term_frame_idx == longterm_idx)) {
+ pic->coded_pic[1]->used_for_long_term_ref = 0;
+ found = 1;
+ }
+
+ if(found && !pic->coded_pic[0]->used_for_long_term_ref &&
+ (pic->coded_pic[1] == NULL ||
+ !pic->coded_pic[1]->used_for_long_term_ref)) {
+ dpb_unmark_reference_picture(dpb, pic);
+ }
+
+ if (found)
+ return 0;
+
+ ite = xine_list_next(dpb->reference_list, ite);
+ }
+
+ return -1;
+}
+
+int dpb_set_unused_ref_picture_lidx_gt(struct dpb *dpb, int32_t longterm_idx)
+{
+ struct decoded_picture *pic = NULL;
+
+ xine_list_iterator_t ite = xine_list_front(dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->reference_list, ite);
+
+ uint8_t found = 0;
+
+ if (pic->coded_pic[0]->long_term_frame_idx >= longterm_idx) {
+ pic->coded_pic[0]->used_for_long_term_ref = 0;
+ found = 1;
+ }
+
+ if ((pic->coded_pic[1] != NULL &&
+ pic->coded_pic[1]->long_term_frame_idx >= longterm_idx)) {
+ pic->coded_pic[1]->used_for_long_term_ref = 0;
+ found = 1;
+ }
+
+ if(found && !pic->coded_pic[0]->used_for_long_term_ref &&
+ (pic->coded_pic[1] == NULL ||
+ !pic->coded_pic[1]->used_for_long_term_ref)) {
+ dpb_unmark_reference_picture(dpb, pic);
+ }
+
+ ite = xine_list_next(dpb->reference_list, ite);
+ }
+
+ return -1;
+}
+
+
+int dpb_unmark_picture_delayed(struct dpb *dpb, struct decoded_picture *pic)
+{
+ if(!pic)
+ return -1;
+
+ xine_list_iterator_t ite = xine_list_find(dpb->output_list, pic);
+ if (ite) {
+ xine_list_remove(dpb->output_list, ite);
+ release_decoded_picture(pic);
+
+ return 0;
+ }
+
+ return -1;
+}
+
+int dpb_unmark_reference_picture(struct dpb *dpb, struct decoded_picture *pic)
+{
+ if(!pic)
+ return -1;
+
+ xine_list_iterator_t ite = xine_list_find(dpb->reference_list, pic);
+ if (ite) {
+ xine_list_remove(dpb->reference_list, ite);
+ release_decoded_picture(pic);
+
+ return 0;
+ }
+
+ return -1;
+}
+
+/*static int dpb_remove_picture_by_img(struct dpb *dpb, vo_frame_t *remimg)
+{
+ int retval = -1;
+ struct decoded_picture *pic = NULL;
+
+ xine_list_iterator_t ite = xine_list_front(dpb->output_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->output_list, ite);
+
+ if (pic->img == remimg) {
+ dpb_unmark_picture_delayed(dpb, pic);
+ dpb->used--;
+ retval = 0;
+ }
+
+ ite = xine_list_next(dpb->output_list, ite);
+ }
+
+ return retval;
+}*/
+
+
+int dpb_add_picture(struct dpb *dpb, struct decoded_picture *pic, uint32_t num_ref_frames)
+{
+#if 0
+ /* this should never happen */
+ pic->img->lock(pic->img);
+ if (0 == dpb_remove_picture_by_img(dpb, pic->img))
+ lprintf("H264/DPB broken stream: current img was already in dpb -- freed it\n");
+ else
+ pic->img->free(pic->img);
+#endif
+
+  /* add the pic to the output picture list, as no
+   * pic is drawn immediately.
+   * take an extra reference (lock) for this list
+ */
+ lock_decoded_picture(pic);
+ xine_list_push_back(dpb->output_list, pic);
+
+
+ /* check if the pic is a reference pic,
+ * if it is it should be added to the reference
+ * list. another lock has to be acquired in that case
+ */
+ if (pic->coded_pic[0]->flag_mask & REFERENCE ||
+ (pic->coded_pic[1] != NULL &&
+ pic->coded_pic[1]->flag_mask & REFERENCE)) {
+ lock_decoded_picture(pic);
+ xine_list_push_back(dpb->reference_list, pic);
+
+ /*
+ * always apply the sliding window reference removal, if more reference
+ * frames than expected are in the list. we will always remove the oldest
+ * reference frame
+ */
+ if(xine_list_size(dpb->reference_list) > num_ref_frames) {
+ struct decoded_picture *discard = xine_list_get_value(dpb->reference_list, xine_list_front(dpb->reference_list));
+ dpb_unmark_reference_picture(dpb, discard);
+ }
+ }
+
+#ifdef DEBUG_DPB
+ printf("DPB list sizes: Total: %2d, Output: %2d, Reference: %2d\n",
+ dpb_total_frames(dpb), xine_list_size(dpb->output_list),
+ xine_list_size(dpb->reference_list));
+#endif
+
+ return 0;
+}
+
+int dpb_flush(struct dpb *dpb)
+{
+ struct decoded_picture *pic = NULL;
+
+ xine_list_iterator_t ite = xine_list_front(dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->reference_list, ite);
+
+ dpb_unmark_reference_picture(dpb, pic);
+
+ /* CAUTION: xine_list_next would return an item, but not the one we
+ * expect, as the current one was deleted
+ */
+ ite = xine_list_front(dpb->reference_list);
+ }
+
+ return 0;
+}
+
+void dpb_free_all(struct dpb *dpb)
+{
+ xine_list_iterator_t ite = xine_list_front(dpb->output_list);
+ while(ite) {
+ dpb_unmark_picture_delayed(dpb, xine_list_get_value(dpb->output_list, ite));
+ /* CAUTION: xine_list_next would return an item, but not the one we
+ * expect, as the current one was deleted
+ */
+ ite = xine_list_front(dpb->output_list);
+ }
+
+ ite = xine_list_front(dpb->reference_list);
+ while(ite) {
+ dpb_unmark_reference_picture(dpb, xine_list_get_value(dpb->reference_list, ite));
+ /* CAUTION: xine_list_next would return an item, but not the one we
+ * expect, as the current one was deleted
+ */
+ ite = xine_list_front(dpb->reference_list);
+ }
+}
+
+void dpb_clear_all_pts(struct dpb *dpb)
+{
+ xine_list_iterator_t ite = xine_list_front(dpb->output_list);
+ while(ite) {
+ struct decoded_picture *pic = xine_list_get_value(dpb->output_list, ite);
+ pic->img->pts = 0;
+
+ ite = xine_list_next(dpb->output_list, ite);
+ }
+}
+
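+/* copies every picture in the reference list into the VdpReferenceFrameH264
+ * array expected by vdp_decoder_render(), padding the remaining entries with
+ * VDP_INVALID_HANDLE; returns the number of entries actually used */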
+int fill_vdpau_reference_list(struct dpb *dpb, VdpReferenceFrameH264 *reflist)
+{
+ struct decoded_picture *pic = NULL;
+
+ int i = 0;
+ int used_refframes = 0;
+
+ xine_list_iterator_t ite = xine_list_back(dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(dpb->reference_list, ite);
+ reflist[i].surface = ((vdpau_accel_t*)pic->img->accel_data)->surface;
+ reflist[i].is_long_term = pic->coded_pic[0]->used_for_long_term_ref ||
+ (pic->coded_pic[1] != NULL && pic->coded_pic[1]->used_for_long_term_ref);
+
+ reflist[i].frame_idx = pic->coded_pic[0]->used_for_long_term_ref ?
+ pic->coded_pic[0]->long_term_pic_num :
+ pic->coded_pic[0]->slc_nal->slc.frame_num;
+ reflist[i].top_is_reference = pic->top_is_reference;
+ reflist[i].bottom_is_reference = pic->bottom_is_reference;
+ reflist[i].field_order_cnt[0] = pic->coded_pic[0]->top_field_order_cnt;
+ reflist[i].field_order_cnt[1] = pic->coded_pic[1] != NULL ?
+ pic->coded_pic[1]->bottom_field_order_cnt :
+ pic->coded_pic[0]->bottom_field_order_cnt;
+ i++;
+
+ ite = xine_list_prev(dpb->reference_list, ite);
+ }
+
+ used_refframes = i;
+
+ // fill all other frames with invalid handles
+ while(i < 16) {
+ reflist[i].bottom_is_reference = VDP_FALSE;
+ reflist[i].top_is_reference = VDP_FALSE;
+ reflist[i].frame_idx = 0;
+ reflist[i].is_long_term = VDP_FALSE;
+ reflist[i].surface = VDP_INVALID_HANDLE;
+ reflist[i].field_order_cnt[0] = 0;
+ reflist[i].field_order_cnt[1] = 0;
+ i++;
+ }
+
+ return used_refframes;
+}
diff --git a/src/video_dec/libvdpau/dpb.h b/src/video_dec/libvdpau/dpb.h
new file mode 100644
index 000000000..356bcbf70
--- /dev/null
+++ b/src/video_dec/libvdpau/dpb.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2008 Julian Scheel
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * dpb.h: Decoded Picture Buffer
+ */
+
+#ifndef DPB_H_
+#define DPB_H_
+
+#define MAX_DPB_COUNT 16
+
+#include "nal.h"
+#include "cpb.h"
+#include <xine/video_out.h>
+#include <xine/list.h>
+
+#define USED_FOR_REF (top_is_reference || bottom_is_reference)
+
+/**
+ * ----------------------------------------------------------------------------
+ * decoded picture
+ * ----------------------------------------------------------------------------
+ */
+
+struct decoded_picture {
+  vo_frame_t *img; /* this is the image we keep locked, to make sure
+                    * the surface is not used twice */
+
+ /**
+   * a decoded picture always contains a whole frame,
+   * or a complementary field pair, so it can contain up to
+   * 2 coded pics
+ */
+ struct coded_picture *coded_pic[2];
+
+ int32_t frame_num_wrap;
+
+ uint8_t top_is_reference;
+ uint8_t bottom_is_reference;
+
+ uint32_t lock_counter;
+};
+
+struct decoded_picture* init_decoded_picture(struct coded_picture *cpic,
+ vo_frame_t *img);
+void release_decoded_picture(struct decoded_picture *pic);
+void lock_decoded_picture(struct decoded_picture *pic);
+void decoded_pic_check_reference(struct decoded_picture *pic);
+void decoded_pic_add_field(struct decoded_picture *pic,
+ struct coded_picture *cpic);
+
+
+/**
+ * ----------------------------------------------------------------------------
+ * dpb code starting here
+ * ----------------------------------------------------------------------------
+ */
+
+/* Decoded Picture Buffer */
+struct dpb {
+ xine_list_t *reference_list;
+ xine_list_t *output_list;
+
+ int max_reorder_frames;
+ int max_dpb_frames;
+};
+
+struct dpb* create_dpb(void);
+void release_dpb(struct dpb *dpb);
+
+/**
+ * calculates the total number of frames in the dpb.
+ * when frames are used for reference but have not been
+ * drawn yet, the result is less than reference_list size +
+ * output_list size
+ */
+int dpb_total_frames(struct dpb *dpb);
+
+struct decoded_picture* dpb_get_next_out_picture(struct dpb *dpb, int do_flush);
+
+struct decoded_picture* dpb_get_picture(struct dpb *dpb, uint32_t picnum);
+struct decoded_picture* dpb_get_picture_by_ltpn(struct dpb *dpb, uint32_t longterm_picnum);
+struct decoded_picture* dpb_get_picture_by_ltidx(struct dpb *dpb, uint32_t longterm_idx);
+
+int dpb_set_unused_ref_picture_byltpn(struct dpb *dpb, uint32_t longterm_picnum);
+int dpb_set_unused_ref_picture_bylidx(struct dpb *dpb, uint32_t longterm_idx);
+int dpb_set_unused_ref_picture_lidx_gt(struct dpb *dpb, int32_t longterm_idx);
+
+int dpb_unmark_picture_delayed(struct dpb *dpb, struct decoded_picture *pic);
+int dpb_unmark_reference_picture(struct dpb *dpb, struct decoded_picture *pic);
+
+int dpb_add_picture(struct dpb *dpb, struct decoded_picture *pic, uint32_t num_ref_frames);
+int dpb_flush(struct dpb *dpb);
+void dpb_free_all(struct dpb *dpb);
+void dpb_clear_all_pts(struct dpb *dpb);
+
+int fill_vdpau_reference_list(struct dpb *dpb, VdpReferenceFrameH264 *reflist);
+
+int dp_top_field_first(struct decoded_picture *decoded_pic);
+
+#endif /* DPB_H_ */
diff --git a/src/video_dec/libvdpau/h264_parser.c b/src/video_dec/libvdpau/h264_parser.c
new file mode 100644
index 000000000..d495bf483
--- /dev/null
+++ b/src/video_dec/libvdpau/h264_parser.c
@@ -0,0 +1,2038 @@
+/*
+ * Copyright (C) 2008 Julian Scheel
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * h264_parser.c: Almost full-featured H264 NAL parser
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
+#include "h264_parser.h"
+#include "nal.h"
+#include "cpb.h"
+
+/* default scaling_lists according to Table 7-2 */
+uint8_t default_4x4_intra[16] = { 6, 13, 13, 20, 20, 20, 28, 28, 28, 28, 32,
+ 32, 32, 37, 37, 42 };
+
+uint8_t default_4x4_inter[16] = { 10, 14, 14, 20, 20, 20, 24, 24, 24, 24, 27,
+ 27, 27, 30, 30, 34 };
+
+uint8_t default_8x8_intra[64] = { 6, 10, 10, 13, 11, 13, 16, 16, 16, 16, 18,
+ 18, 18, 18, 18, 23, 23, 23, 23, 23, 23, 25, 25, 25, 25, 25, 25, 25, 27, 27,
+ 27, 27, 27, 27, 27, 27, 29, 29, 29, 29, 29, 29, 29, 31, 31, 31, 31, 31, 31,
+ 33, 33, 33, 33, 33, 36, 36, 36, 36, 38, 38, 38, 40, 40, 42 };
+
+uint8_t default_8x8_inter[64] = { 9, 13, 13, 15, 13, 15, 17, 17, 17, 17, 19,
+ 19, 19, 19, 19, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 24, 24,
+ 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 27, 27, 27, 27, 27, 27,
+ 28, 28, 28, 28, 28, 30, 30, 30, 30, 32, 32, 32, 33, 33, 35 };
+
+struct buf_reader
+{
+ uint8_t *buf;
+ uint8_t *cur_pos;
+ int len;
+ int cur_offset;
+};
+
+struct h264_parser* init_parser();
+
+static inline uint32_t read_bits(struct buf_reader *buf, int len);
+uint32_t read_exp_golomb(struct buf_reader *buf);
+int32_t read_exp_golomb_s(struct buf_reader *buf);
+
+void calculate_pic_order(struct h264_parser *parser, struct coded_picture *pic,
+ struct slice_header *slc);
+void skip_scaling_list(struct buf_reader *buf, int size);
+void parse_scaling_list(struct buf_reader *buf, uint8_t *scaling_list,
+ int length, int index);
+
+struct nal_unit* parse_nal_header(struct buf_reader *buf,
+ struct coded_picture *pic, struct h264_parser *parser);
+static void sps_scaling_list_fallback(struct seq_parameter_set_rbsp *sps,
+ int i);
+static void pps_scaling_list_fallback(struct seq_parameter_set_rbsp *sps,
+ struct pic_parameter_set_rbsp *pps, int i);
+
+uint8_t parse_sps(struct buf_reader *buf, struct seq_parameter_set_rbsp *sps);
+void interpret_sps(struct coded_picture *pic, struct h264_parser *parser);
+
+void parse_vui_parameters(struct buf_reader *buf,
+ struct seq_parameter_set_rbsp *sps);
+void parse_hrd_parameters(struct buf_reader *buf, struct hrd_parameters *hrd);
+
+uint8_t parse_pps(struct buf_reader *buf, struct pic_parameter_set_rbsp *pps);
+void interpret_pps(struct coded_picture *pic);
+
+void parse_sei(struct buf_reader *buf, struct sei_message *sei,
+ struct h264_parser *parser);
+void interpret_sei(struct coded_picture *pic);
+
+uint8_t parse_slice_header(struct buf_reader *buf, struct nal_unit *slc_nal,
+ struct h264_parser *parser);
+void interpret_slice_header(struct h264_parser *parser, struct nal_unit *slc_nal);
+
+void parse_ref_pic_list_reordering(struct buf_reader *buf,
+ struct slice_header *slc);
+
+void calculate_pic_nums(struct h264_parser *parser, struct coded_picture *cpic);
+void execute_ref_pic_marking(struct coded_picture *cpic,
+ uint32_t memory_management_control_operation,
+ uint32_t marking_nr,
+ struct h264_parser *parser);
+void parse_pred_weight_table(struct buf_reader *buf, struct slice_header *slc,
+ struct h264_parser *parser);
+void parse_dec_ref_pic_marking(struct buf_reader *buf,
+ struct nal_unit *slc_nal);
+
+/* here goes the parser implementation */
+
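+/* copies a NAL unit while stripping the emulation_prevention_three_byte
+ * sequences (00 00 03 becomes 00 00); the caller owns the returned buffer */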
+static void decode_nal(uint8_t **ret, int *len_ret, uint8_t *buf, int buf_len)
+{
+ // TODO: rework without copying
+ uint8_t *end = &buf[buf_len];
+ uint8_t *pos = malloc(buf_len);
+
+ *ret = pos;
+ while (buf < end) {
+ if (buf < end - 3 && buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0x03) {
+
+ *pos++ = 0x00;
+ *pos++ = 0x00;
+
+ buf += 3;
+ continue;
+ }
+ *pos++ = *buf++;
+ }
+
+ *len_ret = pos - *ret;
+}
+
+#if 0
+static inline void dump_bits(const char *label, const struct buf_reader *buf, int bits)
+{
+ struct buf_reader lbuf;
+ memcpy(&lbuf, buf, sizeof(struct buf_reader));
+
+ int i;
+ printf("%s: 0b", label);
+ for(i=0; i < bits; i++)
+ printf("%d", read_bits(&lbuf, 1));
+ printf("\n");
+}
+#endif
+
+/**
+ * @return total number of bits read by the buf_reader
+ */
+static inline uint32_t bits_read(struct buf_reader *buf)
+{
+ int bits_read = 0;
+ bits_read = (buf->cur_pos - buf->buf)*8;
+ bits_read += (8-buf->cur_offset);
+
+ return bits_read;
+}
+
+/* skips an emulation_prevention_three_byte (a 0x03 following two zero bytes)
+ * in the buf_reader */
+static inline void skip_emulation_prevention_three_byte(struct buf_reader *buf)
+{
+ if(buf->cur_pos - buf->buf > 2 &&
+ *(buf->cur_pos-2) == 0x00 &&
+ *(buf->cur_pos-1) == 0x00 &&
+ *buf->cur_pos == 0x03) {
+ buf->cur_pos++;
+ }
+}
+
+/*
+ * read len bits from the buffer and return them
+ * @return right aligned bits
+ */
+static inline uint32_t read_bits(struct buf_reader *buf, int len)
+{
+ static uint32_t i_mask[33] = { 0x00, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f,
+ 0x7f, 0xff, 0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff,
+ 0x1ffff, 0x3ffff, 0x7ffff, 0xfffff, 0x1fffff, 0x3fffff, 0x7fffff,
+ 0xffffff, 0x1ffffff, 0x3ffffff, 0x7ffffff, 0xfffffff, 0x1fffffff,
+ 0x3fffffff, 0x7fffffff, 0xffffffff };
+
+ int i_shr;
+ uint32_t bits = 0;
+
+ while (len > 0 && (buf->cur_pos - buf->buf) < buf->len) {
+ if ((i_shr = buf->cur_offset - len) >= 0) {
+ bits |= (*buf->cur_pos >> i_shr) & i_mask[len];
+ buf->cur_offset -= len;
+ if (buf->cur_offset == 0) {
+ buf->cur_pos++;
+ buf->cur_offset = 8;
+
+ skip_emulation_prevention_three_byte(buf);
+ }
+ return bits;
+ }
+ else {
+ bits |= (*buf->cur_pos & i_mask[buf->cur_offset]) << -i_shr;
+ len -= buf->cur_offset;
+ buf->cur_pos++;
+ buf->cur_offset = 8;
+
+ skip_emulation_prevention_three_byte(buf);
+ }
+ }
+ return bits;
+}
+
+/* locates the rbsp_stop_one_bit at the end of the NAL unit and returns the
+ * number of trailing alignment zero bits below it */
+static inline int rbsp_trailing_bits(uint8_t *buf, int buf_len)
+{
+ uint8_t *cur_buf = buf+(buf_len-1);
+ uint8_t cur_val;
+ int parsed_bits = 0;
+ int i;
+
+  while(buf_len > 0) {
+    cur_val = *cur_buf;
+    for(i = 0; i < 8; i++) {
+      if (cur_val&1)
+        return parsed_bits+i;
+      cur_val>>=1;
+    }
+    parsed_bits += 8;
+    cur_buf--;
+    buf_len--;
+  }
+
+ lprintf("rbsp trailing bits could not be found\n");
+ return 0;
+}
+
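+/* reads an unsigned Exp-Golomb code ue(v) (ITU-T H.264 9.1): count the
+ * leading zero bits, then read that many more bits; e.g. the bit string
+ * "00101" has 2 leading zeros, so ue = (1 << 2) - 1 + 0b01 = 4 */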
+uint32_t read_exp_golomb(struct buf_reader *buf)
+{
+ int leading_zero_bits = 0;
+
+ while (read_bits(buf, 1) == 0 && leading_zero_bits < 32)
+ leading_zero_bits++;
+
+ uint32_t code = (1 << leading_zero_bits) - 1 + read_bits(buf,
+ leading_zero_bits);
+ return code;
+}
+
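+/* reads a signed Exp-Golomb code se(v) (ITU-T H.264 9.1.1):
+ * ue 0,1,2,3,4,... maps to se 0,1,-1,2,-2,... */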
+int32_t read_exp_golomb_s(struct buf_reader *buf)
+{
+ uint32_t ue = read_exp_golomb(buf);
+ int32_t code = ue & 0x01 ? (ue + 1) / 2 : -(ue / 2);
+ return code;
+}
+
+
+/**
+ * parses the NAL header data and calls the subsequent
+ * parser methods that handle specific NAL units
+ */
+struct nal_unit* parse_nal_header(struct buf_reader *buf,
+ struct coded_picture *pic, struct h264_parser *parser)
+{
+ if (buf->len < 1)
+ return NULL;
+
+
+ struct nal_unit *nal = create_nal_unit();
+
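+  /* NAL unit header (7.3.1): forbidden_zero_bit (1 bit),
+   * nal_ref_idc (2 bits), nal_unit_type (5 bits) */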
+ nal->nal_ref_idc = (buf->buf[0] >> 5) & 0x03;
+ nal->nal_unit_type = buf->buf[0] & 0x1f;
+
+ buf->cur_pos = buf->buf + 1;
+ //lprintf("NAL: %d\n", nal->nal_unit_type);
+
+ switch (nal->nal_unit_type) {
+ case NAL_SPS:
+ parse_sps(buf, &nal->sps);
+ break;
+ case NAL_PPS:
+ parse_pps(buf, &nal->pps);
+ break;
+ case NAL_SLICE:
+ case NAL_PART_A:
+ case NAL_PART_B:
+ case NAL_PART_C:
+ case NAL_SLICE_IDR:
+ parse_slice_header(buf, nal, parser);
+ break;
+ case NAL_SEI:
+ memset(&(nal->sei), 0x00, sizeof(struct sei_message));
+ parse_sei(buf, &nal->sei, parser);
+ break;
+ default:
+ break;
+ }
+
+ return nal;
+}
+
+/**
+ * calculates the picture order count according to ITU-T Rec. H.264 (11/2007)
+ * chapter 8.2.1, p104f
+ */
+void calculate_pic_order(struct h264_parser *parser, struct coded_picture *pic,
+ struct slice_header *slc)
+{
+ /* retrieve sps and pps from the buffers */
+ struct nal_unit *pps_nal =
+ nal_buffer_get_by_pps_id(parser->pps_buffer, slc->pic_parameter_set_id);
+
+ if (pps_nal == NULL) {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "ERR: calculate_pic_order: pic_parameter_set_id %d not found in buffers\n",
+ slc->pic_parameter_set_id);
+ return;
+ }
+
+ struct pic_parameter_set_rbsp *pps = &pps_nal->pps;
+
+ struct nal_unit *sps_nal =
+ nal_buffer_get_by_sps_id(parser->sps_buffer, pps->seq_parameter_set_id);
+
+ if (sps_nal == NULL) {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "ERR: calculate_pic_order: seq_parameter_set_id %d not found in buffers\n",
+ pps->seq_parameter_set_id);
+ return;
+ }
+
+ struct seq_parameter_set_rbsp *sps = &sps_nal->sps;
+
+ if (sps->pic_order_cnt_type == 0) {
+
+ if (pic->flag_mask & IDR_PIC) {
+ parser->prev_pic_order_cnt_lsb = 0;
+ parser->prev_pic_order_cnt_msb = 0;
+
+
+ // FIXME
+ parser->frame_num_offset = 0;
+ }
+
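+    /* 8.2.1.1: derive PicOrderCntMsb by detecting wrap-arounds of
+     * pic_order_cnt_lsb relative to the previous reference picture */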
+ const int max_poc_lsb = 1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+
+ uint32_t pic_order_cnt_msb = 0;
+
+ if (slc->pic_order_cnt_lsb < parser->prev_pic_order_cnt_lsb
+ && parser->prev_pic_order_cnt_lsb - slc->pic_order_cnt_lsb
+ >= max_poc_lsb / 2)
+ pic_order_cnt_msb = parser->prev_pic_order_cnt_msb + max_poc_lsb;
+ else if (slc->pic_order_cnt_lsb > parser->prev_pic_order_cnt_lsb
+ && parser->prev_pic_order_cnt_lsb - slc->pic_order_cnt_lsb
+ < -max_poc_lsb / 2)
+ pic_order_cnt_msb = parser->prev_pic_order_cnt_msb - max_poc_lsb;
+ else
+ pic_order_cnt_msb = parser->prev_pic_order_cnt_msb;
+
+ if(!slc->field_pic_flag || !slc->bottom_field_flag) {
+ pic->top_field_order_cnt = pic_order_cnt_msb + slc->pic_order_cnt_lsb;
+ parser->prev_top_field_order_cnt = pic->top_field_order_cnt;
+ }
+
+ if (pic->flag_mask & REFERENCE) {
+ parser->prev_pic_order_cnt_msb = pic_order_cnt_msb;
+ }
+
+ pic->bottom_field_order_cnt = 0;
+
+ if(!slc->field_pic_flag)
+ pic->bottom_field_order_cnt = pic->top_field_order_cnt + slc->delta_pic_order_cnt_bottom;
+ else //if(slc->bottom_field_flag) //TODO: this is not spec compliant, but works...
+ pic->bottom_field_order_cnt = pic_order_cnt_msb + slc->pic_order_cnt_lsb;
+
+ if(slc->field_pic_flag && slc->bottom_field_flag)
+ pic->top_field_order_cnt = parser->prev_top_field_order_cnt;
+
+ } else if (sps->pic_order_cnt_type == 2) {
+ uint32_t prev_frame_num = parser->last_vcl_nal ? parser->last_vcl_nal->slc.frame_num : 0;
+ uint32_t prev_frame_num_offset = parser->frame_num_offset;
+ uint32_t temp_pic_order_cnt = 0;
+
+ if (parser->pic->flag_mask & IDR_PIC)
+ parser->frame_num_offset = 0;
+ else if (prev_frame_num > slc->frame_num)
+ parser->frame_num_offset = prev_frame_num_offset + sps->max_frame_num;
+ else
+ parser->frame_num_offset = prev_frame_num_offset;
+
+ if(parser->pic->flag_mask & IDR_PIC)
+ temp_pic_order_cnt = 0;
+ else if(!(parser->pic->flag_mask & REFERENCE))
+ temp_pic_order_cnt = 2 * (parser->frame_num_offset + slc->frame_num)-1;
+ else
+ temp_pic_order_cnt = 2 * (parser->frame_num_offset + slc->frame_num);
+
+ if(!slc->field_pic_flag)
+ pic->top_field_order_cnt = pic->bottom_field_order_cnt = temp_pic_order_cnt;
+ else if(slc->bottom_field_flag)
+ pic->bottom_field_order_cnt = temp_pic_order_cnt;
+ else
+ pic->top_field_order_cnt = temp_pic_order_cnt;
+
+ } else {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "FIXME: Unsupported poc_type: %d\n", sps->pic_order_cnt_type);
+ }
+}
+
+void skip_scaling_list(struct buf_reader *buf, int size)
+{
+ int i;
+ for (i = 0; i < size; i++) {
+ read_exp_golomb_s(buf);
+ }
+}
+
+void parse_scaling_list(struct buf_reader *buf, uint8_t *scaling_list,
+ int length, int index)
+{
+ int last_scale = 8;
+ int next_scale = 8;
+ int32_t delta_scale;
+ uint8_t use_default_scaling_matrix_flag = 0;
+ int i;
+
+ const uint8_t *zigzag = (length==64) ? zigzag_8x8 : zigzag_4x4;
+
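+  /* 7.3.2.1.1.1: every entry is coded as a delta to the previous one;
+   * a delta that yields next_scale == 0 selects the default matrix on the
+   * first entry and repeats the last value on all following entries */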
+ for (i = 0; i < length; i++) {
+ if (next_scale != 0) {
+ delta_scale = read_exp_golomb_s(buf);
+ next_scale = (last_scale + delta_scale + 256) % 256;
+ if (i == 0 && next_scale == 0) {
+ use_default_scaling_matrix_flag = 1;
+ break;
+ }
+ }
+ scaling_list[zigzag[i]] = last_scale = (next_scale == 0) ? last_scale : next_scale;
+ }
+
+ if (use_default_scaling_matrix_flag) {
+ switch (index) {
+ case 0:
+ case 1:
+ case 2: {
+ for(i = 0; i < sizeof(default_4x4_intra); i++) {
+ scaling_list[zigzag_4x4[i]] = default_4x4_intra[i];
+ }
+ //memcpy(scaling_list, default_4x4_intra, sizeof(default_4x4_intra));
+ break;
+ }
+ case 3:
+ case 4:
+ case 5: {
+ for(i = 0; i < sizeof(default_4x4_inter); i++) {
+ scaling_list[zigzag_4x4[i]] = default_4x4_inter[i];
+ }
+ //memcpy(scaling_list, default_4x4_inter, sizeof(default_4x4_inter));
+ break;
+ }
+ case 6: {
+ for(i = 0; i < sizeof(default_8x8_intra); i++) {
+ scaling_list[zigzag_8x8[i]] = default_8x8_intra[i];
+ }
+ //memcpy(scaling_list, default_8x8_intra, sizeof(default_8x8_intra));
+ break;
+ }
+ case 7: {
+ for(i = 0; i < sizeof(default_8x8_inter); i++) {
+ scaling_list[zigzag_8x8[i]] = default_8x8_inter[i];
+ }
+ //memcpy(scaling_list, default_8x8_inter, sizeof(default_8x8_inter));
+ break;
+ }
+ }
+ }
+}
+
+static void sps_scaling_list_fallback(struct seq_parameter_set_rbsp *sps, int i)
+{
+ int j;
+ switch (i) {
+ case 0: {
+ for(j = 0; j < sizeof(default_4x4_intra); j++) {
+ sps->scaling_lists_4x4[i][zigzag_4x4[j]] = default_4x4_intra[j];
+ }
+ //memcpy(sps->scaling_lists_4x4[i], default_4x4_intra, sizeof(sps->scaling_lists_4x4[i]));
+ break;
+ }
+ case 3: {
+ for(j = 0; j < sizeof(default_4x4_inter); j++) {
+ sps->scaling_lists_4x4[i][zigzag_4x4[j]] = default_4x4_inter[j];
+ }
+ //memcpy(sps->scaling_lists_4x4[i], default_4x4_inter, sizeof(sps->scaling_lists_4x4[i]));
+ break;
+ }
+ case 1:
+ case 2:
+ case 4:
+ case 5:
+ memcpy(sps->scaling_lists_4x4[i], sps->scaling_lists_4x4[i-1], sizeof(sps->scaling_lists_4x4[i]));
+ break;
+ case 6: {
+ for(j = 0; j < sizeof(default_8x8_intra); j++) {
+ sps->scaling_lists_8x8[i-6][zigzag_8x8[j]] = default_8x8_intra[j];
+ }
+ //memcpy(sps->scaling_lists_8x8[i-6], default_8x8_intra, sizeof(sps->scaling_lists_8x8[i-6]));
+ break;
+ }
+ case 7: {
+ for(j = 0; j < sizeof(default_8x8_inter); j++) {
+ sps->scaling_lists_8x8[i-6][zigzag_8x8[j]] = default_8x8_inter[j];
+ }
+ //memcpy(sps->scaling_lists_8x8[i-6], default_8x8_inter, sizeof(sps->scaling_lists_8x8[i-6]));
+ break;
+ }
+
+ }
+}
+
+static void pps_scaling_list_fallback(struct seq_parameter_set_rbsp *sps, struct pic_parameter_set_rbsp *pps, int i)
+{
+ switch (i) {
+ case 0:
+ case 3:
+ memcpy(pps->scaling_lists_4x4[i], sps->scaling_lists_4x4[i], sizeof(pps->scaling_lists_4x4[i]));
+ break;
+ case 1:
+ case 2:
+ case 4:
+ case 5:
+ memcpy(pps->scaling_lists_4x4[i], pps->scaling_lists_4x4[i-1], sizeof(pps->scaling_lists_4x4[i]));
+ break;
+ case 6:
+ case 7:
+ memcpy(pps->scaling_lists_8x8[i-6], sps->scaling_lists_8x8[i-6], sizeof(pps->scaling_lists_8x8[i-6]));
+ break;
+
+ }
+}
+
+
+uint8_t parse_sps(struct buf_reader *buf, struct seq_parameter_set_rbsp *sps)
+{
+ sps->profile_idc = read_bits(buf, 8);
+ sps->constraint_setN_flag = read_bits(buf, 4);
+ read_bits(buf, 4);
+ sps->level_idc = read_bits(buf, 8);
+
+ sps->seq_parameter_set_id = read_exp_golomb(buf);
+
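+  /* initialise all scaling lists with the flat defaults (every entry 16) */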
+ memset(sps->scaling_lists_4x4, 16, sizeof(sps->scaling_lists_4x4));
+ memset(sps->scaling_lists_8x8, 16, sizeof(sps->scaling_lists_8x8));
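+  /* profiles 100/110/122/244/44/83/86 (High, High 10, High 4:2:2,
+   * High 4:4:4 Predictive, CAVLC 4:4:4 Intra, Scalable Baseline and
+   * Scalable High, presumably) carry the extra chroma format, bit depth
+   * and scaling matrix fields */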
+ if (sps->profile_idc == 100 || sps->profile_idc == 110 || sps->profile_idc
+ == 122 || sps->profile_idc == 244 || sps->profile_idc == 44 ||
+ sps->profile_idc == 83 || sps->profile_idc == 86) {
+ sps->chroma_format_idc = read_exp_golomb(buf);
+ if (sps->chroma_format_idc == 3) {
+ sps->separate_colour_plane_flag = read_bits(buf, 1);
+ }
+
+ sps->bit_depth_luma_minus8 = read_exp_golomb(buf);
+ sps->bit_depth_chroma_minus8 = read_exp_golomb(buf);
+ sps->qpprime_y_zero_transform_bypass_flag = read_bits(buf, 1);
+ sps->seq_scaling_matrix_present_flag = read_bits(buf, 1);
+ if (sps->seq_scaling_matrix_present_flag) {
+ int i;
+ for (i = 0; i < 8; i++) {
+ sps->seq_scaling_list_present_flag[i] = read_bits(buf, 1);
+
+ if (sps->seq_scaling_list_present_flag[i]) {
+ if (i < 6)
+ parse_scaling_list(buf, sps->scaling_lists_4x4[i], 16, i);
+ else
+ parse_scaling_list(buf, sps->scaling_lists_8x8[i - 6], 64, i);
+ } else {
+ sps_scaling_list_fallback(sps, i);
+ }
+ }
+ }
+ } else
+ sps->chroma_format_idc = 1;
+
+ sps->log2_max_frame_num_minus4 = read_exp_golomb(buf);
+ sps->max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+
+ sps->pic_order_cnt_type = read_exp_golomb(buf);
+ if (!sps->pic_order_cnt_type)
+ sps->log2_max_pic_order_cnt_lsb_minus4 = read_exp_golomb(buf);
+ else if(sps->pic_order_cnt_type == 1) {
+ sps->delta_pic_order_always_zero_flag = read_bits(buf, 1);
+ sps->offset_for_non_ref_pic = read_exp_golomb_s(buf);
+ sps->offset_for_top_to_bottom_field = read_exp_golomb_s(buf);
+ sps->num_ref_frames_in_pic_order_cnt_cycle = read_exp_golomb(buf);
+ int i;
+ for (i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++) {
+ sps->offset_for_ref_frame[i] = read_exp_golomb_s(buf);
+ }
+ }
+
+ sps->num_ref_frames = read_exp_golomb(buf);
+ sps->gaps_in_frame_num_value_allowed_flag = read_bits(buf, 1);
+
+ /*sps->pic_width_in_mbs_minus1 = read_exp_golomb(buf);
+ sps->pic_height_in_map_units_minus1 = read_exp_golomb(buf);*/
+ sps->pic_width = 16 * (read_exp_golomb(buf) + 1);
+ sps->pic_height = 16 * (read_exp_golomb(buf) + 1);
+
+ sps->frame_mbs_only_flag = read_bits(buf, 1);
+
+ /* compute the height correctly even for interlaced material */
+ sps->pic_height = (2 - sps->frame_mbs_only_flag) * sps->pic_height;
+ if (sps->pic_height == 1088)
+ sps->pic_height = 1080;
+
+ if (!sps->frame_mbs_only_flag)
+ sps->mb_adaptive_frame_field_flag = read_bits(buf, 1);
+
+ sps->direct_8x8_inference_flag = read_bits(buf, 1);
+ sps->frame_cropping_flag = read_bits(buf, 1);
+ if (sps->frame_cropping_flag) {
+ sps->frame_crop_left_offset = read_exp_golomb(buf);
+ sps->frame_crop_right_offset = read_exp_golomb(buf);
+ sps->frame_crop_top_offset = read_exp_golomb(buf);
+ sps->frame_crop_bottom_offset = read_exp_golomb(buf);
+ }
+ sps->vui_parameters_present_flag = read_bits(buf, 1);
+ if (sps->vui_parameters_present_flag) {
+ parse_vui_parameters(buf, sps);
+ }
+
+ return 0;
+}
+
+/* evaluates the values parsed from the SPS and updates the current
+ * picture and parser state accordingly
+ */
+void interpret_sps(struct coded_picture *pic, struct h264_parser *parser)
+{
+ if(pic->sps_nal == NULL) {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "WARNING: Picture contains no seq_parameter_set\n");
+ return;
+ }
+
+ struct seq_parameter_set_rbsp *sps = &pic->sps_nal->sps;
+
+ if(sps->vui_parameters_present_flag &&
+ sps->vui_parameters.pic_struct_present_flag) {
+ parser->flag_mask |= PIC_STRUCT_PRESENT;
+ } else {
+ parser->flag_mask &= ~PIC_STRUCT_PRESENT;
+ }
+
+ if(sps->vui_parameters_present_flag &&
+ (sps->vui_parameters.nal_hrd_parameters_present_flag ||
+ sps->vui_parameters.vc1_hrd_parameters_present_flag)) {
+ parser->flag_mask |= CPB_DPB_DELAYS_PRESENT;
+ } else {
+ parser->flag_mask &= ~(CPB_DPB_DELAYS_PRESENT);
+ }
+
+ if(pic->slc_nal != NULL) {
+ struct slice_header *slc = &pic->slc_nal->slc;
+ if (slc->field_pic_flag == 0) {
+ pic->max_pic_num = sps->max_frame_num;
+ parser->curr_pic_num = slc->frame_num;
+ } else {
+ pic->max_pic_num = 2 * sps->max_frame_num;
+ parser->curr_pic_num = 2 * slc->frame_num + 1;
+ }
+ }
+}
+
+void parse_sei(struct buf_reader *buf, struct sei_message *sei,
+ struct h264_parser *parser)
+{
+ uint8_t tmp;
+
+ struct nal_unit *sps_nal =
+ nal_buffer_get_last(parser->sps_buffer);
+
+ if (sps_nal == NULL) {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "ERR: parse_sei: seq_parameter_set_id not found in buffers\n");
+ return;
+ }
+
+ struct seq_parameter_set_rbsp *sps = &sps_nal->sps;
+
+ sei->payload_type = 0;
+ while((tmp = read_bits(buf, 8)) == 0xff) {
+ sei->payload_type += 255;
+ }
+ sei->last_payload_type_byte = tmp;
+ sei->payload_type += sei->last_payload_type_byte;
+
+ sei->payload_size = 0;
+ while((tmp = read_bits(buf, 8)) == 0xff) {
+ sei->payload_size += 255;
+ }
+ sei->last_payload_size_byte = tmp;
+ sei->payload_size += sei->last_payload_size_byte;
+
+ /* pic_timing */
+ if(sei->payload_type == 1) {
+ if(parser->flag_mask & CPB_DPB_DELAYS_PRESENT) {
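+      /* note: the spec derives these field lengths from the HRD parameters
+       * (cpb_removal_delay_length_minus1 + 1 etc.); a fixed 5 bits is
+       * assumed here */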
+ sei->pic_timing.cpb_removal_delay = read_bits(buf, 5);
+ sei->pic_timing.dpb_output_delay = read_bits(buf, 5);
+ }
+
+ if(parser->flag_mask & PIC_STRUCT_PRESENT) {
+ sei->pic_timing.pic_struct = read_bits(buf, 4);
+
+ uint8_t NumClockTs = 0;
+ switch(sei->pic_timing.pic_struct) {
+ case 0:
+ case 1:
+ case 2:
+ NumClockTs = 1;
+ break;
+ case 3:
+ case 4:
+ case 7:
+ NumClockTs = 2;
+ break;
+ case 5:
+ case 6:
+ case 8:
+ NumClockTs = 3;
+ break;
+ }
+
+ int i;
+ for(i = 0; i < NumClockTs; i++) {
+ if(read_bits(buf, 1)) { /* clock_timestamp_flag == 1 */
+ sei->pic_timing.ct_type = read_bits(buf, 2);
+ sei->pic_timing.nuit_field_based_flag = read_bits(buf, 1);
+ sei->pic_timing.counting_type = read_bits(buf, 5);
+ sei->pic_timing.full_timestamp_flag = read_bits(buf, 1);
+ sei->pic_timing.discontinuity_flag = read_bits(buf, 1);
+ sei->pic_timing.cnt_dropped_flag = read_bits(buf, 1);
+ sei->pic_timing.n_frames = read_bits(buf, 8);
+ if(sei->pic_timing.full_timestamp_flag) {
+ sei->pic_timing.seconds_value = read_bits(buf, 6);
+ sei->pic_timing.minutes_value = read_bits(buf, 6);
+ sei->pic_timing.hours_value = read_bits(buf, 5);
+ } else {
+ if(read_bits(buf, 1)) {
+ sei->pic_timing.seconds_value = read_bits(buf, 6);
+
+ if(read_bits(buf, 1)) {
+ sei->pic_timing.minutes_value = read_bits(buf, 6);
+
+ if(read_bits(buf, 1)) {
+ sei->pic_timing.hours_value = read_bits(buf, 5);
+ }
+ }
+ }
+ }
+
+ if(sps->vui_parameters_present_flag &&
+ sps->vui_parameters.nal_hrd_parameters_present_flag) {
+ sei->pic_timing.time_offset =
+ read_bits(buf,
+ sps->vui_parameters.nal_hrd_parameters.time_offset_length);
+ }
+ }
+ }
+ }
+ } /*else {
+ fprintf(stderr, "Unimplemented SEI payload: %d\n", sei->payload_type);
+ }*/
+
+}
+
+void interpret_sei(struct coded_picture *pic)
+{
+ if(!pic->sps_nal || !pic->sei_nal)
+ return;
+
+ struct seq_parameter_set_rbsp *sps = &pic->sps_nal->sps;
+ struct sei_message *sei = &pic->sei_nal->sei;
+
+ if(sps && sps->vui_parameters_present_flag &&
+ sps->vui_parameters.pic_struct_present_flag) {
+ switch(sei->pic_timing.pic_struct) {
+ case DISP_FRAME:
+ pic->flag_mask &= ~INTERLACED;
+ pic->repeat_pic = 0;
+ break;
+ case DISP_TOP:
+ case DISP_BOTTOM:
+ case DISP_TOP_BOTTOM:
+ case DISP_BOTTOM_TOP:
+ pic->flag_mask |= INTERLACED;
+ break;
+ case DISP_TOP_BOTTOM_TOP:
+ case DISP_BOTTOM_TOP_BOTTOM:
+ pic->flag_mask |= INTERLACED;
+ pic->repeat_pic = 1;
+ break;
+ case DISP_FRAME_DOUBLING:
+ pic->flag_mask &= ~INTERLACED;
+ pic->repeat_pic = 2;
+ break;
+ case DISP_FRAME_TRIPLING:
+ pic->flag_mask &= ~INTERLACED;
+ pic->repeat_pic = 3;
+ }
+ }
+}
+
+void parse_vui_parameters(struct buf_reader *buf,
+ struct seq_parameter_set_rbsp *sps)
+{
+ sps->vui_parameters.aspect_ration_info_present_flag = read_bits(buf, 1);
+ if (sps->vui_parameters.aspect_ration_info_present_flag == 1) {
+ sps->vui_parameters.aspect_ratio_idc = read_bits(buf, 8);
+ if (sps->vui_parameters.aspect_ratio_idc == ASPECT_EXTENDED_SAR) {
+ sps->vui_parameters.sar_width = read_bits(buf, 16);
+ sps->vui_parameters.sar_height = read_bits(buf, 16);
+ }
+ }
+
+ sps->vui_parameters.overscan_info_present_flag = read_bits(buf, 1);
+ if (sps->vui_parameters.overscan_info_present_flag) {
+ sps->vui_parameters.overscan_appropriate_flag = read_bits(buf, 1);
+ }
+
+ sps->vui_parameters.video_signal_type_present_flag = read_bits(buf, 1);
+ if (sps->vui_parameters.video_signal_type_present_flag) {
+ sps->vui_parameters.video_format = read_bits(buf, 3);
+ sps->vui_parameters.video_full_range_flag = read_bits(buf, 1);
+ sps->vui_parameters.colour_description_present = read_bits(buf, 1);
+ if (sps->vui_parameters.colour_description_present) {
+ sps->vui_parameters.colour_primaries = read_bits(buf, 8);
+ sps->vui_parameters.transfer_characteristics = read_bits(buf, 8);
+ sps->vui_parameters.matrix_coefficients = read_bits(buf, 8);
+ }
+ }
+
+ sps->vui_parameters.chroma_loc_info_present_flag = read_bits(buf, 1);
+ if (sps->vui_parameters.chroma_loc_info_present_flag) {
+ sps->vui_parameters.chroma_sample_loc_type_top_field = read_exp_golomb(buf);
+ sps->vui_parameters.chroma_sample_loc_type_bottom_field = read_exp_golomb(
+ buf);
+ }
+
+ sps->vui_parameters.timing_info_present_flag = read_bits(buf, 1);
+ if (sps->vui_parameters.timing_info_present_flag) {
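+    /* for frame coded content the frame rate typically works out to
+     * time_scale / (2 * num_units_in_tick) */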
+ uint32_t num_units_in_tick = read_bits(buf, 32);
+ uint32_t time_scale = read_bits(buf, 32);
+ sps->vui_parameters.num_units_in_tick = num_units_in_tick;
+ sps->vui_parameters.time_scale = time_scale;
+ sps->vui_parameters.fixed_frame_rate_flag = read_bits(buf, 1);
+ }
+
+ sps->vui_parameters.nal_hrd_parameters_present_flag = read_bits(buf, 1);
+ if (sps->vui_parameters.nal_hrd_parameters_present_flag)
+ parse_hrd_parameters(buf, &sps->vui_parameters.nal_hrd_parameters);
+
+ sps->vui_parameters.vc1_hrd_parameters_present_flag = read_bits(buf, 1);
+ if (sps->vui_parameters.vc1_hrd_parameters_present_flag)
+ parse_hrd_parameters(buf, &sps->vui_parameters.vc1_hrd_parameters);
+
+ if (sps->vui_parameters.nal_hrd_parameters_present_flag
+ || sps->vui_parameters.vc1_hrd_parameters_present_flag)
+ sps->vui_parameters.low_delay_hrd_flag = read_bits(buf, 1);
+
+ sps->vui_parameters.pic_struct_present_flag = read_bits(buf, 1);
+ sps->vui_parameters.bitstream_restriction_flag = read_bits(buf, 1);
+
+ if (sps->vui_parameters.bitstream_restriction_flag) {
+ sps->vui_parameters.motion_vectors_over_pic_boundaries = read_bits(buf, 1);
+ sps->vui_parameters.max_bytes_per_pic_denom = read_exp_golomb(buf);
+ sps->vui_parameters.max_bits_per_mb_denom = read_exp_golomb(buf);
+ sps->vui_parameters.log2_max_mv_length_horizontal = read_exp_golomb(buf);
+ sps->vui_parameters.log2_max_mv_length_vertical = read_exp_golomb(buf);
+ sps->vui_parameters.num_reorder_frames = read_exp_golomb(buf);
+ sps->vui_parameters.max_dec_frame_buffering = read_exp_golomb(buf);
+ }
+}
+
+void parse_hrd_parameters(struct buf_reader *buf, struct hrd_parameters *hrd)
+{
+ hrd->cpb_cnt_minus1 = read_exp_golomb(buf);
+ hrd->bit_rate_scale = read_bits(buf, 4);
+ hrd->cpb_size_scale = read_bits(buf, 4);
+
+ int i;
+ for (i = 0; i <= hrd->cpb_cnt_minus1; i++) {
+ hrd->bit_rate_value_minus1[i] = read_exp_golomb(buf);
+ hrd->cpb_size_value_minus1[i] = read_exp_golomb(buf);
+ hrd->cbr_flag[i] = read_bits(buf, 1);
+ }
+
+ hrd->initial_cpb_removal_delay_length_minus1 = read_bits(buf, 5);
+ hrd->cpb_removal_delay_length_minus1 = read_bits(buf, 5);
+ hrd->dpb_output_delay_length_minus1 = read_bits(buf, 5);
+ hrd->time_offset_length = read_bits(buf, 5);
+}
+
+uint8_t parse_pps(struct buf_reader *buf, struct pic_parameter_set_rbsp *pps)
+{
+ pps->pic_parameter_set_id = read_exp_golomb(buf);
+ pps->seq_parameter_set_id = read_exp_golomb(buf);
+ pps->entropy_coding_mode_flag = read_bits(buf, 1);
+ pps->pic_order_present_flag = read_bits(buf, 1);
+
+ pps->num_slice_groups_minus1 = read_exp_golomb(buf);
+ if (pps->num_slice_groups_minus1 > 0) {
+ pps->slice_group_map_type = read_exp_golomb(buf);
+ if (pps->slice_group_map_type == 0) {
+ int i_group;
+ for (i_group = 0; i_group <= pps->num_slice_groups_minus1; i_group++) {
+ if (i_group < 64)
+ pps->run_length_minus1[i_group] = read_exp_golomb(buf);
+ else { // FIXME: skips if more than 64 groups exist
+ lprintf("Error: Only 64 slice_groups are supported\n");
+ read_exp_golomb(buf);
+ }
+ }
+ }
+ else if (pps->slice_group_map_type == 3 || pps->slice_group_map_type == 4
+ || pps->slice_group_map_type == 5) {
+ pps->slice_group_change_direction_flag = read_bits(buf, 1);
+ pps->slice_group_change_rate_minus1 = read_exp_golomb(buf);
+ }
+ else if (pps->slice_group_map_type == 6) {
+ pps->pic_size_in_map_units_minus1 = read_exp_golomb(buf);
+ int i_group;
+ for (i_group = 0; i_group <= pps->num_slice_groups_minus1; i_group++) {
+        pps->slice_group_id[i_group] = read_bits(buf, ceil(log2(
+            pps->num_slice_groups_minus1 + 1)));
+ }
+ }
+ }
+
+ pps->num_ref_idx_l0_active_minus1 = read_exp_golomb(buf);
+ pps->num_ref_idx_l1_active_minus1 = read_exp_golomb(buf);
+ pps->weighted_pred_flag = read_bits(buf, 1);
+ pps->weighted_bipred_idc = read_bits(buf, 2);
+ pps->pic_init_qp_minus26 = read_exp_golomb_s(buf);
+ pps->pic_init_qs_minus26 = read_exp_golomb_s(buf);
+ pps->chroma_qp_index_offset = read_exp_golomb_s(buf);
+ pps->deblocking_filter_control_present_flag = read_bits(buf, 1);
+ pps->constrained_intra_pred_flag = read_bits(buf, 1);
+ pps->redundant_pic_cnt_present_flag = read_bits(buf, 1);
+
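+  /* emulate more_rbsp_data(): the optional High profile fields are only
+   * parsed if more than the rbsp_trailing_bits are left in the NAL */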
+ int bit_length = (buf->len*8)-rbsp_trailing_bits(buf->buf, buf->len);
+ int bit_read = bits_read(buf);
+
+ memset(pps->scaling_lists_4x4, 16, sizeof(pps->scaling_lists_4x4));
+ memset(pps->scaling_lists_8x8, 16, sizeof(pps->scaling_lists_8x8));
+ if (bit_length-bit_read > 1) {
+ pps->transform_8x8_mode_flag = read_bits(buf, 1);
+ pps->pic_scaling_matrix_present_flag = read_bits(buf, 1);
+ if (pps->pic_scaling_matrix_present_flag) {
+ int i;
+ for (i = 0; i < 8; i++) {
+ if(i < 6 || pps->transform_8x8_mode_flag)
+ pps->pic_scaling_list_present_flag[i] = read_bits(buf, 1);
+ else
+ pps->pic_scaling_list_present_flag[i] = 0;
+
+ if (pps->pic_scaling_list_present_flag[i]) {
+ if (i < 6)
+ parse_scaling_list(buf, pps->scaling_lists_4x4[i], 16, i);
+ else
+ parse_scaling_list(buf, pps->scaling_lists_8x8[i - 6], 64, i);
+ }
+ }
+ }
+
+ pps->second_chroma_qp_index_offset = read_exp_golomb_s(buf);
+ } else
+ pps->second_chroma_qp_index_offset = pps->chroma_qp_index_offset;
+
+ return 0;
+}
+
+void interpret_pps(struct coded_picture *pic)
+{
+ if(pic->sps_nal == NULL) {
+ lprintf("WARNING: Picture contains no seq_parameter_set\n");
+ return;
+ } else if(pic->pps_nal == NULL) {
+ lprintf("WARNING: Picture contains no pic_parameter_set\n");
+ return;
+ }
+
+ struct seq_parameter_set_rbsp *sps = &pic->sps_nal->sps;
+ struct pic_parameter_set_rbsp *pps = &pic->pps_nal->pps;
+
+ int i;
+ for (i = 0; i < 8; i++) {
+ if (!pps->pic_scaling_list_present_flag[i]) {
+ pps_scaling_list_fallback(sps, pps, i);
+ }
+ }
+
+ if (!pps->pic_scaling_matrix_present_flag && sps != NULL) {
+ memcpy(pps->scaling_lists_4x4, sps->scaling_lists_4x4,
+ sizeof(pps->scaling_lists_4x4));
+ memcpy(pps->scaling_lists_8x8, sps->scaling_lists_8x8,
+ sizeof(pps->scaling_lists_8x8));
+ }
+}
+
+uint8_t parse_slice_header(struct buf_reader *buf, struct nal_unit *slc_nal,
+ struct h264_parser *parser)
+{
+ struct slice_header *slc = &slc_nal->slc;
+
+ slc->first_mb_in_slice = read_exp_golomb(buf);
+ /* we do some parsing on the slice type, because the list is doubled */
+ slc->slice_type = slice_type(read_exp_golomb(buf));
+
+ //print_slice_type(slc->slice_type);
+ slc->pic_parameter_set_id = read_exp_golomb(buf);
+
+ /* retrieve sps and pps from the buffers */
+ struct nal_unit *pps_nal =
+ nal_buffer_get_by_pps_id(parser->pps_buffer, slc->pic_parameter_set_id);
+
+ if (pps_nal == NULL) {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "ERR: parse_slice_header: pic_parameter_set_id %d not found in buffers\n",
+ slc->pic_parameter_set_id);
+ return -1;
+ }
+
+ struct pic_parameter_set_rbsp *pps = &pps_nal->pps;
+
+ struct nal_unit *sps_nal =
+ nal_buffer_get_by_sps_id(parser->sps_buffer, pps->seq_parameter_set_id);
+
+ if (sps_nal == NULL) {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "ERR: parse_slice_header: seq_parameter_set_id %d not found in buffers\n",
+ pps->seq_parameter_set_id);
+ return -1;
+ }
+
+ struct seq_parameter_set_rbsp *sps = &sps_nal->sps;
+
+ if(sps->separate_colour_plane_flag)
+ slc->colour_plane_id = read_bits(buf, 2);
+
+ slc->frame_num = read_bits(buf, sps->log2_max_frame_num_minus4 + 4);
+ if (!sps->frame_mbs_only_flag) {
+ slc->field_pic_flag = read_bits(buf, 1);
+ if (slc->field_pic_flag)
+ slc->bottom_field_flag = read_bits(buf, 1);
+ else
+ slc->bottom_field_flag = 0;
+ }
+ else {
+ slc->field_pic_flag = 0;
+ slc->bottom_field_flag = 0;
+ }
+
+ if (slc_nal->nal_unit_type == NAL_SLICE_IDR)
+ slc->idr_pic_id = read_exp_golomb(buf);
+
+ if (!sps->pic_order_cnt_type) {
+ slc->pic_order_cnt_lsb = read_bits(buf,
+ sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+ if (pps->pic_order_present_flag && !slc->field_pic_flag)
+ slc->delta_pic_order_cnt_bottom = read_exp_golomb_s(buf);
+ }
+
+ if (sps->pic_order_cnt_type == 1 && !sps->delta_pic_order_always_zero_flag) {
+ slc->delta_pic_order_cnt[0] = read_exp_golomb_s(buf);
+ if (pps->pic_order_present_flag && !slc->field_pic_flag)
+ slc->delta_pic_order_cnt[1] = read_exp_golomb_s(buf);
+ }
+
+ if (pps->redundant_pic_cnt_present_flag == 1) {
+ slc->redundant_pic_cnt = read_exp_golomb(buf);
+ }
+
+ if (slc->slice_type == SLICE_B)
+ slc->direct_spatial_mv_pred_flag = read_bits(buf, 1);
+
+ /* take default values in case they are not set here */
+ slc->num_ref_idx_l0_active_minus1 = pps->num_ref_idx_l0_active_minus1;
+ slc->num_ref_idx_l1_active_minus1 = pps->num_ref_idx_l1_active_minus1;
+
+ if (slc->slice_type == SLICE_P || slc->slice_type == SLICE_SP
+ || slc->slice_type == SLICE_B) {
+ slc->num_ref_idx_active_override_flag = read_bits(buf, 1);
+
+ if (slc->num_ref_idx_active_override_flag == 1) {
+ slc->num_ref_idx_l0_active_minus1 = read_exp_golomb(buf);
+
+ if (slc->slice_type == SLICE_B) {
+ slc->num_ref_idx_l1_active_minus1 = read_exp_golomb(buf);
+ }
+ }
+ }
+
+ /* --- ref_pic_list_reordering --- */
+ parse_ref_pic_list_reordering(buf, slc);
+
+ /* --- pred_weight_table --- */
+ if ((pps->weighted_pred_flag && (slc->slice_type == SLICE_P
+ || slc->slice_type == SLICE_SP)) || (pps->weighted_bipred_idc == 1
+ && slc->slice_type == SLICE_B)) {
+ parse_pred_weight_table(buf, slc, parser);
+ }
+
+ /* --- dec_ref_pic_marking --- */
+ if (slc_nal->nal_ref_idc != 0)
+ parse_dec_ref_pic_marking(buf, slc_nal);
+ else
+ slc->dec_ref_pic_marking_count = 0;
+
+ return 0;
+}
+
+void interpret_slice_header(struct h264_parser *parser, struct nal_unit *slc_nal)
+{
+ struct coded_picture *pic = parser->pic;
+ struct slice_header *slc = &slc_nal->slc;
+
+ /* retrieve sps and pps from the buffers */
+ struct nal_unit *pps_nal =
+ nal_buffer_get_by_pps_id(parser->pps_buffer, slc->pic_parameter_set_id);
+
+ if (pps_nal == NULL) {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "ERR: interpret_slice_header: pic_parameter_set_id %d not found in buffers\n",
+ slc->pic_parameter_set_id);
+ return;
+ }
+
+ struct nal_unit *sps_nal =
+ nal_buffer_get_by_sps_id(parser->sps_buffer, pps_nal->pps.seq_parameter_set_id);
+
+ if (sps_nal == NULL) {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "ERR: interpret_slice_header: seq_parameter_set_id %d not found in buffers\n",
+ pps_nal->pps.seq_parameter_set_id);
+ return;
+ }
+
+ if (pic->sps_nal) {
+ release_nal_unit(pic->sps_nal);
+ }
+ if (pic->pps_nal) {
+ release_nal_unit(pic->pps_nal);
+ }
+ lock_nal_unit(sps_nal);
+ pic->sps_nal = sps_nal;
+ lock_nal_unit(pps_nal);
+ pic->pps_nal = pps_nal;
+}
+
+void parse_ref_pic_list_reordering(struct buf_reader *buf, struct slice_header *slc)
+{
+ if (slc->slice_type != SLICE_I && slc->slice_type != SLICE_SI) {
+ slc->ref_pic_list_reordering.ref_pic_list_reordering_flag_l0 = read_bits(
+ buf, 1);
+
+ if (slc->ref_pic_list_reordering.ref_pic_list_reordering_flag_l0 == 1) {
+ do {
+ slc->ref_pic_list_reordering.reordering_of_pic_nums_idc
+ = read_exp_golomb(buf);
+
+ if (slc->ref_pic_list_reordering.reordering_of_pic_nums_idc == 0
+ || slc->ref_pic_list_reordering.reordering_of_pic_nums_idc == 1) {
+ slc->ref_pic_list_reordering.abs_diff_pic_num_minus1
+ = read_exp_golomb(buf);
+ }
+ else if (slc->ref_pic_list_reordering.reordering_of_pic_nums_idc == 2) {
+ slc->ref_pic_list_reordering.long_term_pic_num = read_exp_golomb(buf);
+ }
+ } while (slc->ref_pic_list_reordering.reordering_of_pic_nums_idc != 3);
+ }
+ }
+
+ if (slc->slice_type == SLICE_B) {
+ slc->ref_pic_list_reordering.ref_pic_list_reordering_flag_l1 = read_bits(
+ buf, 1);
+
+ if (slc->ref_pic_list_reordering.ref_pic_list_reordering_flag_l1 == 1) {
+ do {
+ slc->ref_pic_list_reordering.reordering_of_pic_nums_idc
+ = read_exp_golomb(buf);
+
+ if (slc->ref_pic_list_reordering.reordering_of_pic_nums_idc == 0
+ || slc->ref_pic_list_reordering.reordering_of_pic_nums_idc == 1) {
+ slc->ref_pic_list_reordering.abs_diff_pic_num_minus1
+ = read_exp_golomb(buf);
+ }
+ else if (slc->ref_pic_list_reordering.reordering_of_pic_nums_idc == 2) {
+ slc->ref_pic_list_reordering.long_term_pic_num = read_exp_golomb(buf);
+ }
+ } while (slc->ref_pic_list_reordering.reordering_of_pic_nums_idc != 3);
+ }
+ }
+}
+
+void parse_pred_weight_table(struct buf_reader *buf, struct slice_header *slc,
+ struct h264_parser *parser)
+{
+ /* retrieve sps and pps from the buffers */
+ struct pic_parameter_set_rbsp *pps =
+ &nal_buffer_get_by_pps_id(parser->pps_buffer, slc->pic_parameter_set_id)
+ ->pps;
+
+ struct seq_parameter_set_rbsp *sps =
+ &nal_buffer_get_by_sps_id(parser->sps_buffer, pps->seq_parameter_set_id)
+ ->sps;
+
+ slc->pred_weight_table.luma_log2_weight_denom = read_exp_golomb(buf);
+
+ uint32_t ChromaArrayType = sps->chroma_format_idc;
+ if(sps->separate_colour_plane_flag)
+ ChromaArrayType = 0;
+
+ if (ChromaArrayType != 0)
+ slc->pred_weight_table.chroma_log2_weight_denom = read_exp_golomb(buf);
+
+ int i;
+ for (i = 0; i <= slc->num_ref_idx_l0_active_minus1; i++) {
+ uint8_t luma_weight_l0_flag = read_bits(buf, 1);
+
+ if (luma_weight_l0_flag == 1) {
+ slc->pred_weight_table.luma_weight_l0[i] = read_exp_golomb_s(buf);
+ slc->pred_weight_table.luma_offset_l0[i] = read_exp_golomb_s(buf);
+ }
+
+ if (ChromaArrayType != 0) {
+ uint8_t chroma_weight_l0_flag = read_bits(buf, 1);
+
+ if (chroma_weight_l0_flag == 1) {
+ int j;
+ for (j = 0; j < 2; j++) {
+ slc->pred_weight_table.chroma_weight_l0[i][j]
+ = read_exp_golomb_s(buf);
+ slc->pred_weight_table.chroma_offset_l0[i][j]
+ = read_exp_golomb_s(buf);
+ }
+ }
+ }
+ }
+
+ if ((slc->slice_type % 5) == SLICE_B) {
+    /* FIXME: Being spec-compliant here and looping to num_ref_idx_l0_active_minus1
+     * will break DivX 7 files. Keep this in mind if any other streams are broken
+     */
+ for (i = 0; i <= slc->num_ref_idx_l1_active_minus1; i++) {
+ uint8_t luma_weight_l1_flag = read_bits(buf, 1);
+
+ if (luma_weight_l1_flag == 1) {
+ slc->pred_weight_table.luma_weight_l1[i] = read_exp_golomb_s(buf);
+ slc->pred_weight_table.luma_offset_l1[i] = read_exp_golomb_s(buf);
+ }
+
+ if (ChromaArrayType != 0) {
+ uint8_t chroma_weight_l1_flag = read_bits(buf, 1);
+
+ if (chroma_weight_l1_flag == 1) {
+ int j;
+ for (j = 0; j < 2; j++) {
+ slc->pred_weight_table.chroma_weight_l1[i][j]
+ = read_exp_golomb_s(buf);
+ slc->pred_weight_table.chroma_offset_l1[i][j]
+ = read_exp_golomb_s(buf);
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ * PicNum calculation following ITU-T H264 11/2007
+ * 8.2.4.1 p112f
+ */
+void calculate_pic_nums(struct h264_parser *parser, struct coded_picture *cpic)
+{
+ struct decoded_picture *pic = NULL;
+ struct slice_header *cslc = &cpic->slc_nal->slc;
+
+ xine_list_iterator_t ite = xine_list_front(parser->dpb->reference_list);
+ while (ite) {
+ pic = xine_list_get_value(parser->dpb->reference_list, ite);
+
+ int i;
+ for (i=0; i<2; i++) {
+ if(pic->coded_pic[i] == NULL)
+ continue;
+
+ struct slice_header *slc = &pic->coded_pic[i]->slc_nal->slc;
+ struct seq_parameter_set_rbsp *sps = &pic->coded_pic[i]->sps_nal->sps;
+
+ if (!pic->coded_pic[i]->used_for_long_term_ref) {
+ int32_t frame_num_wrap = 0;
+ if (slc->frame_num > cslc->frame_num)
+ frame_num_wrap = slc->frame_num - sps->max_frame_num;
+ else
+ frame_num_wrap = slc->frame_num;
+
+ if(i == 0) {
+ pic->frame_num_wrap = frame_num_wrap;
+ }
+
+ if (cslc->field_pic_flag == 0) {
+ pic->coded_pic[i]->pic_num = frame_num_wrap;
+ } else {
+ pic->coded_pic[i]->pic_num = 2 * frame_num_wrap;
+ if((slc->field_pic_flag == 1 &&
+ cslc->bottom_field_flag == slc->bottom_field_flag) ||
+ (slc->field_pic_flag == 0 && !cslc->bottom_field_flag))
+ pic->coded_pic[i]->pic_num++;
+ }
+ } else {
+ pic->coded_pic[i]->long_term_pic_num = pic->coded_pic[i]->long_term_frame_idx;
+ if(slc->bottom_field_flag == cslc->bottom_field_flag)
+ pic->coded_pic[i]->long_term_pic_num++;
+ }
+ }
+
+ ite = xine_list_next(parser->dpb->reference_list, ite);
+ }
+}
+
+void execute_ref_pic_marking(struct coded_picture *cpic,
+ uint32_t memory_management_control_operation,
+ uint32_t marking_nr,
+ struct h264_parser *parser)
+{
+ /**
+ * according to NOTE 6, p83 the dec_ref_pic_marking
+ * structure is identical for all slice headers within
+ * a coded picture, so we can simply use the last
+ * slice_header we saw in the pic
+ */
+ if (!cpic->slc_nal)
+ return;
+ struct slice_header *slc = &cpic->slc_nal->slc;
+ struct dpb *dpb = parser->dpb;
+
+ calculate_pic_nums(parser, cpic);
+
+ if (cpic->flag_mask & IDR_PIC) {
+ if(slc->dec_ref_pic_marking[marking_nr].long_term_reference_flag) {
+ cpic->used_for_long_term_ref = 1;
+ dpb_set_unused_ref_picture_lidx_gt(dpb, 0);
+ } else {
+ dpb_set_unused_ref_picture_lidx_gt(dpb, -1);
+ }
+ return;
+ }
+
+ /* MMC operation == 1 : 8.2.5.4.1, p. 120 */
+ if (memory_management_control_operation == 1) {
+ // short-term -> unused for reference
+ int32_t pic_num_x = (parser->curr_pic_num
+ - (slc->dec_ref_pic_marking[marking_nr].difference_of_pic_nums_minus1 + 1));
+ //% cpic->max_pic_num;
+ struct decoded_picture* pic = NULL;
+ if ((pic = dpb_get_picture(dpb, pic_num_x)) != NULL) {
+ if (cpic->slc_nal->slc.field_pic_flag == 0) {
+ dpb_unmark_reference_picture(dpb, pic);
+ } else {
+
+ if (pic->coded_pic[0]->slc_nal->slc.field_pic_flag == 1) {
+ if (pic->top_is_reference)
+ pic->top_is_reference = 0;
+ else if (pic->bottom_is_reference)
+ pic->bottom_is_reference = 0;
+
+ if(!pic->top_is_reference && !pic->bottom_is_reference)
+ dpb_unmark_reference_picture(dpb, pic);
+ } else {
+ pic->top_is_reference = pic->bottom_is_reference = 0;
+ dpb_unmark_reference_picture(dpb, pic);
+ }
+ }
+ } else {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "H264: mmc 1 failed: %d not existent - curr_pic: %d\n",
+ pic_num_x, parser->curr_pic_num);
+ }
+ } else if (memory_management_control_operation == 2) {
+ // long-term -> unused for reference
+ struct decoded_picture* pic = dpb_get_picture_by_ltpn(dpb,
+ slc->dec_ref_pic_marking[marking_nr].long_term_pic_num);
+ if (pic != NULL) {
+ if (cpic->slc_nal->slc.field_pic_flag == 0)
+ dpb_set_unused_ref_picture_byltpn(dpb,
+ slc->dec_ref_pic_marking[marking_nr].long_term_pic_num);
+ else {
+
+ if (pic->coded_pic[0]->slc_nal->slc.field_pic_flag == 1) {
+ if (pic->top_is_reference)
+ pic->top_is_reference = 0;
+ else if (pic->bottom_is_reference)
+ pic->bottom_is_reference = 0;
+
+ if(!pic->top_is_reference && !pic->bottom_is_reference) {
+ dpb_set_unused_ref_picture_byltpn(dpb,
+ slc->dec_ref_pic_marking[marking_nr].long_term_pic_num);
+ }
+ } else {
+ pic->top_is_reference = pic->bottom_is_reference = 0;
+ dpb_set_unused_ref_picture_byltpn(dpb,
+ slc->dec_ref_pic_marking[marking_nr].long_term_pic_num);
+ }
+ }
+ }
+ } else if (memory_management_control_operation == 3) {
+ // short-term -> long-term, set long-term frame index
+ uint32_t pic_num_x = parser->curr_pic_num
+ - (slc->dec_ref_pic_marking[marking_nr].difference_of_pic_nums_minus1 + 1);
+    struct decoded_picture* pic = dpb_get_picture_by_ltidx(dpb,
+        slc->dec_ref_pic_marking[marking_nr].long_term_frame_idx);
+ if (pic != NULL)
+ dpb_set_unused_ref_picture_bylidx(dpb,
+ slc->dec_ref_pic_marking[marking_nr].long_term_frame_idx);
+
+ pic = dpb_get_picture(dpb, pic_num_x);
+ if (pic) {
+ if (pic->coded_pic[0]->slc_nal->slc.field_pic_flag == 0) {
+ pic->coded_pic[0]->long_term_frame_idx
+ = slc->dec_ref_pic_marking[marking_nr].long_term_frame_idx;
+ pic->coded_pic[0]->long_term_pic_num = pic->coded_pic[0]->long_term_frame_idx;
+ }
+ else {
+ if(pic->coded_pic[0]->pic_num == pic_num_x) {
+ pic->coded_pic[0]->long_term_frame_idx
+ = slc->dec_ref_pic_marking[marking_nr].long_term_frame_idx;
+ pic->coded_pic[0]->long_term_pic_num = pic->coded_pic[0]->long_term_frame_idx * 2 + 1;
+ } else if(pic->coded_pic[1] != NULL &&
+ pic->coded_pic[1]->pic_num == pic_num_x) {
+ pic->coded_pic[1]->long_term_frame_idx
+ = slc->dec_ref_pic_marking[marking_nr].long_term_frame_idx;
+ pic->coded_pic[1]->long_term_pic_num = pic->coded_pic[1]->long_term_frame_idx * 2 + 1;
+ }
+ }
+ }
+ else {
+ xprintf(parser->xine, XINE_VERBOSITY_DEBUG,
+ "memory_management_control_operation: 3 failed. No such picture.\n");
+ }
+
+ } else if (memory_management_control_operation == 4) {
+    /* set the max long-term frame index,
+     * mark all long-term pictures with a long-term frame idx
+     * greater than the max long-term frame idx as unused for ref */
+ if (slc->dec_ref_pic_marking[marking_nr].max_long_term_frame_idx_plus1 == 0)
+ dpb_set_unused_ref_picture_lidx_gt(dpb, 0);
+ else
+ dpb_set_unused_ref_picture_lidx_gt(dpb,
+ slc->dec_ref_pic_marking[marking_nr].max_long_term_frame_idx_plus1 - 1);
+ } else if (memory_management_control_operation == 5) {
+ /* mark all ref pics as unused for reference,
+ * set max-long-term frame index = no long-term frame idxs */
+ dpb_flush(dpb);
+
+ if (!slc->bottom_field_flag) {
+ parser->prev_pic_order_cnt_lsb = cpic->top_field_order_cnt;
+ parser->prev_pic_order_cnt_msb = 0;
+ } else {
+ parser->prev_pic_order_cnt_lsb = 0;
+ parser->prev_pic_order_cnt_msb = 0;
+ }
+ } else if (memory_management_control_operation == 6) {
+    /* mark the current picture as used for long-term ref,
+     * assign a long-term frame idx to it */
+ struct decoded_picture* pic = dpb_get_picture_by_ltidx(dpb,
+ slc->dec_ref_pic_marking[marking_nr].long_term_frame_idx);
+ if (pic != NULL)
+ dpb_set_unused_ref_picture_bylidx(dpb,
+ slc->dec_ref_pic_marking[marking_nr].long_term_frame_idx);
+
+ cpic->long_term_frame_idx = slc->dec_ref_pic_marking[marking_nr].long_term_frame_idx;
+ cpic->used_for_long_term_ref = 1;
+
+ if (slc->field_pic_flag == 0) {
+ cpic->long_term_pic_num = cpic->long_term_frame_idx;
+ }
+ else {
+ cpic->long_term_pic_num = cpic->long_term_frame_idx * 2 + 1;
+ }
+
+ }
+}
+
+void parse_dec_ref_pic_marking(struct buf_reader *buf,
+ struct nal_unit *slc_nal)
+{
+ struct slice_header *slc = &slc_nal->slc;
+
+ if (!slc)
+ return;
+
+ slc->dec_ref_pic_marking_count = 0;
+ int i = slc->dec_ref_pic_marking_count;
+
+ if (slc_nal->nal_unit_type == NAL_SLICE_IDR) {
+ slc->dec_ref_pic_marking[i].no_output_of_prior_pics_flag = read_bits(buf, 1);
+ slc->dec_ref_pic_marking[i].long_term_reference_flag = read_bits(buf, 1);
+ i+=2;
+ } else {
+ slc->dec_ref_pic_marking[i].adaptive_ref_pic_marking_mode_flag = read_bits(
+ buf, 1);
+
+ if (slc->dec_ref_pic_marking[i].adaptive_ref_pic_marking_mode_flag) {
+      uint32_t mmco;
+      do {
+        mmco = read_exp_golomb(buf);
+        slc->dec_ref_pic_marking[i].memory_management_control_operation = mmco;
+
+ if (slc->dec_ref_pic_marking[i].memory_management_control_operation == 1
+ || slc->dec_ref_pic_marking[i].memory_management_control_operation
+ == 3)
+ slc->dec_ref_pic_marking[i].difference_of_pic_nums_minus1
+ = read_exp_golomb(buf);
+
+ if (slc->dec_ref_pic_marking[i].memory_management_control_operation == 2)
+ slc->dec_ref_pic_marking[i].long_term_pic_num = read_exp_golomb(buf);
+
+ if (slc->dec_ref_pic_marking[i].memory_management_control_operation == 3
+ || slc->dec_ref_pic_marking[i].memory_management_control_operation
+ == 6)
+ slc->dec_ref_pic_marking[i].long_term_frame_idx = read_exp_golomb(buf);
+
+ if (slc->dec_ref_pic_marking[i].memory_management_control_operation == 4)
+ slc->dec_ref_pic_marking[i].max_long_term_frame_idx_plus1
+ = read_exp_golomb(buf);
+
+ i++;
+ if(i >= 10) {
+ lprintf("Error: Not more than 10 MMC operations supported per slice. Dropping some.\n");
+ i = 0;
+ }
+      } while (mmco != 0);
+ }
+ }
+
+ slc->dec_ref_pic_marking_count = (i>0) ? (i-1) : 0;
+}
+
+/* ----------------- NAL parser ----------------- */
+
+struct h264_parser* init_parser(xine_t *xine)
+{
+ struct h264_parser *parser = calloc(1, sizeof(struct h264_parser));
+ parser->pic = create_coded_picture();
+ parser->position = NON_VCL;
+ parser->last_vcl_nal = NULL;
+ parser->sps_buffer = create_nal_buffer(32);
+ parser->pps_buffer = create_nal_buffer(32);
+ parser->xine = xine;
+ parser->dpb = create_dpb();
+
+ return parser;
+}
+
+void reset_parser(struct h264_parser *parser)
+{
+ parser->position = NON_VCL;
+ parser->buf_len = parser->prebuf_len = 0;
+ parser->next_nal_position = 0;
+ parser->last_nal_res = 0;
+
+ if(parser->last_vcl_nal) {
+ release_nal_unit(parser->last_vcl_nal);
+ }
+ parser->last_vcl_nal = NULL;
+
+ parser->prev_pic_order_cnt_msb = 0;
+ parser->prev_pic_order_cnt_lsb = 0;
+ parser->frame_num_offset = 0;
+ parser->prev_top_field_order_cnt = 0;
+ parser->curr_pic_num = 0;
+ parser->flag_mask = 0;
+
+ if(parser->pic != NULL) {
+ free_coded_picture(parser->pic);
+ parser->pic = create_coded_picture();
+ }
+}
+
+void free_parser(struct h264_parser *parser)
+{
+ dpb_free_all(parser->dpb);
+ release_dpb(parser->dpb);
+ free_nal_buffer(parser->pps_buffer);
+ free_nal_buffer(parser->sps_buffer);
+ free(parser);
+}
+
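+/* parses the MP4/Matroska style codec private data, which is assumed to be
+ * an avcC (AVCDecoderConfigurationRecord): a 6 byte header followed by the
+ * length-prefixed SPS and PPS NAL units */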
+void parse_codec_private(struct h264_parser *parser, uint8_t *inbuf, int inbuf_len)
+{
+ struct buf_reader bufr;
+
+ bufr.buf = inbuf;
+ bufr.cur_pos = inbuf;
+ bufr.cur_offset = 8;
+ bufr.len = inbuf_len;
+
+ // FIXME: Might be broken!
+ struct nal_unit *nal = calloc(1, sizeof(struct nal_unit));
+
+
+  /* configurationVersion */
+ read_bits(&bufr, 8);
+ nal->sps.profile_idc = read_bits(&bufr, 8);
+ read_bits(&bufr, 8);
+ nal->sps.level_idc = read_bits(&bufr, 8);
+ read_bits(&bufr, 6);
+
+ parser->nal_size_length = read_bits(&bufr, 2) + 1;
+ parser->nal_size_length_buf = calloc(1, parser->nal_size_length);
+ read_bits(&bufr, 3);
+ uint8_t sps_count = read_bits(&bufr, 5);
+
+ inbuf += 6;
+ inbuf_len -= 6;
+ int i;
+
+ struct coded_picture *dummy = NULL;
+ for(i = 0; i < sps_count; i++) {
+ uint16_t sps_size = read_bits(&bufr, 16);
+ inbuf += 2;
+ inbuf_len -= 2;
+ parse_nal(inbuf, sps_size, parser, &dummy);
+ inbuf += sps_size;
+ inbuf_len -= sps_size;
+ }
+
+ bufr.buf = inbuf;
+ bufr.cur_pos = inbuf;
+ bufr.cur_offset = 8;
+ bufr.len = inbuf_len;
+
+ uint8_t pps_count = read_bits(&bufr, 8);
+ inbuf += 1;
+ for(i = 0; i < pps_count; i++) {
+ uint16_t pps_size = read_bits(&bufr, 16);
+ inbuf += 2;
+ inbuf_len -= 2;
+ parse_nal(inbuf, pps_size, parser, &dummy);
+ inbuf += pps_size;
+ inbuf_len -= pps_size;
+ }
+
+ nal_buffer_append(parser->sps_buffer, nal);
+}
+
+void process_mmc_operations(struct h264_parser *parser, struct coded_picture *picture)
+{
+ if (picture->flag_mask & REFERENCE) {
+ parser->prev_pic_order_cnt_lsb
+ = picture->slc_nal->slc.pic_order_cnt_lsb;
+ }
+
+ int i;
+ for(i = 0; i < picture->slc_nal->slc.
+ dec_ref_pic_marking_count; i++) {
+ execute_ref_pic_marking(
+ picture,
+ picture->slc_nal->slc.dec_ref_pic_marking[i].
+ memory_management_control_operation,
+ i,
+ parser);
+ }
+}
+
+int parse_frame(struct h264_parser *parser, uint8_t *inbuf, int inbuf_len,
+ int64_t pts,
+ uint8_t **ret_buf, uint32_t *ret_len, struct coded_picture **ret_pic)
+{
+ int32_t next_nal = 0;
+ int32_t offset = 0;
+ int start_seq_len = 3;
+
+ *ret_pic = NULL;
+ *ret_buf = NULL;
+ *ret_len = 0;
+
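+  /* nal_size_length > 0 means the stream uses length-prefixed NAL units
+   * (avcC style) instead of Annex B start codes */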
+ if(parser->nal_size_length > 0)
+ start_seq_len = offset = parser->nal_size_length;
+
+ if (parser->prebuf_len + inbuf_len > MAX_FRAME_SIZE) {
+    xprintf(parser->xine, XINE_VERBOSITY_LOG,"h264_parser: prebuf overflow\n");
+ *ret_len = 0;
+ *ret_buf = NULL;
+ parser->prebuf_len = 0;
+ return inbuf_len;
+ }
+
+ /* copy the whole inbuf to the prebuf,
+ * then search for a nal-start sequence in the prebuf,
+ * if it's in there, parse the nal and append to parser->buf
+ * or return a frame */
+
+ xine_fast_memcpy(parser->prebuf + parser->prebuf_len, inbuf, inbuf_len);
+ parser->prebuf_len += inbuf_len;
+
+ while((next_nal = seek_for_nal(parser->prebuf+start_seq_len-offset, parser->prebuf_len-start_seq_len+offset, parser)) > 0) {
+
+ struct coded_picture *completed_pic = NULL;
+
+ if(!parser->nal_size_length &&
+ (parser->prebuf[0] != 0x00 || parser->prebuf[1] != 0x00 ||
+ parser->prebuf[2] != 0x01)) {
+ xprintf(parser->xine, XINE_VERBOSITY_LOG, "Broken NAL, skip it.\n");
+ parser->last_nal_res = 2;
+ } else {
+ parser->last_nal_res = parse_nal(parser->prebuf+start_seq_len,
+ next_nal, parser, &completed_pic);
+ }
+
+ if (completed_pic != NULL &&
+ completed_pic->slice_cnt > 0 &&
+ parser->buf_len > 0) {
+
+ //lprintf("Frame complete: %d bytes\n", parser->buf_len);
+ *ret_len = parser->buf_len;
+ *ret_buf = malloc(parser->buf_len);
+ xine_fast_memcpy(*ret_buf, parser->buf, parser->buf_len);
+
+ *ret_pic = completed_pic;
+
+ parser->buf_len = 0;
+
+ if (pts != 0 && (parser->pic->pts == 0 || parser->pic->pts != pts)) {
+ parser->pic->pts = pts;
+ }
+
+      /**
+       * if the new coded picture started with a VCL nal
+       * we have to copy it to the buffer for the next
+       * picture now.
+       */
+ if(parser->last_nal_res == 1) {
+ if(parser->nal_size_length > 0) {
+ static const uint8_t start_seq[3] = { 0x00, 0x00, 0x01 };
+ xine_fast_memcpy(parser->buf, start_seq, 3);
+ parser->buf_len += 3;
+ }
+
+ xine_fast_memcpy(parser->buf+parser->buf_len, parser->prebuf+offset, next_nal+start_seq_len-2*offset);
+ parser->buf_len += next_nal+start_seq_len-2*offset;
+ }
+
+ memmove(parser->prebuf, parser->prebuf+(next_nal+start_seq_len-offset), parser->prebuf_len-(next_nal+start_seq_len-offset));
+ parser->prebuf_len -= next_nal+start_seq_len-offset;
+
+ return inbuf_len;
+ }
+
+ /* got a new nal, which is part of the current
+ * coded picture. add it to buf
+ */
+ if (parser->last_nal_res < 2) {
+ if (parser->buf_len + next_nal+start_seq_len-offset > MAX_FRAME_SIZE) {
+        xprintf(parser->xine, XINE_VERBOSITY_LOG, "h264_parser: buf overflow!\n");
+ parser->buf_len = 0;
+ *ret_len = 0;
+ *ret_buf = NULL;
+ return inbuf_len;
+ }
+
+ if(parser->nal_size_length > 0) {
+ static const uint8_t start_seq[3] = { 0x00, 0x00, 0x01 };
+ xine_fast_memcpy(parser->buf+parser->buf_len, start_seq, 3);
+ parser->buf_len += 3;
+ }
+
+ xine_fast_memcpy(parser->buf+parser->buf_len, parser->prebuf+offset, next_nal+start_seq_len-2*offset);
+ parser->buf_len += next_nal+start_seq_len-2*offset;
+
+ memmove(parser->prebuf, parser->prebuf+(next_nal+start_seq_len-offset), parser->prebuf_len-(next_nal+start_seq_len-offset));
+ parser->prebuf_len -= next_nal+start_seq_len-offset;
+ } else {
+ /* got a non-relevant nal, just remove it */
+ memmove(parser->prebuf, parser->prebuf+(next_nal+start_seq_len-offset), parser->prebuf_len-(next_nal+start_seq_len-offset));
+ parser->prebuf_len -= next_nal+start_seq_len-offset;
+ }
+ }
+
+ if (pts != 0 && (parser->pic->pts == 0 || parser->pic->pts != pts)) {
+ parser->pic->pts = pts;
+ }
+
+ *ret_buf = NULL;
+ *ret_len = 0;
+ return inbuf_len;
+}
+
+
+/**
+ * @return 0: NAL is part of coded picture
+ * 2: NAL is not part of coded picture
+ * 1: NAL is the beginning of a new coded picture
+ * 3: NAL is marked as END_OF_SEQUENCE
+ */
+int parse_nal(uint8_t *buf, int buf_len, struct h264_parser *parser,
+ struct coded_picture **completed_picture)
+{
+ int ret = 0;
+
+ struct buf_reader bufr;
+
+ bufr.buf = buf;
+ bufr.cur_pos = buf;
+ bufr.cur_offset = 8;
+ bufr.len = buf_len;
+
+ *completed_picture = NULL;
+
+ struct nal_unit *nal = parse_nal_header(&bufr, parser->pic, parser);
+
+ /**
+ * we detect the start of a new access unit if
+ * a non-vcl nal unit is received after a vcl
+ * nal unit
+ * NAL_END_OF_SEQUENCE terminates the current
+ * access unit
+ */
+ if (nal->nal_unit_type >= NAL_SLICE &&
+ nal->nal_unit_type <= NAL_SLICE_IDR) {
+ parser->position = VCL;
+ } else if ((parser->position == VCL &&
+ nal->nal_unit_type >= NAL_SEI &&
+ nal->nal_unit_type <= NAL_PPS) ||
+ nal->nal_unit_type == NAL_AU_DELIMITER ||
+ nal->nal_unit_type == NAL_END_OF_SEQUENCE) {
+ /* start of a new access unit! */
+ *completed_picture = parser->pic;
+ parser->pic = create_coded_picture();
+
+ if(parser->last_vcl_nal != NULL) {
+ release_nal_unit(parser->last_vcl_nal);
+ parser->last_vcl_nal = NULL;
+ }
+ parser->position = NON_VCL;
+ } else {
+ parser->position = NON_VCL;
+ }
+
+ switch(nal->nal_unit_type) {
+ case NAL_SPS:
+ nal_buffer_append(parser->sps_buffer, nal);
+ break;
+ case NAL_PPS:
+ nal_buffer_append(parser->pps_buffer, nal);
+ break;
+ case NAL_SEI: {
+ if (parser->pic != NULL) {
+ if(parser->pic->sei_nal) {
+ release_nal_unit(parser->pic->sei_nal);
+ }
+ lock_nal_unit(nal);
+ parser->pic->sei_nal = nal;
+ interpret_sei(parser->pic);
+ }
+ }
+ default:
+ break;
+ }
+
+ /**
+ * in case of an access unit which does not contain any
+ * non-vcl nal units we have to detect the new access
+ * unit through the algorithm for detecting first vcl nal
+ * units of a primary coded picture
+ */
+ if (parser->position == VCL && parser->last_vcl_nal != NULL &&
+ nal->nal_unit_type >= NAL_SLICE && nal->nal_unit_type <= NAL_SLICE_IDR) {
+ /**
+ * frame boundary detection according to
+ * ITU-T Rec. H264 (11/2007) chapt 7.4.1.2.4, p65
+ */
+ struct nal_unit* last_nal = parser->last_vcl_nal;
+
+ if (nal == NULL || last_nal == NULL) {
+ ret = 1;
+ } else if (nal->slc.frame_num != last_nal->slc.frame_num) {
+ ret = 1;
+ } else if (nal->slc.pic_parameter_set_id
+ != last_nal->slc.pic_parameter_set_id) {
+ ret = 1;
+ } else if (nal->slc.field_pic_flag
+ != last_nal->slc.field_pic_flag) {
+ ret = 1;
+ } else if (nal->slc.bottom_field_flag
+ != last_nal->slc.bottom_field_flag) {
+ ret = 1;
+ } else if (nal->nal_ref_idc != last_nal->nal_ref_idc &&
+ (nal->nal_ref_idc == 0 || last_nal->nal_ref_idc == 0)) {
+ ret = 1;
+ } else if (nal->sps.pic_order_cnt_type == 0
+ && last_nal->sps.pic_order_cnt_type == 0
+ && (nal->slc.pic_order_cnt_lsb != last_nal->slc.pic_order_cnt_lsb
+ || nal->slc.delta_pic_order_cnt_bottom
+ != last_nal->slc.delta_pic_order_cnt_bottom)) {
+ ret = 1;
+ } else if (nal->sps.pic_order_cnt_type == 1
+ && last_nal->sps.pic_order_cnt_type == 1
+ && (nal->slc.delta_pic_order_cnt[0]
+ != last_nal->slc.delta_pic_order_cnt[0]
+ || nal->slc.delta_pic_order_cnt[1]
+ != last_nal->slc.delta_pic_order_cnt[1])) {
+ ret = 1;
+ } else if (nal->nal_unit_type != last_nal->nal_unit_type && (nal->nal_unit_type
+ == NAL_SLICE_IDR || last_nal->nal_unit_type == NAL_SLICE_IDR)) {
+ ret = 1;
+ } else if (nal->nal_unit_type == NAL_SLICE_IDR
+ && last_nal->nal_unit_type == NAL_SLICE_IDR && nal->slc.idr_pic_id
+ != last_nal->slc.idr_pic_id) {
+ ret = 1;
+ }
+
+    /* a new frame was detected, so complete the current picture */
+ if (ret && *completed_picture == NULL) {
+ *completed_picture = parser->pic;
+ parser->pic = create_coded_picture();
+ }
+
+ } else if (nal->nal_unit_type == NAL_PPS || nal->nal_unit_type == NAL_SPS) {
+ ret = 2;
+ } else if (nal->nal_unit_type == NAL_AU_DELIMITER) {
+ ret = 2;
+ } else if (nal->nal_unit_type == NAL_END_OF_SEQUENCE) {
+ ret = 3;
+ } else if (nal->nal_unit_type >= NAL_SEI) {
+ ret = 2;
+ }
+
+ if (parser->pic) {
+
+ if (nal->nal_unit_type == NAL_SLICE_IDR) {
+ parser->pic->flag_mask |= IDR_PIC;
+ }
+
+    /* the reference flag is only derived from slice NALs;
+     * nal_ref_idc on SPS/PPS/SEI units is not relevant
+     * for the vdpau decoder.
+     */
+ if (nal->nal_ref_idc &&
+ nal->nal_unit_type <= NAL_SLICE_IDR) {
+ parser->pic->flag_mask |= REFERENCE;
+ } else if (!nal->nal_ref_idc &&
+ nal->nal_unit_type >= NAL_SLICE &&
+ nal->nal_unit_type <= NAL_PART_C) {
+      /* remove the reference flag if a picture is not
+       * continuously flagged as reference. */
+ parser->pic->flag_mask &= ~REFERENCE;
+ }
+
+ if (nal->nal_unit_type >= NAL_SLICE &&
+ nal->nal_unit_type <= NAL_SLICE_IDR) {
+ lock_nal_unit(nal);
+ if(parser->last_vcl_nal) {
+ release_nal_unit(parser->last_vcl_nal);
+ }
+ parser->last_vcl_nal = nal;
+
+ parser->pic->slice_cnt++;
+ if(parser->pic->slc_nal) {
+ release_nal_unit(parser->pic->slc_nal);
+ }
+ lock_nal_unit(nal);
+ parser->pic->slc_nal = nal;
+
+ interpret_slice_header(parser, nal);
+ }
+
+ if (*completed_picture != NULL &&
+ (*completed_picture)->slice_cnt > 0) {
+ calculate_pic_order(parser, *completed_picture,
+ &((*completed_picture)->slc_nal->slc));
+ interpret_sps(*completed_picture, parser);
+ interpret_pps(*completed_picture);
+ }
+ }
+
+ release_nal_unit(nal);
+ return ret;
+}
+
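+/**
+ * looks for the next NAL unit boundary in buf
+ * @return byte offset of the next NAL within buf, or -1 if no further
+ *         NAL start was found (or not enough data is buffered yet)
+ */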
+int seek_for_nal(uint8_t *buf, int buf_len, struct h264_parser *parser)
+{
+ if(buf_len <= 0)
+ return -1;
+
+ if(parser->nal_size_length > 0) {
+ if(buf_len < parser->nal_size_length) {
+ return -1;
+ }
+
+ uint32_t next_nal = parser->next_nal_position;
+ if(!next_nal) {
+ struct buf_reader bufr;
+
+ bufr.buf = buf;
+ bufr.cur_pos = buf;
+ bufr.cur_offset = 8;
+ bufr.len = buf_len;
+
+ next_nal = read_bits(&bufr, parser->nal_size_length*8)+parser->nal_size_length;
+ }
+
+ if(next_nal > buf_len) {
+ parser->next_nal_position = next_nal;
+ return -1;
+ } else
+ parser->next_nal_position = 0;
+
+ return next_nal;
+ }
+
+ /* NAL_END_OF_SEQUENCE has only 1 byte, so
+ * we do not need to search for the next start sequence */
+ if(buf[0] == NAL_END_OF_SEQUENCE)
+ return 1;
+
+ int i;
+ for (i = 0; i < buf_len - 2; i++) {
+ if (buf[i] == 0x00 && buf[i + 1] == 0x00 && buf[i + 2] == 0x01) {
+ //lprintf("found nal at: %d\n", i);
+ return i;
+ }
+ }
+
+ return -1;
+}
diff --git a/src/video_dec/libvdpau/h264_parser.h b/src/video_dec/libvdpau/h264_parser.h
new file mode 100644
index 000000000..49bc56bab
--- /dev/null
+++ b/src/video_dec/libvdpau/h264_parser.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2008 Julian Scheel
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * h264_parser.h: almost full-featured H264 NAL parser
+ */
+
+#ifndef NAL_PARSER_H_
+#define NAL_PARSER_H_
+
+#include <stdlib.h>
+
+#include <xine/xine_internal.h>
+#include "nal.h"
+#include "dpb.h"
+
+#define MAX_FRAME_SIZE 1024*1024
+
+/* specifies whether the parser last parsed
+ * non-vcl or vcl nal units; access unit
+ * boundaries are detected based on this
+ */
+enum parser_position {
+ NON_VCL,
+ VCL
+};
+
+enum parser_flags {
+ CPB_DPB_DELAYS_PRESENT = 0x01,
+ PIC_STRUCT_PRESENT = 0x02
+};
+
+struct h264_parser {
+ uint8_t buf[MAX_FRAME_SIZE];
+ uint32_t buf_len;
+
+ /* prebuf is used to store the currently
+ * processed nal unit */
+ uint8_t prebuf[MAX_FRAME_SIZE];
+ uint32_t prebuf_len;
+ uint32_t next_nal_position;
+
+ uint8_t last_nal_res;
+
+ uint8_t nal_size_length;
+ uint32_t next_nal_size;
+ uint8_t *nal_size_length_buf;
+ uint8_t have_nal_size_length_buf;
+
+ enum parser_position position;
+
+ struct coded_picture *pic;
+
+ struct nal_unit *last_vcl_nal;
+ struct nal_buffer *sps_buffer;
+ struct nal_buffer *pps_buffer;
+
+ uint32_t prev_pic_order_cnt_lsb;
+ uint32_t prev_pic_order_cnt_msb;
+ uint32_t frame_num_offset;
+
+ int32_t prev_top_field_order_cnt;
+
+ uint32_t curr_pic_num;
+
+ uint16_t flag_mask;
+
+ /* the dpb holds reference frames handed to vdpau
+ * as well as frames awaiting display reordering
+ */
+ struct dpb *dpb;
+
+ xine_t *xine;
+};
+
+int parse_nal(uint8_t *buf, int buf_len, struct h264_parser *parser,
+ struct coded_picture **completed_picture);
+
+int seek_for_nal(uint8_t *buf, int buf_len, struct h264_parser *parser);
+
+struct h264_parser* init_parser(xine_t *xine);
+void reset_parser(struct h264_parser *parser);
+void free_parser(struct h264_parser *parser);
+int parse_frame(struct h264_parser *parser, uint8_t *inbuf, int inbuf_len,
+ int64_t pts,
+ uint8_t **ret_buf, uint32_t *ret_len, struct coded_picture **ret_pic);
+
+/* this has to be called after decoding the frame delivered by parse_frame,
+ * but before adding a decoded frame to the dpb.
+ */
+void process_mmc_operations(struct h264_parser *parser, struct coded_picture *picture);
+
+void parse_codec_private(struct h264_parser *parser, uint8_t *inbuf, int inbuf_len);
+
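+/* rough usage sketch (illustrative only, mirroring the call sequence
+ * in vdpau_h264.c; error handling and the actual decoder calls are
+ * omitted, and buf/buf_len/pts/xine stand for caller-provided data):
+ *
+ *   struct h264_parser *parser = init_parser(xine);
+ *   struct coded_picture *pic = NULL;
+ *   uint8_t *slices; uint32_t slices_len;
+ *   int used = 0;
+ *   while (used < buf_len) {
+ *     used += parse_frame(parser, buf + used, buf_len - used, pts,
+ *                         &slices, &slices_len, &pic);
+ *     if (pic) {
+ *       // hand slices/slices_len to the hardware decoder, then call
+ *       // process_mmc_operations(parser, pic) before adding the
+ *       // decoded frame to the dpb
+ *     }
+ *   }
+ *   free_parser(parser);
+ */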
+#endif
diff --git a/src/video_dec/libvdpau/nal.c b/src/video_dec/libvdpau/nal.c
new file mode 100644
index 000000000..c3693c5f2
--- /dev/null
+++ b/src/video_dec/libvdpau/nal.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2008 Julian Scheel
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * nal.c: nal-structure utility functions
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "nal.h"
+#include <xine/xine_internal.h>
+
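+/**
+ * creates a nal buffer that holds at most max_size nal units;
+ * appending beyond that limit releases the oldest unit first
+ */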
+struct nal_buffer* create_nal_buffer(uint8_t max_size)
+{
+ struct nal_buffer *nal_buffer = calloc(1, sizeof(struct nal_buffer));
+ nal_buffer->max_size = max_size;
+
+ return nal_buffer;
+}
+
+/**
+ * destroys a nal buffer. all referenced nals are released
+ */
+void free_nal_buffer(struct nal_buffer *nal_buffer)
+{
+ struct nal_unit *nal = nal_buffer->first;
+
+ while (nal) {
+ struct nal_unit *delete = nal;
+ nal = nal->next;
+ release_nal_unit(delete);
+ }
+
+ free(nal_buffer);
+}
+
+/**
+ * appends a nal unit to the end of the buffer
+ */
+void nal_buffer_append(struct nal_buffer *nal_buffer, struct nal_unit *nal)
+{
+ if(nal_buffer->used == nal_buffer->max_size) {
+ nal_buffer_remove(nal_buffer, nal_buffer->first);
+ }
+
+ if (nal_buffer->first == NULL) {
+ nal_buffer->first = nal_buffer->last = nal;
+ nal->prev = nal->next = NULL;
+
+ lock_nal_unit(nal);
+ nal_buffer->used++;
+ } else if (nal_buffer->last != NULL) {
+ nal_buffer->last->next = nal;
+ nal->prev = nal_buffer->last;
+ nal_buffer->last = nal;
+
+ lock_nal_unit(nal);
+ nal_buffer->used++;
+ } else {
+ lprintf("ERR: nal_buffer is in a broken state\n");
+ }
+}
+
+void nal_buffer_remove(struct nal_buffer *nal_buffer, struct nal_unit *nal)
+{
+ if (nal == nal_buffer->first && nal == nal_buffer->last) {
+ nal_buffer->first = nal_buffer->last = NULL;
+ } else {
+ if (nal == nal_buffer->first) {
+ nal_buffer->first = nal->next;
+ nal_buffer->first->prev = NULL;
+ } else {
+ nal->prev->next = nal->next;
+ }
+
+ if (nal == nal_buffer->last) {
+ nal_buffer->last = nal->prev;
+ nal_buffer->last->next = NULL;
+ } else {
+ nal->next->prev = nal->prev;
+ }
+ }
+
+ nal->next = nal->prev = NULL;
+ release_nal_unit(nal);
+
+ nal_buffer->used--;
+}
+
+void nal_buffer_flush(struct nal_buffer *nal_buffer)
+{
+ while(nal_buffer->used > 0) {
+ nal_buffer_remove(nal_buffer, nal_buffer->first);
+ }
+}
+
+/**
+ * returns the last element in the buffer
+ */
+struct nal_unit *nal_buffer_get_last(struct nal_buffer *nal_buffer)
+{
+ return nal_buffer->last;
+}
+
+/**
+ * get a nal unit from a nal_buffer by its
+ * seq_parameter_set_id
+ */
+struct nal_unit* nal_buffer_get_by_sps_id(struct nal_buffer *nal_buffer,
+ uint32_t seq_parameter_set_id)
+{
+ struct nal_unit *nal = nal_buffer->last;
+
+ if (nal != NULL) {
+ do {
+ if(nal->nal_unit_type == NAL_SPS) {
+ if(nal->sps.seq_parameter_set_id == seq_parameter_set_id) {
+ return nal;
+ }
+ }
+
+ nal = nal->prev;
+ } while(nal != NULL);
+ }
+
+ return NULL;
+}
+
+/**
+ * get a nal unit from a nal_buffer by its
+ * pic_parameter_set_id
+ */
+struct nal_unit* nal_buffer_get_by_pps_id(struct nal_buffer *nal_buffer,
+ uint32_t pic_parameter_set_id)
+{
+ struct nal_unit *nal = nal_buffer->last;
+
+ if (nal != NULL) {
+ do {
+ if(nal->nal_unit_type == NAL_PPS) {
+ if(nal->pps.pic_parameter_set_id == pic_parameter_set_id) {
+ return nal;
+ }
+ }
+
+ nal = nal->prev;
+ } while(nal != NULL);
+ }
+
+ return NULL;
+}
+
+/**
+ * create a new nal unit, with a lock_counter of 1
+ */
+struct nal_unit* create_nal_unit(void)
+{
+ struct nal_unit *nal = calloc(1, sizeof(struct nal_unit));
+ nal->lock_counter = 1;
+
+ return nal;
+}
+
+void lock_nal_unit(struct nal_unit *nal)
+{
+ nal->lock_counter++;
+}
+
+void release_nal_unit(struct nal_unit *nal)
+{
+ if(!nal)
+ return;
+
+ nal->lock_counter--;
+
+ if(nal->lock_counter <= 0) {
+ free(nal);
+ }
+}
+
+/**
+ * creates a copy of a nal unit with a single lock
+ */
+void copy_nal_unit(struct nal_unit *dest, struct nal_unit *src)
+{
+ /* copy the whole unit, including the embedded sps, pps and slc data: */
+ int size = sizeof(struct nal_unit);
+
+ xine_fast_memcpy(dest, src, size);
+ dest->lock_counter = 1;
+ dest->prev = dest->next = NULL;
+}
diff --git a/src/video_dec/libvdpau/nal.h b/src/video_dec/libvdpau/nal.h
new file mode 100644
index 000000000..f40617cd0
--- /dev/null
+++ b/src/video_dec/libvdpau/nal.h
@@ -0,0 +1,501 @@
+/*
+ * Copyright (C) 2008 Julian Scheel
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * nal.h: H264 NAL structures
+ */
+
+#ifndef NAL_H_
+#define NAL_H_
+#include <stdint.h>
+#include <vdpau/vdpau.h>
+
+enum nal_unit_types
+{
+ NAL_UNSPECIFIED = 0,
+ NAL_SLICE,
+ NAL_PART_A,
+ NAL_PART_B,
+ NAL_PART_C,
+ NAL_SLICE_IDR,
+ NAL_SEI,
+ NAL_SPS,
+ NAL_PPS,
+ NAL_AU_DELIMITER,
+ NAL_END_OF_SEQUENCE,
+ NAL_END_OF_STREAM,
+ NAL_FILLER_DATA,
+ NAL_SPS_EXT
+};
+
+enum pic_struct {
+ DISP_FRAME = 0,
+ DISP_TOP,
+ DISP_BOTTOM,
+ DISP_TOP_BOTTOM,
+ DISP_BOTTOM_TOP,
+ DISP_TOP_BOTTOM_TOP,
+ DISP_BOTTOM_TOP_BOTTOM,
+ DISP_FRAME_DOUBLING,
+ DISP_FRAME_TRIPLING
+};
+
+enum ct_type {
+ CT_PROGRESSIVE = 0,
+ CT_INTERLACED,
+ CT_UNKNOWN,
+ CT_RESERVED
+};
+
+/* slice types 5-9 repeat the meaning of types 0-4, so we
+ * need a helper function for comparison
+ */
+enum slice_types
+{
+ SLICE_P = 0, SLICE_B, SLICE_I, SLICE_SP, SLICE_SI
+};
+
+enum aspect_ratio
+{
+ ASPECT_UNSPECIFIED = 0,
+ ASPECT_1_1,
+ ASPECT_12_11,
+ ASPECT_10_11,
+ ASPECT_16_11,
+ ASPECT_40_33,
+ ASPECT_24_11,
+ ASPECT_20_11,
+ ASPECT_32_11,
+ ASPECT_80_33,
+ ASPECT_18_11,
+ ASPECT_15_11,
+ ASPECT_64_33,
+ ASPECT_160_99,
+ ASPECT_4_3,
+ ASPECT_3_2,
+ ASPECT_2_1,
+ ASPECT_RESERVED,
+ ASPECT_EXTENDED_SAR=255
+};
+
+static const uint8_t zigzag_4x4[16] = {
+ 0+0*4, 1+0*4, 0+1*4, 0+2*4,
+ 1+1*4, 2+0*4, 3+0*4, 2+1*4,
+ 1+2*4, 0+3*4, 1+3*4, 2+2*4,
+ 3+1*4, 3+2*4, 2+3*4, 3+3*4,
+};
+
+static const uint8_t zigzag_8x8[64] = {
+ 0+0*8, 1+0*8, 0+1*8, 0+2*8,
+ 1+1*8, 2+0*8, 3+0*8, 2+1*8,
+ 1+2*8, 0+3*8, 0+4*8, 1+3*8,
+ 2+2*8, 3+1*8, 4+0*8, 5+0*8,
+ 4+1*8, 3+2*8, 2+3*8, 1+4*8,
+ 0+5*8, 0+6*8, 1+5*8, 2+4*8,
+ 3+3*8, 4+2*8, 5+1*8, 6+0*8,
+ 7+0*8, 6+1*8, 5+2*8, 4+3*8,
+ 3+4*8, 2+5*8, 1+6*8, 0+7*8,
+ 1+7*8, 2+6*8, 3+5*8, 4+4*8,
+ 5+3*8, 6+2*8, 7+1*8, 7+2*8,
+ 6+3*8, 5+4*8, 4+5*8, 3+6*8,
+ 2+7*8, 3+7*8, 4+6*8, 5+5*8,
+ 6+4*8, 7+3*8, 7+4*8, 6+5*8,
+ 5+6*8, 4+7*8, 5+7*8, 6+6*8,
+ 7+5*8, 7+6*8, 6+7*8, 7+7*8,
+};
+
+static inline uint32_t slice_type(uint32_t slice_type)
+{
+ return (slice_type < 10 ? slice_type % 5 : slice_type);
+}
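+
+/* e.g. both slice_type(2) and slice_type(7) evaluate to SLICE_I */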
+
+#if 0
+static inline void print_slice_type(uint32_t slice_type)
+{
+ switch(slice_type) {
+ case SLICE_P:
+ printf("SLICE_P\n");
+ break;
+ case SLICE_B:
+ printf("SLICE_B\n");
+ break;
+ case SLICE_I:
+ printf("SLICE_I\n");
+ break;
+ case SLICE_SP:
+ printf("SLICE_SP\n");
+ break;
+ case SLICE_SI:
+ printf("SLICE_SI\n");
+ break;
+ default:
+ printf("Unknown SLICE\n");
+ }
+}
+#endif
+
+struct hrd_parameters
+{
+ uint32_t cpb_cnt_minus1;
+ uint8_t bit_rate_scale;
+ uint8_t cpb_size_scale;
+
+ uint32_t bit_rate_value_minus1[32];
+ uint32_t cpb_size_value_minus1[32];
+ uint8_t cbr_flag[32];
+
+ uint8_t initial_cpb_removal_delay_length_minus1;
+ uint8_t cpb_removal_delay_length_minus1;
+ uint8_t dpb_output_delay_length_minus1;
+ uint8_t time_offset_length;
+};
+
+struct seq_parameter_set_rbsp
+{
+ uint8_t profile_idc; // 0xff
+ uint8_t constraint_setN_flag; // 0x0f
+ uint8_t level_idc; // 0xff
+ uint32_t seq_parameter_set_id;
+ uint32_t chroma_format_idc;
+ uint8_t separate_colour_plane_flag; // 0x01
+ uint32_t bit_depth_luma_minus8;
+ uint32_t bit_depth_chroma_minus8;
+ uint8_t qpprime_y_zero_transform_bypass_flag;
+ uint8_t seq_scaling_matrix_present_flag;
+
+ /* if(seq_scaling_matrix_present_flag) */
+ uint8_t seq_scaling_list_present_flag[8];
+
+ uint8_t scaling_lists_4x4[6][16];
+ uint8_t scaling_lists_8x8[2][64];
+ /* endif */
+
+ uint32_t log2_max_frame_num_minus4;
+ uint32_t max_frame_num;
+ uint32_t pic_order_cnt_type;
+ // if pic_order_cnt_type==0
+ uint32_t log2_max_pic_order_cnt_lsb_minus4;
+ // else
+ uint8_t delta_pic_order_always_zero_flag;
+ int32_t offset_for_non_ref_pic;
+ int32_t offset_for_top_to_bottom_field;
+ uint8_t num_ref_frames_in_pic_order_cnt_cycle;
+ int32_t offset_for_ref_frame[256];
+ // TODO: some more ignored here
+ uint32_t num_ref_frames;
+ uint8_t gaps_in_frame_num_value_allowed_flag;
+ /*uint32_t pic_width_in_mbs_minus1;
+ uint32_t pic_height_in_map_units_minus1;*/
+ uint32_t pic_width;
+ uint32_t pic_height;
+ uint8_t frame_mbs_only_flag;
+ uint8_t mb_adaptive_frame_field_flag;
+ uint8_t direct_8x8_inference_flag;
+ uint8_t frame_cropping_flag;
+ uint32_t frame_crop_left_offset;
+ uint32_t frame_crop_right_offset;
+ uint32_t frame_crop_top_offset;
+ uint32_t frame_crop_bottom_offset;
+ uint8_t vui_parameters_present_flag;
+
+ /* vui_parameters */
+ struct
+ {
+ uint8_t aspect_ration_info_present_flag;
+
+ /* aspect_ration_info_present_flag == 1 */
+ uint8_t aspect_ratio_idc;
+ uint16_t sar_width;
+ uint16_t sar_height;
+
+ uint8_t overscan_info_present_flag;
+ /* overscan_info_present_flag == 1 */
+ uint8_t overscan_appropriate_flag;
+
+ uint8_t video_signal_type_present_flag;
+ /* video_signal_type_present_flag == 1 */
+ uint8_t video_format;
+ uint8_t video_full_range_flag;
+ uint8_t colour_description_present;
+ /* colour_description_present == 1 */
+ uint8_t colour_primaries;
+ uint8_t transfer_characteristics;
+ uint8_t matrix_coefficients;
+
+ uint8_t chroma_loc_info_present_flag;
+ /* chroma_loc_info_present_flag == 1 */
+ uint8_t chroma_sample_loc_type_top_field;
+ uint8_t chroma_sample_loc_type_bottom_field;
+
+ uint8_t timing_info_present_flag;
+ /* timing_info_present_flag == 1 */
+ uint32_t num_units_in_tick;
+ uint32_t time_scale;
+ uint8_t fixed_frame_rate_flag;
+
+ uint8_t nal_hrd_parameters_present_flag;
+ struct hrd_parameters nal_hrd_parameters;
+
+ uint8_t vc1_hrd_parameters_present_flag;
+ struct hrd_parameters vc1_hrd_parameters;
+
+ uint8_t low_delay_hrd_flag;
+
+ uint8_t pic_struct_present_flag;
+ uint8_t bitstream_restriction_flag;
+
+ /* bitstream_restriction_flag == 1 */
+ uint8_t motion_vectors_over_pic_boundaries;
+ uint32_t max_bytes_per_pic_denom;
+ uint32_t max_bits_per_mb_denom;
+ uint32_t log2_max_mv_length_horizontal;
+ uint32_t log2_max_mv_length_vertical;
+ uint32_t num_reorder_frames;
+ uint32_t max_dec_frame_buffering;
+ } vui_parameters;
+
+};
+
+struct pic_parameter_set_rbsp
+{
+ uint32_t pic_parameter_set_id;
+ uint32_t seq_parameter_set_id;
+ uint8_t entropy_coding_mode_flag;
+ uint8_t pic_order_present_flag;
+
+ uint32_t num_slice_groups_minus1;
+
+ /* num_slice_groups_minus1 > 0 */
+ uint32_t slice_group_map_type;
+
+ /* slice_group_map_type == 1 */
+ uint32_t run_length_minus1[64];
+
+ /* slice_group_map_type == 2 */
+ uint32_t top_left[64];
+ uint32_t bottom_right[64];
+
+ /* slice_group_map_type == 3,4,5 */
+ uint8_t slice_group_change_direction_flag;
+ uint32_t slice_group_change_rate_minus1;
+
+ /* slice_group_map_type == 6 */
+ uint32_t pic_size_in_map_units_minus1;
+ uint8_t slice_group_id[64];
+
+ uint32_t num_ref_idx_l0_active_minus1;
+ uint32_t num_ref_idx_l1_active_minus1;
+ uint8_t weighted_pred_flag;
+ uint8_t weighted_bipred_idc;
+ int32_t pic_init_qp_minus26;
+ int32_t pic_init_qs_minus26;
+ int32_t chroma_qp_index_offset;
+ uint8_t deblocking_filter_control_present_flag;
+ uint8_t constrained_intra_pred_flag;
+ uint8_t redundant_pic_cnt_present_flag;
+
+ /* if(more_rbsp_data) */
+ uint8_t transform_8x8_mode_flag;
+ uint8_t pic_scaling_matrix_present_flag;
+
+ /* if(pic_scaling_matrix_present_flag) */
+ uint8_t pic_scaling_list_present_flag[8];
+
+ uint8_t scaling_lists_4x4[6][16];
+ uint8_t scaling_lists_8x8[2][64];
+
+ int32_t second_chroma_qp_index_offset;
+};
+
+/*struct clock_timestamp {
+ uint8_t ct_type;
+ uint8_t nuit_fiel_based_flag;
+ uint8_t counting_type;
+ uint8_t full_timestamp_flag;
+ uint8_t discontinuity_flag;
+ uint8_t cnt_dropped_flag;
+ uint8_t n_frames
+};*/
+
+/* the sei carries various additional information; we only
+ * care about pic_timing, which is needed to handle display
+ * reordering
+ */
+struct sei_message
+{
+ uint32_t payload_type;
+ uint8_t last_payload_type_byte;
+ uint32_t payload_size;
+ uint8_t last_payload_size_byte;
+
+ struct
+ {
+ /* cpb_dpb_delays_present_flag == 1 */
+ uint8_t cpb_removal_delay;
+ uint8_t dpb_output_delay;
+
+ uint8_t pic_struct;
+ uint8_t ct_type : 1;
+ uint8_t nuit_field_based_flag : 1;
+ uint8_t counting_type : 5;
+ uint8_t full_timestamp_flag : 1;
+ uint8_t discontinuity_flag : 1;
+ uint8_t cnt_dropped_flag : 1;
+ uint8_t n_frames;
+
+ uint8_t seconds_value : 6;
+ uint8_t minutes_value : 6;
+ uint8_t hours_value : 5;
+
+ int32_t time_offset;
+ } pic_timing;
+};
+
+struct slice_header
+{
+ uint32_t first_mb_in_slice;
+ uint32_t slice_type;
+ uint32_t pic_parameter_set_id;
+ uint8_t colour_plane_id;
+ uint32_t frame_num;
+ uint8_t field_pic_flag;
+ uint8_t bottom_field_flag;
+ uint32_t idr_pic_id;
+
+ /* sps->pic_order_cnt_type == 0 */
+ uint32_t pic_order_cnt_lsb;
+ int32_t delta_pic_order_cnt_bottom;
+ /* sps->pic_order_cnt_type == 1 && !sps->delta_pic_order_always_zero_flag */
+ int32_t delta_pic_order_cnt[2];
+
+ /* pps->redundant_pic_cnt_present_flag == 1 */
+ int32_t redundant_pic_cnt;
+
+ /* slice_type == B */
+ uint8_t direct_spatial_mv_pred_flag;
+
+ /* slice_type == P, SP, B */
+ uint8_t num_ref_idx_active_override_flag;
+ /* num_ref_idx_active_override_flag == 1 */
+ uint32_t num_ref_idx_l0_active_minus1;
+ /* slice type == B */
+ uint32_t num_ref_idx_l1_active_minus1;
+
+ /* ref_pic_list_reordering */
+ struct
+ {
+ /* slice_type != I && slice_type != SI */
+ uint8_t ref_pic_list_reordering_flag_l0;
+
+ /* slice_type == B */
+ uint8_t ref_pic_list_reordering_flag_l1;
+
+ /* ref_pic_list_reordering_flag_l0 == 1 */
+ uint32_t reordering_of_pic_nums_idc;
+
+ /* reordering_of_pic_nums_idc == 0, 1 */
+ uint32_t abs_diff_pic_num_minus1;
+
+ /* reordering_of_pic_nums_idc == 2) */
+ uint32_t long_term_pic_num;
+ } ref_pic_list_reordering;
+
+ /* pred_weight_table */
+ struct
+ {
+ uint32_t luma_log2_weight_denom;
+
+ /* chroma_format_idc != 0 */
+ uint32_t chroma_log2_weight_denom;
+
+ int32_t luma_weight_l0[32];
+ int32_t luma_offset_l0[32];
+
+ int32_t chroma_weight_l0[32][2];
+ int32_t chroma_offset_l0[32][2];
+
+ int32_t luma_weight_l1[32];
+ int32_t luma_offset_l1[32];
+
+ int32_t chroma_weight_l1[32][2];
+ int32_t chroma_offset_l1[32][2];
+ } pred_weight_table;
+
+ /* def_rec_pic_marking */
+ struct
+ {
+
+ /* nal_unit_type == NAL_SLICE_IDR */
+ uint8_t no_output_of_prior_pics_flag;
+ uint8_t long_term_reference_flag;
+
+ /* else */
+ uint8_t adaptive_ref_pic_marking_mode_flag;
+ uint32_t memory_management_control_operation;
+
+ uint32_t difference_of_pic_nums_minus1;
+ uint32_t long_term_pic_num;
+ uint32_t long_term_frame_idx;
+ uint32_t max_long_term_frame_idx_plus1;
+ } dec_ref_pic_marking[10];
+ uint32_t dec_ref_pic_marking_count;
+};
+
+struct nal_unit {
+ uint8_t nal_ref_idc; // 0x03
+ enum nal_unit_types nal_unit_type; // 0x1f
+
+ //union {
+ struct sei_message sei;
+ struct seq_parameter_set_rbsp sps;
+ struct pic_parameter_set_rbsp pps;
+ struct slice_header slc;
+ //};
+
+ struct nal_unit *prev;
+ struct nal_unit *next;
+
+ uint32_t lock_counter;
+};
+
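+/* bounded, doubly linked list of reference counted nal units, oldest first */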
+struct nal_buffer {
+ struct nal_unit *first;
+ struct nal_unit *last;
+
+ uint8_t max_size;
+ uint8_t used;
+};
+
+struct nal_buffer* create_nal_buffer(uint8_t max_size);
+void free_nal_buffer(struct nal_buffer *nal_buffer);
+void nal_buffer_append(struct nal_buffer *nal_buffer, struct nal_unit *nal);
+void nal_buffer_remove(struct nal_buffer *nal_buffer, struct nal_unit *nal);
+void nal_buffer_flush(struct nal_buffer *nal_buffer);
+
+struct nal_unit* nal_buffer_get_by_sps_id(struct nal_buffer *nal_buffer,
+ uint32_t seq_parameter_set_id);
+struct nal_unit* nal_buffer_get_by_pps_id(struct nal_buffer *nal_buffer,
+ uint32_t pic_parameter_set_id);
+struct nal_unit* nal_buffer_get_last(struct nal_buffer *nal_buffer);
+
+struct nal_unit* create_nal_unit(void);
+void lock_nal_unit(struct nal_unit *nal);
+void release_nal_unit(struct nal_unit *nal);
+void copy_nal_unit(struct nal_unit *dest, struct nal_unit *src);
+
+#endif /* NAL_H_ */
diff --git a/src/video_dec/libvdpau/vdpau_h264.c b/src/video_dec/libvdpau/vdpau_h264.c
new file mode 100644
index 000000000..25ed62295
--- /dev/null
+++ b/src/video_dec/libvdpau/vdpau_h264.c
@@ -0,0 +1,1014 @@
+/*
+ * Copyright (C) 2008 Julian Scheel
+ *
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; remove-trailing-space on;
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * vdpau_h264.c: H264 Video Decoder utilizing nvidia VDPAU engine
+ */
+
+#define LOG_MODULE "vdpau_h264"
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <vdpau/vdpau.h>
+
+#include <xine/xine_internal.h>
+#include <xine/video_out.h>
+#include <xine/buffer.h>
+#include <xine/xineutils.h>
+#include "bswap.h"
+#include "accel_vdpau.h"
+#include "h264_parser.h"
+#include "dpb.h"
+#include "cpb.h"
+
+//#define DEBUG_H264
+
+#define VIDEOBUFSIZE 128*1024
+
+typedef struct {
+ video_decoder_class_t decoder_class;
+} vdpau_h264_class_t;
+
+typedef struct vdpau_h264_decoder_s {
+ video_decoder_t video_decoder; /* parent video decoder structure */
+
+ vdpau_h264_class_t *class;
+ xine_stream_t *stream;
+
+ /* these are traditional variables in a video decoder object */
+ uint64_t video_step; /* frame duration in pts units */
+ uint64_t reported_video_step; /* frame duration in pts units */
+
+ int width; /* the width of a video frame */
+ int height; /* the height of a video frame */
+ double ratio; /* the width to height ratio */
+
+
+ struct h264_parser *nal_parser; /* h264 nal parser. extracts stream data for vdpau */
+
+ struct decoded_picture *incomplete_pic;
+ uint32_t last_top_field_order_cnt;
+
+ int have_frame_boundary_marks;
+ int wait_for_frame_start;
+
+ VdpDecoder decoder;
+ int decoder_started;
+ int progressive_cnt; /* count of progressive marked frames in line */
+
+ VdpColorStandard color_standard;
+ VdpDecoderProfile profile;
+ vdpau_accel_t *vdpau_accel;
+
+ xine_t *xine;
+
+ struct coded_picture *completed_pic;
+ vo_frame_t *dangling_img;
+
+ uint8_t *codec_private;
+ uint32_t codec_private_len;
+
+ int vdp_runtime_nr;
+
+ int reset;
+
+} vdpau_h264_decoder_t;
+
+static void vdpau_h264_reset (video_decoder_t *this_gen);
+static void vdpau_h264_flush (video_decoder_t *this_gen);
+
+/**************************************************************************
+ * vdpau_h264 specific decode functions
+ *************************************************************************/
+
+/**************************************************************************
+ * xine video plugin functions
+ *************************************************************************/
+
+#ifdef DEBUG_H264
+static inline void dump_pictureinfo_h264(VdpPictureInfoH264 *pic)
+{
+ printf("C: slice_count: %d\n", pic->slice_count);
+ printf("C: field_order_cnt[0]: %d\n", pic->field_order_cnt[0]);
+ printf("C: field_order_cnt[1]: %d\n", pic->field_order_cnt[1]);
+ printf("C: is_reference: %d\n", pic->is_reference);
+ printf("C: frame_num: %d\n", pic->frame_num);
+ printf("C: field_pic_flag: %d\n", pic->field_pic_flag);
+ printf("C: bottom_field_flag: %d\n", pic->bottom_field_flag);
+ printf("C: num_ref_frames: %d\n", pic->num_ref_frames);
+ printf("C: mb_adaptive_frame_field_flag: %d\n", pic->mb_adaptive_frame_field_flag);
+ printf("C: constrained_intra_pred_flag: %d\n", pic->constrained_intra_pred_flag);
+ printf("C: weighted_pred_flag: %d\n", pic->weighted_pred_flag);
+ printf("C: weighted_bipred_idc: %d\n", pic->weighted_bipred_idc);
+ printf("C: frame_mbs_only_flag: %d\n", pic->frame_mbs_only_flag);
+ printf("C: transform_8x8_mode_flag: %d\n", pic->transform_8x8_mode_flag);
+ printf("C: chroma_qp_index_offset: %d\n", pic->chroma_qp_index_offset);
+ printf("C: second_chroma_qp_index_offset: %d\n", pic->second_chroma_qp_index_offset);
+ printf("C: pic_init_qp_minus26: %d\n", pic->pic_init_qp_minus26);
+ printf("C: num_ref_idx_l0_active_minus1: %d\n", pic->num_ref_idx_l0_active_minus1);
+ printf("C: num_ref_idx_l1_active_minus1: %d\n", pic->num_ref_idx_l1_active_minus1);
+ printf("C: log2_max_frame_num_minus4: %d\n", pic->log2_max_frame_num_minus4);
+ printf("C: pic_order_cnt_type: %d\n", pic->pic_order_cnt_type);
+ printf("C: log2_max_pic_order_cnt_lsb_minus4: %d\n", pic->log2_max_pic_order_cnt_lsb_minus4);
+ printf("C: delta_pic_order_always_zero_flag: %d\n", pic->delta_pic_order_always_zero_flag);
+ printf("C: direct_8x8_inference_flag: %d\n", pic->direct_8x8_inference_flag);
+ printf("C: entropy_coding_mode_flag: %d\n", pic->entropy_coding_mode_flag);
+ printf("C: pic_order_present_flag: %d\n", pic->pic_order_present_flag);
+ printf("C: deblocking_filter_control_present_flag: %d\n", pic->deblocking_filter_control_present_flag);
+ printf("C: redundant_pic_cnt_present_flag: %d\n", pic->redundant_pic_cnt_present_flag);
+
+ int i, j;
+ for(i = 0; i < 6; i++) {
+ printf("C: scalint_list4x4[%d]:\nC:", i);
+ for(j = 0; j < 16; j++) {
+ printf(" [%d]", pic->scaling_lists_4x4[i][j]);
+ if((j+1)%8 == 0)
+ printf("\nC:");
+ }
+ printf("C: \n");
+ }
+ for(i = 0; i < 2; i++) {
+ printf("C: scalint_list8x8[%d]:\nC:", i);
+ for(j = 0; j < 64; j++) {
+ printf(" [%d] ", pic->scaling_lists_8x8[i][j]);
+ if((j+1)%8 == 0)
+ printf("\nC:");
+ }
+ printf("C: \n");
+ }
+
+ //int i;
+ for(i = 0; i < 16; i++) {
+ if(pic->referenceFrames[i].surface != VDP_INVALID_HANDLE) {
+ printf("C: -------------------\n");
+ printf("C: Reference Frame %d:\n", i);
+ printf("C: frame_idx: %d\n", pic->referenceFrames[i].frame_idx);
+ printf("C: field_order_cnt[0]: %d\n", pic->referenceFrames[i].field_order_cnt[0]);
+ printf("C: field_order_cnt[1]: %d\n", pic->referenceFrames[i].field_order_cnt[0]);
+ printf("C: is_long_term: %d\n", pic->referenceFrames[i].is_long_term);
+ printf("C: top_is_reference: %d\n", pic->referenceFrames[i].top_is_reference);
+ printf("C: bottom_is_reference: %d\n", pic->referenceFrames[i].bottom_is_reference);
+ }
+ }
+ printf("C: ---------------------------------------------------------------\n");
+ /*memcpy(pic.scaling_lists_4x4, pps->scaling_lists_4x4, 6*16);
+ memcpy(pic.scaling_lists_8x8, pps->scaling_lists_8x8, 2*64);
+ memcpy(pic.referenceFrames, this->reference_frames, sizeof(this->reference_frames));*/
+
+}
+#endif
+
+static void set_ratio(video_decoder_t *this_gen)
+{
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *)this_gen;
+
+ this->ratio = (double)this->width / (double)this->height;
+ if(this->completed_pic->sps_nal->sps.vui_parameters.aspect_ration_info_present_flag) {
+ switch(this->completed_pic->sps_nal->sps.vui_parameters.aspect_ratio_idc) {
+ case ASPECT_1_1:
+ this->ratio = 1 * this->ratio;
+ break;
+ case ASPECT_12_11:
+ this->ratio *= 12.0/11.0;
+ break;
+ case ASPECT_10_11:
+ this->ratio *= 10.0/11.0;
+ break;
+ case ASPECT_16_11:
+ this->ratio *= 16.0/11.0;
+ break;
+ case ASPECT_40_33:
+ this->ratio *= 40.0/33.0;
+ break;
+ case ASPECT_24_11:
+ this->ratio *= 24.0/11.0;
+ break;
+ case ASPECT_20_11:
+ this->ratio *= 20.0/11.0;
+ break;
+ case ASPECT_32_11:
+ this->ratio *= 32.0/11.0;
+ break;
+ case ASPECT_80_33:
+ this->ratio *= 80.0/33.0;
+ break;
+ case ASPECT_18_11:
+ this->ratio *= 18.0/11.0;
+ break;
+ case ASPECT_15_11:
+ this->ratio *= 15.0/11.0;
+ break;
+ case ASPECT_64_33:
+ this->ratio *= 64.0/33.0;
+ break;
+ case ASPECT_160_99:
+ this->ratio *= 160.0/99.0;
+ break;
+ case ASPECT_4_3:
+ this->ratio *= 4.0/3.0;
+ break;
+ case ASPECT_3_2:
+ this->ratio *= 3.0/2.0;
+ break;
+ case ASPECT_2_1:
+ this->ratio *= 2.0/1.0;
+ break;
+ case ASPECT_EXTENDED_SAR:
+ this->ratio *=
+ (double)this->completed_pic->sps_nal->sps.vui_parameters.sar_width/
+ (double)this->completed_pic->sps_nal->sps.vui_parameters.sar_height;
+ break;
+ }
+ }
+}
+
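+/* translate the sps/pps/slice header data of the completed picture into
+ * the VdpPictureInfoH264 structure consumed by vdp_decoder_render */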
+static void fill_vdpau_pictureinfo_h264(video_decoder_t *this_gen, uint32_t slice_count, VdpPictureInfoH264 *pic)
+{
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *)this_gen;
+
+ struct pic_parameter_set_rbsp *pps = &this->completed_pic->pps_nal->pps;
+ struct seq_parameter_set_rbsp *sps = &this->completed_pic->sps_nal->sps;
+ struct slice_header *slc = &this->completed_pic->slc_nal->slc;
+
+ pic->slice_count = slice_count;
+ pic->field_order_cnt[0] = this->completed_pic->top_field_order_cnt;
+ pic->field_order_cnt[1] = this->completed_pic->bottom_field_order_cnt;
+ pic->is_reference =
+ (this->completed_pic->flag_mask & REFERENCE) ? VDP_TRUE : VDP_FALSE;
+ pic->frame_num = slc->frame_num;
+ pic->field_pic_flag = slc->field_pic_flag;
+ pic->bottom_field_flag = slc->bottom_field_flag;
+ pic->num_ref_frames = sps->num_ref_frames;
+ pic->mb_adaptive_frame_field_flag = sps->mb_adaptive_frame_field_flag && !slc->field_pic_flag;
+ pic->constrained_intra_pred_flag = pps->constrained_intra_pred_flag;
+ pic->weighted_pred_flag = pps->weighted_pred_flag;
+ pic->weighted_bipred_idc = pps->weighted_bipred_idc;
+ pic->frame_mbs_only_flag = sps->frame_mbs_only_flag;
+ pic->transform_8x8_mode_flag = pps->transform_8x8_mode_flag;
+ pic->chroma_qp_index_offset = pps->chroma_qp_index_offset;
+ pic->second_chroma_qp_index_offset = pps->second_chroma_qp_index_offset;
+ pic->pic_init_qp_minus26 = pps->pic_init_qp_minus26;
+ pic->num_ref_idx_l0_active_minus1 = pps->num_ref_idx_l0_active_minus1;
+ pic->num_ref_idx_l1_active_minus1 = pps->num_ref_idx_l1_active_minus1;
+ pic->log2_max_frame_num_minus4 = sps->log2_max_frame_num_minus4;
+ pic->pic_order_cnt_type = sps->pic_order_cnt_type;
+ pic->log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_pic_order_cnt_lsb_minus4;
+ pic->delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
+ pic->direct_8x8_inference_flag = sps->direct_8x8_inference_flag;
+ pic->entropy_coding_mode_flag = pps->entropy_coding_mode_flag;
+ pic->pic_order_present_flag = pps->pic_order_present_flag;
+ pic->deblocking_filter_control_present_flag = pps->deblocking_filter_control_present_flag;
+ pic->redundant_pic_cnt_present_flag = pps->redundant_pic_cnt_present_flag;
+
+ memcpy(pic->scaling_lists_4x4, pps->scaling_lists_4x4, sizeof(pic->scaling_lists_4x4));
+ memcpy(pic->scaling_lists_8x8, pps->scaling_lists_8x8, sizeof(pic->scaling_lists_8x8));
+
+ /* set num_ref_frames to the number of actually available reference frames;
+ * if this is not set, generation 3 decoders will fail. */
+ /*pic->num_ref_frames =*/
+ fill_vdpau_reference_list(this->nal_parser->dpb, pic->referenceFrames);
+
+}
+
+static int check_progressive(video_decoder_t *this_gen, struct decoded_picture *dpic)
+{
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *)this_gen;
+ int progressive = 0;
+ int i;
+
+ for(i = 0; i < 2; i++) {
+ struct coded_picture *pic = dpic->coded_pic[i];
+ if (!pic) {
+ continue;
+ }
+
+ if (pic->flag_mask & PIC_STRUCT_PRESENT && pic->sei_nal != NULL) {
+ uint8_t pic_struct = pic->sei_nal->sei.pic_timing.pic_struct;
+
+ if (pic_struct == DISP_FRAME) {
+ progressive = 1;
+ continue;
+ } else if (pic_struct == DISP_TOP_BOTTOM ||
+ pic_struct == DISP_BOTTOM_TOP) {
+ progressive = 0;
+ break;
+ }
+
+ /* FIXME: seems unreliable; maybe it has to be interpreted in a more complex way */
+ /*if (pic->sei_nal->sei.pic_timing.ct_type == CT_INTERLACED) {
+ return 0;
+ } else if (pic->sei_nal->sei.pic_timing.ct_type == CT_PROGRESSIVE) {
+ return 1;
+ } */
+ }
+
+ if (pic->slc_nal->slc.field_pic_flag && pic->pps_nal->pps.pic_order_present_flag) {
+ if(pic->slc_nal->slc.delta_pic_order_cnt_bottom == 1 ||
+ pic->slc_nal->slc.delta_pic_order_cnt_bottom == -1) {
+ progressive = 0;
+ break;
+ } else {
+ progressive = 1;
+ continue;
+ }
+ }
+ if (!pic->slc_nal->slc.field_pic_flag && pic->sps_nal->sps.frame_mbs_only_flag) {
+ progressive = 1;
+ continue;
+ }
+ }
+
+ if (progressive) {
+ this->progressive_cnt++;
+ } else {
+ this->progressive_cnt = 0;
+ }
+
+ /* only switch to progressive mode if at least 5
+ * frames in a row were marked as progressive */
+ return (this->progressive_cnt >= 5);
+}
+
+static int vdpau_decoder_init(video_decoder_t *this_gen)
+{
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *)this_gen;
+ vo_frame_t *img;
+
+ if(this->width == 0) {
+ this->width = this->completed_pic->sps_nal->sps.pic_width;
+ this->height = this->completed_pic->sps_nal->sps.pic_height;
+ }
+
+ set_ratio(this_gen);
+
+ _x_stream_info_set( this->stream, XINE_STREAM_INFO_VIDEO_WIDTH, this->width );
+ _x_stream_info_set( this->stream, XINE_STREAM_INFO_VIDEO_HEIGHT, this->height );
+ _x_stream_info_set( this->stream, XINE_STREAM_INFO_VIDEO_RATIO, ((double)10000*this->ratio) );
+ _x_stream_info_set( this->stream, XINE_STREAM_INFO_FRAME_DURATION, (this->reported_video_step = this->video_step) );
+ _x_meta_info_set_utf8( this->stream, XINE_META_INFO_VIDEOCODEC, "H264/AVC (vdpau)" );
+ xine_event_t event;
+ xine_format_change_data_t data;
+ event.type = XINE_EVENT_FRAME_FORMAT_CHANGE;
+ event.stream = this->stream;
+ event.data = &data;
+ event.data_length = sizeof(data);
+ data.width = this->width;
+ data.height = this->height;
+ data.aspect = this->ratio;
+ xine_event_send( this->stream, &event );
+
+ switch(this->completed_pic->sps_nal->sps.profile_idc) {
+ case 100:
+ this->profile = VDP_DECODER_PROFILE_H264_HIGH;
+ break;
+ case 77:
+ this->profile = VDP_DECODER_PROFILE_H264_MAIN;
+ break;
+ case 66:
+ default:
+ // nvidia's VDPAU doesn't support BASELINE, but most (if not all) streams marked BASELINE
+ // do not use BASELINE-specific features, so just force MAIN.
+ //this->profile = VDP_DECODER_PROFILE_H264_BASELINE;
+ this->profile = VDP_DECODER_PROFILE_H264_MAIN;
+ break;
+ }
+
+ // Level 4.1 limits:
+ int ref_frames = 0;
+ if(this->completed_pic->sps_nal->sps.num_ref_frames) {
+ ref_frames = this->completed_pic->sps_nal->sps.num_ref_frames;
+ } else {
+ uint32_t round_width = (this->width + 15) & ~15;
+ uint32_t round_height = (this->height + 15) & ~15;
+ uint32_t surf_size = (round_width * round_height * 3) / 2;
+ ref_frames = (12 * 1024 * 1024) / surf_size;
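+ /* e.g. a 1920x1088 stream gives surf_size = 3133440 bytes and thus
+ * (12*1024*1024) / 3133440 = 4 reference frames */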
+ }
+
+ if (ref_frames > 16) {
+ ref_frames = 16;
+ }
+
+ xprintf(this->xine, XINE_VERBOSITY_LOG, "Allocate %d reference frames\n",
+ ref_frames);
+ /* get the vdpau context from vo */
+ //(this->stream->video_out->open) (this->stream->video_out, this->stream);
+ img = this->stream->video_out->get_frame (this->stream->video_out,
+ this->width, this->height,
+ this->ratio,
+ XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS | this->reset);
+ this->reset = 0;
+
+ this->vdpau_accel = (vdpau_accel_t*)img->accel_data;
+
+ img->free(img);
+ img = NULL;
+
+ /*VdpBool is_supported;
+ uint32_t max_level, max_references, max_width, max_height;*/
+ if(this->vdpau_accel->vdp_runtime_nr > 0) {
+ xprintf(this->xine, XINE_VERBOSITY_LOG,
+ "Create decoder: vdp_device: %d, profile: %d, res: %dx%d\n",
+ this->vdpau_accel->vdp_device, this->profile, this->width, this->height);
+
+ VdpStatus status = this->vdpau_accel->vdp_decoder_create(this->vdpau_accel->vdp_device,
+ this->profile, this->width, this->height, 16, &this->decoder);
+
+ if(status != VDP_STATUS_OK) {
+ xprintf(this->xine, XINE_VERBOSITY_LOG, "vdpau_h264: ERROR: VdpDecoderCreate returned status != OK (%s)\n", this->vdpau_accel->vdp_get_error_string(status));
+ return 0;
+ }
+ }
+ return 1;
+}
+
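+/* drain all pictures the dpb has marked for display (all remaining
+ * ones if flush is set) and hand them to the video out */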
+static void draw_frames(video_decoder_t *this_gen, int flush)
+{
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *)this_gen;
+
+ struct decoded_picture *decoded_pic = NULL;
+ while ((decoded_pic = dpb_get_next_out_picture(this->nal_parser->dpb, flush)) != NULL) {
+ decoded_pic->img->top_field_first = dp_top_field_first(decoded_pic);
+ decoded_pic->img->progressive_frame = check_progressive(this_gen, decoded_pic);
+#ifdef DEBUG_H264
+ printf("progressive: %d\n", decoded_pic->img->progressive_frame);
+#endif
+ if (flush) {
+ xprintf(this->xine, XINE_VERBOSITY_DEBUG,
+ "h264 flush, draw pts: %"PRId64"\n", decoded_pic->img->pts);
+ }
+
+ decoded_pic->img->draw(decoded_pic->img, this->stream);
+ dpb_unmark_picture_delayed(this->nal_parser->dpb, decoded_pic);
+ decoded_pic = NULL;
+ }
+}
+
+static int vdpau_decoder_render(video_decoder_t *this_gen, VdpBitstreamBuffer *vdp_buffer, uint32_t slice_count)
+{
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *)this_gen;
+ vo_frame_t *img = NULL;
+
+ /* if we wait for a second field for this frame, we
+ * have to render to the same surface again.
+ */
+ if (this->incomplete_pic) {
+ img = this->incomplete_pic->img;
+ }
+
+ // FIXME: what if this is the second field of a field coded
+ // picture? - should we keep the first field in the dpb?
+ if(this->completed_pic->flag_mask & IDR_PIC) {
+ dpb_flush(this->nal_parser->dpb);
+ if(this->incomplete_pic) {
+ release_decoded_picture(this->incomplete_pic);
+ this->incomplete_pic = NULL;
+ }
+ }
+
+ struct seq_parameter_set_rbsp *sps = &this->completed_pic->sps_nal->sps;
+ struct slice_header *slc = &this->completed_pic->slc_nal->slc;
+
+ if(sps->vui_parameters_present_flag &&
+ sps->vui_parameters.timing_info_present_flag &&
+ this->video_step == 0) {
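+ /* equals 2 * 90000 * num_units_in_tick / time_scale; e.g. a 25 fps
+ * stream with num_units_in_tick = 1 and time_scale = 50 yields 3600 pts */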
+ this->video_step = 2*90000/(1/((double)sps->vui_parameters.num_units_in_tick/(double)sps->vui_parameters.time_scale));
+ }
+
+ /* go and decode a frame */
+
+ /* check if we expect a second field, but got a frame */
+ if (this->incomplete_pic && img) {
+ if ((this->completed_pic->slc_nal->slc.frame_num !=
+ this->incomplete_pic->coded_pic[0]->slc_nal->slc.frame_num) ||
+ !slc->field_pic_flag) {
+ xprintf(this->xine, XINE_VERBOSITY_DEBUG, "H264 warning: Expected a second field, stream might be broken\n");
+
+ /* remove this pic from dpb, as it is not complete */
+ dpb_unmark_picture_delayed(this->nal_parser->dpb, this->incomplete_pic);
+ dpb_unmark_reference_picture(this->nal_parser->dpb, this->incomplete_pic);
+
+ release_decoded_picture(this->incomplete_pic);
+ this->incomplete_pic = NULL;
+ img = NULL;
+ }
+ }
+
+
+ VdpPictureInfoH264 pic;
+
+ fill_vdpau_pictureinfo_h264(this_gen, slice_count, &pic);
+
+#ifdef DEBUG_H264
+ dump_pictureinfo_h264(&pic);
+
+ int i;
+ printf("E: Bytes used: %d\n", vdp_buffer->bitstream_bytes);
+ printf("E: Decode data: \nE:");
+ for(i = 0; i < ((vdp_buffer->bitstream_bytes < 20) ? vdp_buffer->bitstream_bytes : 20); i++) {
+ printf("%02x ", ((uint8_t*)vdp_buffer->bitstream)[i]);
+ if((i+1) % 10 == 0)
+ printf("\nE:");
+ }
+ printf("\n...\n");
+ for(i = vdp_buffer->bitstream_bytes - 20; i < vdp_buffer->bitstream_bytes; i++) {
+ printf("%02x ", ((uint8_t*)vdp_buffer->bitstream)[i]);
+ if((i+1) % 10 == 0)
+ printf("\nE:");
+ }
+ printf("\nE: ---------------------------------------------------------------\n");
+#endif
+
+ if(!this->decoder_started && !pic.is_reference)
+ return 0;
+
+ this->decoder_started = 1;
+
+ if(img == NULL) {
+ img = this->stream->video_out->get_frame (this->stream->video_out,
+ this->width, this->height,
+ this->ratio,
+ XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS);
+ this->vdpau_accel = (vdpau_accel_t*)img->accel_data;
+
+ img->duration = this->video_step;
+ img->pts = this->completed_pic->pts;
+
+ if (this->dangling_img) {
+ xprintf(this->xine, XINE_VERBOSITY_LOG,
+ "broken stream: current img wasn't processed -- freeing it!\n");
+ this->dangling_img->free(this->dangling_img);
+ }
+ this->dangling_img = img;
+ } else {
+ if (img->pts == 0) {
+ img->pts = this->completed_pic->pts;
+ }
+ }
+
+ if(this->vdp_runtime_nr != *(this->vdpau_accel->current_vdp_runtime_nr)) {
+ xprintf(this->xine, XINE_VERBOSITY_LOG,
+ "VDPAU was preempted. Reinitialise the decoder.\n");
+ this->decoder = VDP_INVALID_HANDLE;
+ vdpau_h264_reset(this_gen);
+ this->vdp_runtime_nr = this->vdpau_accel->vdp_runtime_nr;
+ return 0;
+ }
+
+ VdpVideoSurface surface = this->vdpau_accel->surface;
+
+ /*xprintf(this->xine, XINE_VERBOSITY_DEBUG,
+ "Decode: NUM: %d, REF: %d, BYTES: %d, PTS: %lld\n", pic.frame_num, pic.is_reference, vdp_buffer->bitstream_bytes, this->completed_pic->pts);*/
+ VdpStatus status = this->vdpau_accel->vdp_decoder_render(this->decoder,
+ surface, (VdpPictureInfo*)&pic, 1, vdp_buffer);
+
+ /* free the image data */
+ if(((uint8_t*)vdp_buffer->bitstream) != NULL) {
+ free((uint8_t*)vdp_buffer->bitstream);
+ }
+
+ process_mmc_operations(this->nal_parser, this->completed_pic);
+
+ if(status != VDP_STATUS_OK)
+ {
+ xprintf(this->xine, XINE_VERBOSITY_LOG, "vdpau_h264: Decoder failure: %s\n", this->vdpau_accel->vdp_get_error_string(status));
+ if (this->dangling_img)
+ this->dangling_img->free(this->dangling_img);
+ img = NULL;
+ this->dangling_img = NULL;
+ free_coded_picture(this->completed_pic);
+ this->completed_pic = NULL;
+ }
+ else {
+ img->bad_frame = 0;
+
+ if(!img->progressive_frame && this->completed_pic->repeat_pic)
+ img->repeat_first_field = 1;
+ //else if(img->progressive_frame && this->nal_parser->current_nal->repeat_pic)
+ // img->duration *= this->nal_parser->current_nal->repeat_pic;
+
+ /* only bt601 and bt709 handled so far. others seem to be rarely used */
+ if(sps->vui_parameters.colour_description_present) {
+ switch (sps->vui_parameters.colour_primaries) {
+ case 1:
+ this->color_standard = VDP_COLOR_STANDARD_ITUR_BT_709;
+ break;
+ case 5:
+ case 6:
+ default:
+ this->color_standard = VDP_COLOR_STANDARD_ITUR_BT_601;
+ break;
+ }
+ }
+
+ this->vdpau_accel->color_standard = this->color_standard;
+
+ struct decoded_picture *decoded_pic = NULL;
+
+
+ uint8_t draw_frame = 0;
+ if (!slc->field_pic_flag) { /* frame coded: simply add to dpb */
+ decoded_pic = init_decoded_picture(this->completed_pic, img);
+ this->completed_pic = NULL;
+ this->dangling_img = NULL;
+
+ dpb_add_picture(this->nal_parser->dpb, decoded_pic, sps->num_ref_frames);
+
+ draw_frame = 1;
+ } else { /* field coded: check for second field */
+ if (!this->incomplete_pic) {
+ decoded_pic = init_decoded_picture(this->completed_pic, img);
+ this->completed_pic = NULL;
+ this->dangling_img = NULL;
+ this->incomplete_pic = decoded_pic;
+ lock_decoded_picture(this->incomplete_pic);
+
+ dpb_add_picture(this->nal_parser->dpb, decoded_pic, sps->num_ref_frames);
+
+ /* don't do a draw yet as the field was incomplete */
+ draw_frame = 0;
+ } else {
+ decoded_pic = this->incomplete_pic;
+ lock_decoded_picture(decoded_pic);
+
+ /* picture is complete now */
+ release_decoded_picture(this->incomplete_pic);
+ this->incomplete_pic = NULL;
+ this->dangling_img = NULL;
+
+ decoded_pic_add_field(decoded_pic, this->completed_pic);
+ this->completed_pic = NULL;
+
+ draw_frame = 1;
+ }
+ }
+
+ release_decoded_picture(decoded_pic);
+
+ /* draw the next frame in display order */
+ if (draw_frame) {
+ draw_frames(this_gen, 0);
+ }
+ }
+
+ return 1;
+}
+
+/*
+ * This function receives a buffer of data from the demuxer layer and
+ * figures out how to handle it based on its header flags.
+ */
+static void vdpau_h264_decode_data (video_decoder_t *this_gen,
+ buf_element_t *buf) {
+
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *) this_gen;
+
+ VdpBitstreamBuffer vdp_buffer;
+ vdp_buffer.struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+
+ /* a video decoder does not care about this flag (?) */
+ if (buf->decoder_flags & BUF_FLAG_PREVIEW)
+ return;
+
+ if(buf->decoder_flags & BUF_FLAG_FRAME_START || buf->decoder_flags & BUF_FLAG_FRAME_END)
+ this->have_frame_boundary_marks = 1;
+
+ if (buf->decoder_flags & BUF_FLAG_FRAMERATE) {
+ this->video_step = buf->decoder_info[0];
+ _x_stream_info_set(this->stream, XINE_STREAM_INFO_FRAME_DURATION, this->video_step);
+ }
+
+ if (this->video_step != this->reported_video_step){
+ _x_stream_info_set(this->stream, XINE_STREAM_INFO_FRAME_DURATION, (this->reported_video_step = this->video_step));
+ }
+
+ if (buf->decoder_flags & BUF_FLAG_STDHEADER) { /* need to initialize */
+ this->have_frame_boundary_marks = 0;
+
+ xine_bmiheader *bih = (xine_bmiheader*)buf->content;
+ this->width = bih->biWidth;
+ this->height = bih->biHeight;
+
+ uint8_t *codec_private = buf->content + sizeof(xine_bmiheader);
+ uint32_t codec_private_len = bih->biSize - sizeof(xine_bmiheader);
+ this->codec_private_len = codec_private_len;
+ this->codec_private = malloc(codec_private_len);
+ memcpy(this->codec_private, codec_private, codec_private_len);
+
+ if(codec_private_len > 0) {
+ parse_codec_private(this->nal_parser, codec_private, codec_private_len);
+ }
+ } else if (buf->decoder_flags & BUF_FLAG_SPECIAL) {
+ this->have_frame_boundary_marks = 0;
+
+ if(buf->decoder_info[1] == BUF_SPECIAL_DECODER_CONFIG) {
+ uint8_t *codec_private = buf->decoder_info_ptr[2];
+ uint32_t codec_private_len = buf->decoder_info[2];
+ this->codec_private_len = codec_private_len;
+ this->codec_private = malloc(codec_private_len);
+ memcpy(this->codec_private, codec_private, codec_private_len);
+
+ if(codec_private_len > 0) {
+ parse_codec_private(this->nal_parser, codec_private, codec_private_len);
+ }
+ } else if (buf->decoder_info[1] == BUF_SPECIAL_PALETTE) {
+ xprintf(this->xine, XINE_VERBOSITY_LOG,
+ "SPECIAL PALETTE is not yet handled\n");
+ } else
+ xprintf(this->xine, XINE_VERBOSITY_LOG,
+ "UNKNOWN SPECIAL HEADER\n");
+
+ } else {
+ /* parse the incoming nal units; the first ones are needed to retrieve the profile type */
+ int len = 0;
+
+ while(len < buf->size && !(this->wait_for_frame_start && !(buf->decoder_flags & BUF_FLAG_FRAME_START))) {
+ this->wait_for_frame_start = 0;
+ len += parse_frame(this->nal_parser, buf->content + len, buf->size - len,
+ buf->pts,
+ (uint8_t**)&vdp_buffer.bitstream, &vdp_buffer.bitstream_bytes, &this->completed_pic);
+
+ if(this->decoder == VDP_INVALID_HANDLE &&
+ this->completed_pic &&
+ this->completed_pic->sps_nal != NULL &&
+ this->completed_pic->sps_nal->sps.pic_width > 0 &&
+ this->completed_pic->sps_nal->sps.pic_height > 0) {
+
+ vdpau_decoder_init(this_gen);
+ }
+
+ if(this->completed_pic &&
+ this->completed_pic->sps_nal != NULL &&
+ this->completed_pic->sps_nal->sps.vui_parameters_present_flag &&
+ this->completed_pic->sps_nal->sps.vui_parameters.bitstream_restriction_flag) {
+
+ this->nal_parser->dpb->max_reorder_frames =
+ this->completed_pic->sps_nal->sps.vui_parameters.num_reorder_frames + 1;
+ this->nal_parser->dpb->max_dpb_frames = this->completed_pic->sps_nal->sps.vui_parameters.max_dec_frame_buffering + 1;
+
+ xprintf(this->xine, XINE_VERBOSITY_DEBUG,
+ "max reorder count: %d, max dpb count %d\n",
+ this->nal_parser->dpb->max_reorder_frames,
+ this->nal_parser->dpb->max_dpb_frames);
+ }
+
+ if(this->decoder != VDP_INVALID_HANDLE &&
+ vdp_buffer.bitstream_bytes > 0 &&
+ this->completed_pic->slc_nal != NULL &&
+ this->completed_pic->pps_nal != NULL) {
+ vdpau_decoder_render(this_gen, &vdp_buffer, this->completed_pic->slice_cnt);
+ } else if (this->completed_pic != NULL) {
+ free_coded_picture(this->completed_pic);
+ }
+
+ /* in case the last nal was detected as END_OF_SEQUENCE
+ * we will flush the dpb, so that all pictures get drawn
+ */
+ if(this->nal_parser->last_nal_res == 3) {
+ xprintf(this->xine, XINE_VERBOSITY_DEBUG,
+ "END_OF_SEQUENCE, flush buffers\n");
+ vdpau_h264_flush(this_gen);
+ }
+ }
+ }
+
+ if(buf->decoder_flags & BUF_FLAG_FRAME_END)
+ this->wait_for_frame_start = 0;
+}
+
+/*
+ * This function is called when xine needs to flush the system.
+ */
+static void vdpau_h264_flush (video_decoder_t *this_gen) {
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t*) this_gen;
+ struct decoded_picture *decoded_pic = NULL;
+
+ if(this->dangling_img){
+ this->dangling_img->free(this->dangling_img);
+ this->dangling_img = NULL;
+ }
+
+ if (this->incomplete_pic) {
+ release_decoded_picture(this->incomplete_pic);
+ this->incomplete_pic = NULL;
+ }
+
+ draw_frames(this_gen, 1);
+ dpb_free_all(this->nal_parser->dpb);
+ this->reset = VO_NEW_SEQUENCE_FLAG;
+}
+
+/*
+ * This function resets the video decoder.
+ */
+static void vdpau_h264_reset (video_decoder_t *this_gen) {
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *) this_gen;
+
+ dpb_free_all(this->nal_parser->dpb);
+
+ if (this->decoder != VDP_INVALID_HANDLE) {
+ this->vdpau_accel->vdp_decoder_destroy( this->decoder );
+ this->decoder = VDP_INVALID_HANDLE;
+ }
+
+ // Doing a full parser reinit here works more reliably than
+ // resetting
+
+ //reset_parser(this->nal_parser);
+ free_parser(this->nal_parser);
+ this->nal_parser = init_parser(this->xine);
+
+ this->color_standard = VDP_COLOR_STANDARD_ITUR_BT_601;
+ this->video_step = 0;
+
+ if(this->codec_private_len > 0) {
+ parse_codec_private(this->nal_parser, this->codec_private, this->codec_private_len);
+
+ /* if the stream does not contain frame boundary marks we
+ * have to hope that the next nal will start with the next
+ * incoming buf... seems to work, though...
+ */
+ this->wait_for_frame_start = this->have_frame_boundary_marks;
+ }
+
+ if (this->incomplete_pic) {
+ release_decoded_picture(this->incomplete_pic);
+ this->incomplete_pic = NULL;
+ }
+
+ if (this->dangling_img) {
+ this->dangling_img->free(this->dangling_img);
+ this->dangling_img = NULL;
+ }
+
+ this->progressive_cnt = 0;
+ this->reset = VO_NEW_SEQUENCE_FLAG;
+}
+
+/*
+ * The decoder should forget any stored pts values here.
+ */
+static void vdpau_h264_discontinuity (video_decoder_t *this_gen) {
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *) this_gen;
+
+ dpb_clear_all_pts(this->nal_parser->dpb);
+ this->reset = VO_NEW_SEQUENCE_FLAG;
+}
+
+/*
+ * This function frees the video decoder instance allocated to the decoder.
+ */
+static void vdpau_h264_dispose (video_decoder_t *this_gen) {
+
+ vdpau_h264_decoder_t *this = (vdpau_h264_decoder_t *) this_gen;
+
+ if (this->incomplete_pic) {
+ release_decoded_picture(this->incomplete_pic);
+ this->incomplete_pic = NULL;
+ }
+
+ if (this->dangling_img) {
+ this->dangling_img->free(this->dangling_img);
+ this->dangling_img = NULL;
+ }
+
+ dpb_free_all(this->nal_parser->dpb);
+
+ if (this->decoder != VDP_INVALID_HANDLE) {
+ this->vdpau_accel->vdp_decoder_destroy( this->decoder );
+ this->decoder = VDP_INVALID_HANDLE;
+ }
+
+ this->stream->video_out->close( this->stream->video_out, this->stream );
+
+ free_parser (this->nal_parser);
+ free (this_gen);
+}
+
+/*
+ * This function allocates, initializes, and returns a private video
+ * decoder structure.
+ */
+static video_decoder_t *open_plugin (video_decoder_class_t *class_gen, xine_stream_t *stream) {
+
+ vdpau_h264_decoder_t *this ;
+
+ /* the videoout must be vdpau-capable to support this decoder */
+ if ( !(stream->video_driver->get_capabilities(stream->video_driver) & VO_CAP_VDPAU_H264) )
+ return NULL;
+
+ /* now check if vdpau has free decoder resource */
+ vo_frame_t *img = stream->video_out->get_frame( stream->video_out, 1920, 1080, 1, XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS );
+ vdpau_accel_t *accel = (vdpau_accel_t*)img->accel_data;
+ int runtime_nr = accel->vdp_runtime_nr;
+ img->free(img);
+ VdpDecoder decoder;
+ VdpStatus st = accel->vdp_decoder_create( accel->vdp_device, VDP_DECODER_PROFILE_H264_MAIN, 1920, 1080, 16, &decoder );
+ if ( st!=VDP_STATUS_OK ) {
+ lprintf( "can't create vdpau decoder.\n" );
+ return NULL;
+ }
+
+ accel->vdp_decoder_destroy( decoder );
+
+ this = (vdpau_h264_decoder_t *) calloc(1, sizeof(vdpau_h264_decoder_t));
+
+ this->nal_parser = init_parser(stream->xine);
+
+ this->video_decoder.decode_data = vdpau_h264_decode_data;
+ this->video_decoder.flush = vdpau_h264_flush;
+ this->video_decoder.reset = vdpau_h264_reset;
+ this->video_decoder.discontinuity = vdpau_h264_discontinuity;
+ this->video_decoder.dispose = vdpau_h264_dispose;
+
+ this->stream = stream;
+ this->xine = stream->xine;
+ this->class = (vdpau_h264_class_t *) class_gen;
+
+ this->decoder = VDP_INVALID_HANDLE;
+ this->vdp_runtime_nr = runtime_nr;
+ this->color_standard = VDP_COLOR_STANDARD_ITUR_BT_601;
+ this->progressive_cnt = 0;
+
+ this->reset = VO_NEW_SEQUENCE_FLAG;
+
+ (this->stream->video_out->open) (this->stream->video_out, this->stream);
+
+ return &this->video_decoder;
+}
+
+/*
+ * This function allocates a private video decoder class and initializes
+ * the class's member functions.
+ */
+static void *init_plugin (xine_t *xine, void *data) {
+
+ vdpau_h264_class_t *this;
+
+ this = (vdpau_h264_class_t *) calloc(1, sizeof(vdpau_h264_class_t));
+
+ this->decoder_class.open_plugin = open_plugin;
+ this->decoder_class.identifier = "vdpau_h264";
+ this->decoder_class.description =
+ N_("vdpau_h264: h264 decoder plugin using VDPAU hardware decoding.\n"
+ "Must be used along with video_out_vdpau.");
+ this->decoder_class.dispose = default_video_decoder_class_dispose;
+
+ return this;
+}
+
+/*
+ * This is a list of all of the internal xine video buffer types that
+ * this decoder is able to handle. Check src/xine-engine/buffer.h for a
+ * list of valid buffer types (and add a new one if the one you need does
+ * not exist). Terminate the list with a 0.
+ */
+static const uint32_t video_types[] = {
+ /* BUF_VIDEO_FOOVIDEO, */
+ BUF_VIDEO_H264,
+ 0
+};
+
+/*
+ * This data structure combines the list of supported xine buffer types and
+ * the priority that the plugin should be given with respect to other
+ * plugins that handle the same buffer type. A plugin with priority (n+1)
+ * will be used instead of a plugin with priority (n).
+ */
+static const decoder_info_t dec_info_video = {
+ video_types, /* supported types */
+ 7 /* priority */
+};
+
+/*
+ * The plugin catalog entry. This is the only information that this plugin
+ * will export to the public.
+ */
+const plugin_info_t xine_plugin_info[] EXPORTED = {
+ /* { type, API, "name", version, special_info, init_function } */
+ { PLUGIN_VIDEO_DECODER | PLUGIN_MUST_PRELOAD, 19, "vdpau_h264", XINE_VERSION_CODE, &dec_info_video, init_plugin },
+ { PLUGIN_NONE, 0, "", 0, NULL, NULL }
+};
diff --git a/src/video_dec/libvdpau/vdpau_mpeg12.c b/src/video_dec/libvdpau/vdpau_mpeg12.c
new file mode 100644
index 000000000..1067f8634
--- /dev/null
+++ b/src/video_dec/libvdpau/vdpau_mpeg12.c
@@ -0,0 +1,1101 @@
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; remove-trailing-space on;
+ * Copyright (C) 2008 the xine project
+ * Copyright (C) 2008 Christophe Thommeret <hftom@free.fr>
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * vdpau_mpeg12.c, an mpeg1/2 video stream parser using the VDPAU hardware decoder
+ *
+ */
+
+/*#define LOG*/
+#define LOG_MODULE "vdpau_mpeg12"
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <xine/xine_internal.h>
+#include <xine/video_out.h>
+#include <xine/buffer.h>
+#include <xine/xineutils.h>
+#include "accel_vdpau.h"
+#include "bits_reader.h"
+
+#include <vdpau/vdpau.h>
+
+#define sequence_header_code 0xb3
+#define sequence_error_code 0xb4
+#define sequence_end_code 0xb7
+#define group_start_code 0xb8
+#define extension_start_code 0xb5
+#define user_data_start_code 0xb2
+#define picture_start_code 0x00
+#define begin_slice_start_code 0x01
+#define end_slice_start_code 0xaf
+
+#define sequence_ext_sc 1
+#define quant_matrix_ext_sc 3
+#define picture_coding_ext_sc 8
+#define sequence_display_ext_sc 2
+
+#define I_FRAME 1
+#define P_FRAME 2
+#define B_FRAME 3
+
+#define PICTURE_TOP 1
+#define PICTURE_BOTTOM 2
+#define PICTURE_FRAME 3
+
+/*#define MAKE_DAT*/ /* do NOT define this unless you know what you are doing */
+#ifdef MAKE_DAT
+static int nframes;
+static FILE *outfile;
+#endif
+
+
+
+/* default intra quant matrix, in zig-zag order */
+static const uint8_t default_intra_quantizer_matrix[64] = {
+ 8,
+ 16, 16,
+ 19, 16, 19,
+ 22, 22, 22, 22,
+ 22, 22, 26, 24, 26,
+ 27, 27, 27, 26, 26, 26,
+ 26, 27, 27, 27, 29, 29, 29,
+ 34, 34, 34, 29, 29, 29, 27, 27,
+ 29, 29, 32, 32, 34, 34, 37,
+ 38, 37, 35, 35, 34, 35,
+ 38, 38, 40, 40, 40,
+ 48, 48, 46, 46,
+ 56, 56, 58,
+ 69, 69,
+ 83
+};
+
+uint8_t mpeg2_scan_norm[64] = {
+ /* Zig-Zag scan pattern */
+ 0, 1, 8,16, 9, 2, 3,10,
+ 17,24,32,25,18,11, 4, 5,
+ 12,19,26,33,40,48,41,34,
+ 27,20,13, 6, 7,14,21,28,
+ 35,42,49,56,57,50,43,36,
+ 29,22,15,23,30,37,44,51,
+ 58,59,52,45,38,31,39,46,
+ 53,60,61,54,47,55,62,63
+};
+
+
+
+typedef struct {
+ VdpPictureInfoMPEG1Or2 vdp_infos; /* first field, also used for frame */
+ VdpPictureInfoMPEG1Or2 vdp_infos2; /* second field */
+ int slices_count, slices_count2;
+ uint8_t *slices;
+ int slices_size;
+ int slices_pos, slices_pos_top;
+
+ int progressive_frame;
+ int repeat_first_field;
+} picture_t;
+
+
+
+typedef struct {
+ uint32_t coded_width;
+ uint32_t coded_height;
+
+ double video_step; /* frame duration in pts units */
+ double reported_video_step; /* frame duration in pts units */
+ double ratio;
+
+ VdpDecoderProfile profile;
+ int horizontal_size_value;
+ int vertical_size_value;
+ int aspect_ratio_information;
+ int frame_rate_code;
+ int progressive_sequence;
+ int chroma;
+ int horizontal_size_extension;
+ int vertical_size_extension;
+ int frame_rate_extension_n;
+ int frame_rate_extension_d;
+ int display_horizontal_size;
+ int display_vertical_size;
+ int top_field_first;
+
+ int have_header;
+ int have_display_extension;
+
+ uint8_t *buf; /* accumulate data */
+ int bufseek;
+ uint32_t bufsize;
+ uint32_t bufpos;
+ int start;
+
+ picture_t picture;
+ vo_frame_t *forward_ref;
+ vo_frame_t *backward_ref;
+
+ int64_t cur_pts, seq_pts;
+
+ vdpau_accel_t *accel_vdpau;
+
+ bits_reader_t br;
+
+ int vdp_runtime_nr;
+ int reset;
+
+} sequence_t;
+
+
+
+typedef struct {
+ video_decoder_class_t decoder_class;
+} vdpau_mpeg12_class_t;
+
+
+
+typedef struct vdpau_mpeg12_decoder_s {
+ video_decoder_t video_decoder; /* parent video decoder structure */
+
+ vdpau_mpeg12_class_t *class;
+ xine_stream_t *stream;
+
+ sequence_t sequence;
+
+ VdpDecoder decoder;
+ VdpDecoderProfile decoder_profile;
+ uint32_t decoder_width;
+ uint32_t decoder_height;
+
+} vdpau_mpeg12_decoder_t;
+
+
+static void picture_ready( vdpau_mpeg12_decoder_t *vd, uint8_t end_of_sequence );
+
+
+
+static void reset_picture( picture_t *pic )
+{
+ lprintf( "reset_picture\n" );
+ pic->vdp_infos.picture_structure = pic->vdp_infos2.picture_structure = 0;
+ pic->vdp_infos2.intra_dc_precision = pic->vdp_infos.intra_dc_precision = 0;
+ pic->vdp_infos2.frame_pred_frame_dct = pic->vdp_infos.frame_pred_frame_dct = 1;
+ pic->vdp_infos2.concealment_motion_vectors = pic->vdp_infos.concealment_motion_vectors = 0;
+ pic->vdp_infos2.intra_vlc_format = pic->vdp_infos.intra_vlc_format = 0;
+ pic->vdp_infos2.alternate_scan = pic->vdp_infos.alternate_scan = 0;
+ pic->vdp_infos2.q_scale_type = pic->vdp_infos.q_scale_type = 0;
+ pic->vdp_infos2.top_field_first = pic->vdp_infos.top_field_first = 1;
+ pic->slices_count = 0;
+ pic->slices_count2 = 0;
+ pic->slices_pos = 0;
+ pic->slices_pos_top = 0;
+ pic->progressive_frame = 0;
+ pic->repeat_first_field = 0;
+}
+
+
+
+static void init_picture( picture_t *pic )
+{
+ pic->slices_size = 2048;
+ pic->slices = (uint8_t*)malloc(pic->slices_size);
+ reset_picture( pic );
+}
+
+
+
+static void reset_sequence( sequence_t *sequence, int free_refs )
+{
+ sequence->cur_pts = sequence->seq_pts = 0;
+ if ( sequence->forward_ref )
+ sequence->forward_ref->pts = 0;
+ if ( sequence->backward_ref )
+ sequence->backward_ref->pts = 0;
+
+ if ( !free_refs )
+ return;
+
+ sequence->bufpos = 0;
+ sequence->bufseek = 0;
+ sequence->start = -1;
+ if ( sequence->forward_ref )
+ sequence->forward_ref->free( sequence->forward_ref );
+ sequence->forward_ref = NULL;
+ if ( sequence->backward_ref )
+ sequence->backward_ref->free( sequence->backward_ref );
+ sequence->backward_ref = NULL;
+ sequence->top_field_first = 0;
+ sequence->reset = VO_NEW_SEQUENCE_FLAG;
+}
+
+
+
+static void free_sequence( sequence_t *sequence )
+{
+ lprintf( "init_sequence\n" );
+ sequence->have_header = 0;
+ sequence->profile = VDP_DECODER_PROFILE_MPEG1;
+ sequence->chroma = 0;
+ sequence->video_step = 3600;
+ reset_sequence( sequence, 1 );
+}
+
+
+
+static void sequence_header( vdpau_mpeg12_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ int i, j;
+
+ if ( !sequence->have_header )
+ sequence->have_header = 1;
+
+ sequence->profile = VDP_DECODER_PROFILE_MPEG1;
+ sequence->horizontal_size_extension = 0;
+ sequence->vertical_size_extension = 0;
+ sequence->have_display_extension = 0;
+
+ bits_reader_set( &sequence->br, buf, len );
+ sequence->horizontal_size_value = read_bits( &sequence->br, 12 );
+ lprintf( "horizontal_size_value: %d\n", sequence->horizontal_size_value );
+ sequence->vertical_size_value = read_bits( &sequence->br, 12 );
+ lprintf( "vertical_size_value: %d\n", sequence->vertical_size_value );
+ sequence->aspect_ratio_information = read_bits( &sequence->br, 4 );
+ lprintf( "aspect_ratio_information: %d\n", sequence->aspect_ratio_information );
+ sequence->frame_rate_code = read_bits( &sequence->br, 4 );
+ lprintf( "frame_rate_code: %d\n", sequence->frame_rate_code );
+ int tmp;
+ tmp = read_bits( &sequence->br, 18 );
+ lprintf( "bit_rate_value: %d\n", tmp );
+ tmp = read_bits( &sequence->br, 1 );
+ lprintf( "marker_bit: %d\n", tmp );
+ tmp = read_bits( &sequence->br, 10 );
+ lprintf( "vbv_buffer_size_value: %d\n", tmp );
+ tmp = read_bits( &sequence->br, 1 );
+ lprintf( "constrained_parameters_flag: %d\n", tmp );
+ i = read_bits( &sequence->br, 1 );
+ lprintf( "load_intra_quantizer_matrix: %d\n", i );
+ if ( i ) {
+ for ( j=0; j<64; ++j ) {
+ sequence->picture.vdp_infos2.intra_quantizer_matrix[mpeg2_scan_norm[j]] = sequence->picture.vdp_infos.intra_quantizer_matrix[mpeg2_scan_norm[j]] = read_bits( &sequence->br, 8 );
+ }
+ }
+ else {
+ for ( j=0; j<64; ++j ) {
+ sequence->picture.vdp_infos2.intra_quantizer_matrix[mpeg2_scan_norm[j]] = sequence->picture.vdp_infos.intra_quantizer_matrix[mpeg2_scan_norm[j]] = default_intra_quantizer_matrix[j];
+ }
+ }
+
+ i = read_bits( &sequence->br, 1 );
+ lprintf( "load_non_intra_quantizer_matrix: %d\n", i );
+ if ( i ) {
+ for ( j=0; j<64; ++j ) {
+ sequence->picture.vdp_infos2.non_intra_quantizer_matrix[mpeg2_scan_norm[j]] = sequence->picture.vdp_infos.non_intra_quantizer_matrix[mpeg2_scan_norm[j]] = read_bits( &sequence->br, 8 );
+ }
+ }
+ else {
+ memset( sequence->picture.vdp_infos.non_intra_quantizer_matrix, 16, 64 );
+ memset( sequence->picture.vdp_infos2.non_intra_quantizer_matrix, 16, 64 );
+ }
+}
+
+
+
+static void process_sequence_mpeg12_dependent_data( vdpau_mpeg12_decoder_t *this_gen )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ int frame_rate_value_n, frame_rate_value_d;
+
+ sequence->coded_width = sequence->horizontal_size_value | (sequence->horizontal_size_extension << 14);
+ sequence->coded_height = sequence->vertical_size_value | (sequence->vertical_size_extension << 14);
+
+ switch ( sequence->frame_rate_code ) {
+ case 1: frame_rate_value_n = 24; frame_rate_value_d = 1001; break; /* 23.976.. */
+ case 2: frame_rate_value_n = 24; frame_rate_value_d = 1000; break; /* 24 */
+ case 3: frame_rate_value_n = 25; frame_rate_value_d = 1000; break; /* 25 */
+ case 4: frame_rate_value_n = 30; frame_rate_value_d = 1001; break; /* 29.97.. */
+ case 5: frame_rate_value_n = 30; frame_rate_value_d = 1000; break; /* 30 */
+ case 6: frame_rate_value_n = 50; frame_rate_value_d = 1000; break; /* 50 */
+ case 7: frame_rate_value_n = 60; frame_rate_value_d = 1001; break; /* 59.94.. */
+ case 8: frame_rate_value_n = 60; frame_rate_value_d = 1000; break; /* 60 */
+ default: frame_rate_value_n = 50; frame_rate_value_d = 1000; /* assume 50 */
+ }
+
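+  /* video_step is the frame duration in 90kHz pts ticks (90000 / fps);
+     the MPEG-2 frame rate extensions scale numerator and denominator.
+     E.g. frame_rate_code 3 (25 fps) gives 90.0 * 1000 / 25 = 3600 and
+     frame_rate_code 4 (29.97 fps) gives 90.0 * 1001 / 30 = 3003. */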
+ sequence->video_step = 90.0 * (frame_rate_value_d * (sequence->frame_rate_extension_d + 1))
+ / (frame_rate_value_n * (sequence->frame_rate_extension_n + 1));
+
+ if ( sequence->profile==VDP_DECODER_PROFILE_MPEG1 ) {
+ double pel_aspect_ratio; /* height / width */
+
+    switch ( sequence->aspect_ratio_information ) {
+      case 1: pel_aspect_ratio = 1.0000; break;
+      case 2: pel_aspect_ratio = 0.6735; break;
+      case 3: pel_aspect_ratio = 0.7031; break;
+      case 4: pel_aspect_ratio = 0.7615; break;
+      case 5: pel_aspect_ratio = 0.8055; break;
+      case 6: pel_aspect_ratio = 0.8437; break;
+      case 7: pel_aspect_ratio = 0.8935; break;
+      case 8: pel_aspect_ratio = 0.9157; break;
+      case 9: pel_aspect_ratio = 0.9815; break;
+      case 10: pel_aspect_ratio = 1.0255; break;
+      case 11: pel_aspect_ratio = 1.0695; break;
+      case 12: pel_aspect_ratio = 1.0950; break;
+      case 13: pel_aspect_ratio = 1.1575; break;
+      case 14: pel_aspect_ratio = 1.2015; break;
+      default: pel_aspect_ratio = 1.0000; /* fallback */
+    }
+
+ sequence->ratio = ((double)sequence->coded_width/(double)sequence->coded_height)/pel_aspect_ratio;
+ }
+ else {
+ switch ( sequence->aspect_ratio_information ) {
+ case 1: sequence->ratio = sequence->have_display_extension
+ ? ((double)sequence->display_horizontal_size/(double)sequence->display_vertical_size)/1.0
+ : ((double)sequence->coded_width/(double)sequence->coded_height)/1.0;
+ break;
+ case 2: sequence->ratio = 4.0/3.0; break;
+ case 3: sequence->ratio = 16.0/9.0; break;
+ case 4: sequence->ratio = 2.21; break;
+ default: sequence->ratio = ((double)sequence->coded_width/(double)sequence->coded_height)/1.0;
+ }
+ }
+
+ if ( sequence->have_header == 1 ) {
+ sequence->have_header = 2;
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_WIDTH, sequence->coded_width );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_HEIGHT, sequence->coded_height );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_RATIO, ((double)10000*sequence->ratio) );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_FRAME_DURATION, (sequence->reported_video_step = sequence->video_step) );
+ _x_meta_info_set_utf8( this_gen->stream, XINE_META_INFO_VIDEOCODEC, "MPEG1/2 (vdpau)" );
+ xine_event_t event;
+ xine_format_change_data_t data;
+ event.type = XINE_EVENT_FRAME_FORMAT_CHANGE;
+ event.stream = this_gen->stream;
+ event.data = &data;
+ event.data_length = sizeof(data);
+ data.width = sequence->coded_width;
+ data.height = sequence->coded_height;
+ data.aspect = sequence->ratio;
+ xine_event_send( this_gen->stream, &event );
+ }
+ else if ( sequence->have_header == 2 && sequence->reported_video_step != sequence->video_step ) {
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_FRAME_DURATION, (sequence->reported_video_step = sequence->video_step) );
+ }
+}
+
+
+
+static void picture_header( vdpau_mpeg12_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ process_sequence_mpeg12_dependent_data(this_gen);
+
+ if ( sequence->profile==VDP_DECODER_PROFILE_MPEG1 )
+ sequence->picture.vdp_infos.picture_structure = PICTURE_FRAME;
+
+ VdpPictureInfoMPEG1Or2 *infos = &sequence->picture.vdp_infos;
+
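+  /* A coded picture is either a full frame or a pair of fields.  When the
+     pending picture is complete (a frame, or both fields seen) it is decoded
+     and a new one is started; otherwise this header begins the second field,
+     whose parameters go into vdp_infos2. */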
+ if ( sequence->picture.vdp_infos.picture_structure==PICTURE_FRAME ) {
+ picture_ready( this_gen, 0 );
+ reset_picture( &sequence->picture );
+ }
+ else if ( sequence->picture.vdp_infos.picture_structure && sequence->picture.vdp_infos2.picture_structure ) {
+ picture_ready( this_gen, 0 );
+ reset_picture( &sequence->picture );
+ }
+ else if ( sequence->picture.vdp_infos.picture_structure ) {
+ infos = &sequence->picture.vdp_infos2;
+ sequence->picture.slices_pos_top = sequence->picture.slices_pos;
+
+ sequence->cur_pts = 0; /* ignore pts of second field */
+ }
+
+ /* take over pts for next issued image */
+ if ( sequence->cur_pts ) {
+ sequence->seq_pts = sequence->cur_pts;
+ sequence->cur_pts = 0;
+ }
+
+ bits_reader_set( &sequence->br, buf, len );
+ int tmp = read_bits( &sequence->br, 10 );
+ lprintf( "temporal_reference: %d\n", tmp );
+ infos->picture_coding_type = read_bits( &sequence->br, 3 );
+ lprintf( "picture_coding_type: %d\n", infos->picture_coding_type );
+ infos->forward_reference = VDP_INVALID_HANDLE;
+ infos->backward_reference = VDP_INVALID_HANDLE;
+ skip_bits( &sequence->br, 16 );
+ if ( infos->picture_coding_type > I_FRAME ) {
+ infos->full_pel_forward_vector = read_bits( &sequence->br, 1 );
+ infos->f_code[0][0] = infos->f_code[0][1] = read_bits( &sequence->br, 3 );
+ if ( infos->picture_coding_type==B_FRAME ) {
+ infos->full_pel_backward_vector = read_bits( &sequence->br, 1 );
+ infos->f_code[1][0] = infos->f_code[1][1] = read_bits( &sequence->br, 3 );
+ }
+ }
+ else {
+ infos->full_pel_forward_vector = 0;
+ infos->full_pel_backward_vector = 0;
+ }
+}
+
+
+
+static void sequence_extension( sequence_t *sequence, uint8_t *buf, int len )
+{
+ bits_reader_set( &sequence->br, buf, len );
+ int tmp = read_bits( &sequence->br, 4 );
+ lprintf( "extension_start_code_identifier: %d\n", tmp );
+ skip_bits( &sequence->br, 1 );
+ switch ( read_bits( &sequence->br, 3 ) ) {
+ case 5: sequence->profile = VDP_DECODER_PROFILE_MPEG2_SIMPLE; break;
+ default: sequence->profile = VDP_DECODER_PROFILE_MPEG2_MAIN;
+ }
+ skip_bits( &sequence->br, 4 );
+ sequence->progressive_sequence = read_bits( &sequence->br, 1 );
+ lprintf( "progressive_sequence: %d\n", sequence->progressive_sequence );
+ if ( read_bits( &sequence->br, 2 ) == 2 )
+ sequence->chroma = VO_CHROMA_422;
+ tmp = read_bits( &sequence->br, 2 );
+ lprintf( "horizontal_size_extension: %d\n", tmp );
+ tmp = read_bits( &sequence->br, 2 );
+ lprintf( "vertical_size_extension: %d\n", tmp );
+ tmp = read_bits( &sequence->br, 12 );
+ lprintf( "bit_rate_extension: %d\n", tmp );
+ tmp = read_bits( &sequence->br, 1 );
+ lprintf( "marker_bit: %d\n", tmp );
+ tmp = read_bits( &sequence->br, 8 );
+ lprintf( "vbv_buffer_size_extension: %d\n", tmp );
+ tmp = read_bits( &sequence->br, 1 );
+ lprintf( "low_delay: %d\n", tmp );
+ sequence->frame_rate_extension_n = read_bits( &sequence->br, 2 );
+ lprintf( "frame_rate_extension_n: %d\n", sequence->frame_rate_extension_n );
+ sequence->frame_rate_extension_d = read_bits( &sequence->br, 5 );
+ lprintf( "frame_rate_extension_d: %d\n", sequence->frame_rate_extension_d );
+}
+
+
+
+static void picture_coding_extension( sequence_t *sequence, uint8_t *buf, int len )
+{
+ VdpPictureInfoMPEG1Or2 *infos = &sequence->picture.vdp_infos;
+ if ( infos->picture_structure && infos->picture_structure!=PICTURE_FRAME )
+ infos = &sequence->picture.vdp_infos2;
+
+ bits_reader_set( &sequence->br, buf, len );
+ int tmp = read_bits( &sequence->br, 4 );
+ lprintf( "extension_start_code_identifier: %d\n", tmp );
+ infos->f_code[0][0] = read_bits( &sequence->br, 4 );
+ infos->f_code[0][1] = read_bits( &sequence->br, 4 );
+ infos->f_code[1][0] = read_bits( &sequence->br, 4 );
+ infos->f_code[1][1] = read_bits( &sequence->br, 4 );
+ lprintf( "f_code_0_0: %d\n", infos->f_code[0][0] );
+ lprintf( "f_code_0_1: %d\n", infos->f_code[0][1] );
+ lprintf( "f_code_1_0: %d\n", infos->f_code[1][0] );
+ lprintf( "f_code_1_1: %d\n", infos->f_code[1][1] );
+ infos->intra_dc_precision = read_bits( &sequence->br, 2 );
+ lprintf( "intra_dc_precision: %d\n", infos->intra_dc_precision );
+ infos->picture_structure = read_bits( &sequence->br, 2 );
+ lprintf( "picture_structure: %d\n", infos->picture_structure );
+ infos->top_field_first = read_bits( &sequence->br, 1 );
+ lprintf( "top_field_first: %d\n", infos->top_field_first );
+ infos->frame_pred_frame_dct = read_bits( &sequence->br, 1 );
+ lprintf( "frame_pred_frame_dct: %d\n", infos->frame_pred_frame_dct );
+ infos->concealment_motion_vectors = read_bits( &sequence->br, 1 );
+ lprintf( "concealment_motion_vectors: %d\n", infos->concealment_motion_vectors );
+ infos->q_scale_type = read_bits( &sequence->br, 1 );
+ lprintf( "q_scale_type: %d\n", infos->q_scale_type );
+ infos->intra_vlc_format = read_bits( &sequence->br, 1 );
+ lprintf( "intra_vlc_format: %d\n", infos->intra_vlc_format );
+ infos->alternate_scan = read_bits( &sequence->br, 1 );
+ lprintf( "alternate_scan: %d\n", infos->alternate_scan );
+ sequence->picture.repeat_first_field = read_bits( &sequence->br, 1 );
+ lprintf( "repeat_first_field: %d\n", sequence->picture.repeat_first_field );
+ tmp = read_bits( &sequence->br, 1 );
+ lprintf( "chroma_420_type: %d\n", tmp );
+ sequence->picture.progressive_frame = read_bits( &sequence->br, 1 );
+ lprintf( "progressive_frame: %d\n", sequence->picture.progressive_frame );
+}
+
+
+
+static void quant_matrix_extension( sequence_t *sequence, uint8_t *buf, int len )
+{
+ int i, j;
+
+ bits_reader_set( &sequence->br, buf, len );
+ skip_bits( &sequence->br, 4 );
+ i = read_bits( &sequence->br, 1 );
+ lprintf( "load_intra_quantizer_matrix: %d\n", i );
+ if ( i ) {
+ for ( j=0; j<64; ++j ) {
+ sequence->picture.vdp_infos2.intra_quantizer_matrix[mpeg2_scan_norm[j]] = sequence->picture.vdp_infos.intra_quantizer_matrix[mpeg2_scan_norm[j]] = read_bits( &sequence->br, 8 );
+ }
+ }
+ else {
+ for ( j=0; j<64; ++j ) {
+ sequence->picture.vdp_infos2.intra_quantizer_matrix[mpeg2_scan_norm[j]] = sequence->picture.vdp_infos.intra_quantizer_matrix[mpeg2_scan_norm[j]] = default_intra_quantizer_matrix[j];
+ }
+ }
+
+ i = read_bits( &sequence->br, 1 );
+ lprintf( "load_non_intra_quantizer_matrix: %d\n", i );
+ if ( i ) {
+ for ( j=0; j<64; ++j ) {
+ sequence->picture.vdp_infos2.non_intra_quantizer_matrix[mpeg2_scan_norm[j]] = sequence->picture.vdp_infos.non_intra_quantizer_matrix[mpeg2_scan_norm[j]] = read_bits( &sequence->br, 8 );
+ }
+ }
+ else {
+ memset( sequence->picture.vdp_infos.non_intra_quantizer_matrix, 16, 64 );
+ memset( sequence->picture.vdp_infos2.non_intra_quantizer_matrix, 16, 64 );
+ }
+}
+
+
+
+static void copy_slice( sequence_t *sequence, uint8_t *buf, int len )
+{
+ int size = sequence->picture.slices_pos+len;
+ if ( sequence->picture.slices_size < size ) {
+ sequence->picture.slices_size = size+1024;
+ sequence->picture.slices = realloc( sequence->picture.slices, sequence->picture.slices_size );
+ }
+ xine_fast_memcpy( sequence->picture.slices+sequence->picture.slices_pos, buf, len );
+ sequence->picture.slices_pos += len;
+ if ( sequence->picture.slices_pos_top )
+ sequence->picture.slices_count2++;
+ else
+ sequence->picture.slices_count++;
+}
+
+
+
+static int parse_code( vdpau_mpeg12_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ if ( !sequence->have_header && buf[3]!=sequence_header_code ) {
+ lprintf( " ----------- no sequence header yet.\n" );
+ return 0;
+ }
+
+ if ( (buf[3] >= begin_slice_start_code) && (buf[3] <= end_slice_start_code) ) {
+ lprintf( " ----------- slice_start_code\n" );
+ copy_slice( sequence, buf, len );
+ return 0;
+ }
+
+ switch ( buf[3] ) {
+ case sequence_header_code:
+ lprintf( " ----------- sequence_header_code\n" );
+ sequence_header( this_gen, buf+4, len-4 );
+ break;
+ case extension_start_code: {
+ switch ( buf[4]>>4 ) {
+ case sequence_ext_sc:
+ lprintf( " ----------- sequence_extension_start_code\n" );
+ sequence_extension( sequence, buf+4, len-4 );
+ break;
+ case quant_matrix_ext_sc:
+ lprintf( " ----------- quant_matrix_extension_start_code\n" );
+ quant_matrix_extension( sequence, buf+4, len-4 );
+ break;
+ case picture_coding_ext_sc:
+ lprintf( " ----------- picture_coding_extension_start_code\n" );
+ picture_coding_extension( sequence, buf+4, len-4 );
+ break;
+ case sequence_display_ext_sc:
+ lprintf( " ----------- sequence_display_extension_start_code\n" );
+ break;
+ }
+ break;
+ }
+ case user_data_start_code:
+ lprintf( " ----------- user_data_start_code\n" );
+ break;
+ case group_start_code:
+ lprintf( " ----------- group_start_code\n" );
+ break;
+ case picture_start_code:
+ lprintf( " ----------- picture_start_code\n" );
+ picture_header( this_gen, buf+4, len-4 );
+ break;
+ case sequence_error_code:
+ lprintf( " ----------- sequence_error_code\n" );
+ break;
+ case sequence_end_code:
+ lprintf( " ----------- sequence_end_code\n" );
+ break;
+ }
+ return 0;
+}
+
+
+
+static void decode_render( vdpau_mpeg12_decoder_t *vd, vdpau_accel_t *accel )
+{
+ sequence_t *seq = (sequence_t*)&vd->sequence;
+ picture_t *pic = (picture_t*)&seq->picture;
+
+ pic->vdp_infos.slice_count = pic->slices_count;
+ pic->vdp_infos2.slice_count = pic->slices_count2;
+
+ VdpStatus st;
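+  /* (Re)create the VDPAU decoder lazily: only when none exists yet, or when
+     the profile or coded size changed since the previous picture. */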
+ if ( vd->decoder==VDP_INVALID_HANDLE || vd->decoder_profile!=seq->profile || vd->decoder_width!=seq->coded_width || vd->decoder_height!=seq->coded_height ) {
+ if ( vd->decoder!=VDP_INVALID_HANDLE ) {
+ accel->vdp_decoder_destroy( vd->decoder );
+ vd->decoder = VDP_INVALID_HANDLE;
+ }
+ st = accel->vdp_decoder_create( accel->vdp_device, seq->profile, seq->coded_width, seq->coded_height, 2, &vd->decoder);
+ if ( st!=VDP_STATUS_OK )
+ lprintf( "failed to create decoder !! %s\n", accel->vdp_get_error_string( st ) );
+ else {
+ vd->decoder_profile = seq->profile;
+ vd->decoder_width = seq->coded_width;
+ vd->decoder_height = seq->coded_height;
+ seq->vdp_runtime_nr = accel->vdp_runtime_nr;
+ }
+ }
+
+ VdpBitstreamBuffer vbit;
+ vbit.struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+ vbit.bitstream = pic->slices;
+ vbit.bitstream_bytes = (pic->vdp_infos.picture_structure==PICTURE_FRAME)? pic->slices_pos : pic->slices_pos_top;
+ st = accel->vdp_decoder_render( vd->decoder, accel->surface, (VdpPictureInfo*)&pic->vdp_infos, 1, &vbit );
+ if ( st!=VDP_STATUS_OK )
+ lprintf( "decoder failed : %d!! %s\n", st, accel->vdp_get_error_string( st ) );
+ else {
+ lprintf( "DECODER SUCCESS : frame_type:%d, slices=%d, slices_bytes=%d, current=%d, forwref:%d, backref:%d, pts:%lld\n",
+ pic->vdp_infos.picture_coding_type, pic->vdp_infos.slice_count, vbit.bitstream_bytes, accel->surface, pic->vdp_infos.forward_reference, pic->vdp_infos.backward_reference, seq->cur_pts );
+ VdpPictureInfoMPEG1Or2 *info = &pic->vdp_infos;
+ lprintf("%d %d %d %d %d %d %d %d %d %d %d %d %d\n", info->intra_dc_precision, info->frame_pred_frame_dct, info->concealment_motion_vectors,
+ info->intra_vlc_format, info->alternate_scan, info->q_scale_type, info->top_field_first, info->full_pel_forward_vector,
+ info->full_pel_backward_vector, info->f_code[0][0], info->f_code[0][1], info->f_code[1][0], info->f_code[1][1] );
+ }
+
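+  /* Field pictures: the second field is rendered separately with its own
+     picture info.  A P second field following an I first field predicts from
+     the surface just decoded; otherwise it inherits the first field's
+     references. */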
+ if ( pic->vdp_infos.picture_structure != PICTURE_FRAME ) {
+ pic->vdp_infos2.backward_reference = VDP_INVALID_HANDLE;
+ pic->vdp_infos2.forward_reference = VDP_INVALID_HANDLE;
+ if ( pic->vdp_infos2.picture_coding_type==P_FRAME ) {
+ if ( pic->vdp_infos.picture_coding_type==I_FRAME )
+ pic->vdp_infos2.forward_reference = accel->surface;
+ else
+ pic->vdp_infos2.forward_reference = pic->vdp_infos.forward_reference;
+ }
+ else if ( pic->vdp_infos.picture_coding_type==B_FRAME ) {
+ pic->vdp_infos2.forward_reference = pic->vdp_infos.forward_reference;
+ pic->vdp_infos2.backward_reference = pic->vdp_infos.backward_reference;
+ }
+ vbit.struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+ vbit.bitstream = pic->slices+pic->slices_pos_top;
+ vbit.bitstream_bytes = pic->slices_pos-pic->slices_pos_top;
+ st = accel->vdp_decoder_render( vd->decoder, accel->surface, (VdpPictureInfo*)&pic->vdp_infos2, 1, &vbit );
+ if ( st!=VDP_STATUS_OK )
+ lprintf( "decoder failed : %d!! %s\n", st, accel->vdp_get_error_string( st ) );
+ else
+ lprintf( "DECODER SUCCESS : frame_type:%d, slices=%d, current=%d, forwref:%d, backref:%d, pts:%lld\n",
+ pic->vdp_infos2.picture_coding_type, pic->vdp_infos2.slice_count, accel->surface, pic->vdp_infos2.forward_reference, pic->vdp_infos2.backward_reference, seq->cur_pts );
+ }
+}
+
+
+
+static void decode_picture( vdpau_mpeg12_decoder_t *vd, uint8_t end_of_sequence )
+{
+ sequence_t *seq = (sequence_t*)&vd->sequence;
+ picture_t *pic = (picture_t*)&seq->picture;
+ vdpau_accel_t *ref_accel;
+
+ if ( seq->profile == VDP_DECODER_PROFILE_MPEG1 )
+ pic->vdp_infos.picture_structure=PICTURE_FRAME;
+
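+  /* Resolve reference surfaces: a P picture predicts from the most recent
+     reference frame (held in backward_ref), a B picture needs both
+     references.  If a required reference is missing, skip this picture. */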
+ if ( pic->vdp_infos.picture_coding_type==P_FRAME ) {
+ if ( seq->backward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->backward_ref->accel_data;
+ pic->vdp_infos.forward_reference = ref_accel->surface;
+ }
+ else
+ return;
+ }
+ else if ( pic->vdp_infos.picture_coding_type==B_FRAME ) {
+ if ( seq->forward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->forward_ref->accel_data;
+ pic->vdp_infos.forward_reference = ref_accel->surface;
+ }
+ else
+ return;
+ if ( seq->backward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->backward_ref->accel_data;
+ pic->vdp_infos.backward_reference = ref_accel->surface;
+ }
+ else
+ return;
+ }
+
+ int still_image = (end_of_sequence) ? VO_STILL_IMAGE : 0;
+ vo_frame_t *img = vd->stream->video_out->get_frame( vd->stream->video_out, seq->coded_width, seq->coded_height,
+ seq->ratio, XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS | seq->chroma | seq->reset | still_image );
+ seq->reset = 0;
+ vdpau_accel_t *accel = (vdpau_accel_t*)img->accel_data;
+ if ( !seq->accel_vdpau )
+ seq->accel_vdpau = accel;
+
+ if( seq->vdp_runtime_nr != *(seq->accel_vdpau->current_vdp_runtime_nr) ) {
+ seq->accel_vdpau = accel;
+ if ( seq->forward_ref )
+ seq->forward_ref->free( seq->forward_ref );
+ seq->forward_ref = NULL;
+ if ( seq->backward_ref )
+ seq->backward_ref->free( seq->backward_ref );
+ seq->backward_ref = NULL;
+ vd->decoder = VDP_INVALID_HANDLE;
+ }
+
+ decode_render( vd, accel );
+
+#ifdef MAKE_DAT
+ if ( nframes==0 ) {
+ fwrite( &seq->coded_width, 1, sizeof(seq->coded_width), outfile );
+ fwrite( &seq->coded_height, 1, sizeof(seq->coded_height), outfile );
+ fwrite( &seq->ratio, 1, sizeof(seq->ratio), outfile );
+ fwrite( &seq->profile, 1, sizeof(seq->profile), outfile );
+ }
+
+ if ( nframes++ < 25 ) {
+ fwrite( &pic->vdp_infos, 1, sizeof(pic->vdp_infos), outfile );
+ fwrite( &pic->slices_pos, 1, sizeof(pic->slices_pos), outfile );
+ fwrite( pic->slices, 1, pic->slices_pos, outfile );
+ }
+#endif
+
+ img->drawn = 0;
+ img->pts = seq->seq_pts;
+ seq->seq_pts = 0; /* reset */
+ img->bad_frame = 0;
+
+ if ( end_of_sequence ) {
+ if ( seq->backward_ref )
+ seq->backward_ref->free( seq->backward_ref );
+ seq->backward_ref = NULL;
+ }
+
+#if 0
+ /* trying to deal with (french) buggy streams that randomly set bottom_field_first
+ while stream is top_field_first. So we assume that when top_field_first
+ is set one time, the stream _is_ top_field_first. */
+ lprintf("pic->vdp_infos.top_field_first = %d\n", pic->vdp_infos.top_field_first);
+ if ( pic->vdp_infos.top_field_first )
+ seq->top_field_first = 1;
+ img->top_field_first = seq->top_field_first;
+#else
+ img->top_field_first = pic->vdp_infos.top_field_first;
+#endif
+
+ /* progressive_frame is unreliable with most mpeg2 streams */
+ if ( pic->vdp_infos.picture_structure!=PICTURE_FRAME )
+ img->progressive_frame = 0;
+ else
+ img->progressive_frame = pic->progressive_frame;
+
+ img->repeat_first_field = pic->repeat_first_field;
+
+ double duration = seq->video_step;
+
+ if ( img->repeat_first_field ) {
+ if( !seq->progressive_sequence && pic->progressive_frame ) {
+      /* decoder should output 3 fields, so adjust duration to
+         account for the extra field time */
+ duration *= 3;
+ duration /= 2;
+ } else if ( seq->progressive_sequence ) {
+ /* for progressive sequences the output should repeat the
+ frame 1 or 2 times depending on top_field_first flag. */
+ duration *= (pic->vdp_infos.top_field_first ? 3 : 2);
+ }
+ }
+
+ img->duration = (int)(duration + .5);
+
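+  /* Output reordering: B frames are drawn and freed immediately, while I and
+     P frames are kept as the new backward reference and drawn when the next
+     reference picture arrives.  The first I frame of a sequence is drawn
+     right away so playback can start. */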
+ if ( pic->vdp_infos.picture_coding_type!=B_FRAME ) {
+ if ( pic->vdp_infos.picture_coding_type==I_FRAME && !seq->backward_ref ) {
+ img->pts = 0;
+ img->draw( img, vd->stream );
+ ++img->drawn;
+ }
+ if ( seq->forward_ref ) {
+ seq->forward_ref->drawn = 0;
+ seq->forward_ref->free( seq->forward_ref );
+ }
+ seq->forward_ref = seq->backward_ref;
+ if ( seq->forward_ref && !seq->forward_ref->drawn ) {
+ seq->forward_ref->draw( seq->forward_ref, vd->stream );
+ }
+ seq->backward_ref = img;
+ }
+ else {
+ img->draw( img, vd->stream );
+ img->free( img );
+ }
+}
+
+
+
+static void picture_ready( vdpau_mpeg12_decoder_t *vd, uint8_t end_of_sequence )
+{
+ picture_t *pic = (picture_t*)&vd->sequence.picture;
+ if ( !pic->slices_count )
+ return;
+ if ( pic->vdp_infos2.picture_structure && !pic->slices_count2 )
+ return;
+ decode_picture( vd, end_of_sequence );
+}
+
+
+
+/*
+ * This function receives a buffer of data from the demuxer layer and
+ * figures out how to handle it based on its header flags.
+ */
+static void vdpau_mpeg12_decode_data (video_decoder_t *this_gen, buf_element_t *buf)
+{
+ vdpau_mpeg12_decoder_t *this = (vdpau_mpeg12_decoder_t *) this_gen;
+ sequence_t *seq = (sequence_t*)&this->sequence;
+
+ /* preview buffers shall not be decoded and drawn -- use them only to supply stream information */
+ if (buf->decoder_flags & BUF_FLAG_PREVIEW)
+ return;
+
+ if ( !buf->size )
+ return;
+
+ if ( buf->pts )
+ seq->cur_pts = buf->pts;
+
+ int size = seq->bufpos+buf->size;
+ if ( seq->bufsize < size ) {
+ seq->bufsize = size+1024;
+ seq->buf = realloc( seq->buf, seq->bufsize );
+ }
+ xine_fast_memcpy( seq->buf+seq->bufpos, buf->content, buf->size );
+ seq->bufpos += buf->size;
+
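+  /* Scan the accumulated buffer for 00 00 01 start codes.  Everything between
+     two start codes forms one complete syntax unit which is handed to
+     parse_code(); the tail after the last start code is kept for the next
+     call. */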
+ while ( seq->bufseek <= seq->bufpos-4 ) {
+ uint8_t *buffer = seq->buf+seq->bufseek;
+ if ( buffer[0]==0 && buffer[1]==0 && buffer[2]==1 ) {
+ if ( seq->start<0 ) {
+ seq->start = seq->bufseek;
+ }
+ else {
+ parse_code( this, seq->buf+seq->start, seq->bufseek-seq->start );
+ uint8_t *tmp = (uint8_t*)malloc(seq->bufsize);
+ xine_fast_memcpy( tmp, seq->buf+seq->bufseek, seq->bufpos-seq->bufseek );
+ seq->bufpos -= seq->bufseek;
+ seq->start = -1;
+ seq->bufseek = -1;
+ free( seq->buf );
+ seq->buf = tmp;
+ }
+ }
+ ++seq->bufseek;
+ }
+
+ /* still image detection -- don't wait for further data if buffer ends in sequence end code */
+ if (seq->start >= 0 && seq->buf[seq->start + 3] == sequence_end_code) {
+ decode_picture(this, 1);
+ parse_code(this, seq->buf+seq->start, 4);
+ seq->start = -1;
+ }
+}
+
+/*
+ * This function is called when xine needs to flush the system.
+ */
+static void vdpau_mpeg12_flush (video_decoder_t *this_gen) {
+ vdpau_mpeg12_decoder_t *this = (vdpau_mpeg12_decoder_t *) this_gen;
+
+ lprintf( "vdpau_mpeg12_flush\n" );
+}
+
+/*
+ * This function resets the video decoder.
+ */
+static void vdpau_mpeg12_reset (video_decoder_t *this_gen) {
+ vdpau_mpeg12_decoder_t *this = (vdpau_mpeg12_decoder_t *) this_gen;
+
+ lprintf( "vdpau_mpeg12_reset\n" );
+ reset_sequence( &this->sequence, 1 );
+}
+
+/*
+ * The decoder should forget any stored pts values here.
+ */
+static void vdpau_mpeg12_discontinuity (video_decoder_t *this_gen) {
+ vdpau_mpeg12_decoder_t *this = (vdpau_mpeg12_decoder_t *) this_gen;
+
+ lprintf( "vdpau_mpeg12_discontinuity\n" );
+ reset_sequence( &this->sequence, 0 );
+}
+
+/*
+ * This function frees the video decoder instance allocated to the decoder.
+ */
+static void vdpau_mpeg12_dispose (video_decoder_t *this_gen) {
+
+ vdpau_mpeg12_decoder_t *this = (vdpau_mpeg12_decoder_t *) this_gen;
+
+ lprintf( "vdpau_mpeg12_dispose\n" );
+
+ if ( this->decoder!=VDP_INVALID_HANDLE && this->sequence.accel_vdpau ) {
+ this->sequence.accel_vdpau->vdp_decoder_destroy( this->decoder );
+ this->decoder = VDP_INVALID_HANDLE;
+ }
+
+ free_sequence( &this->sequence );
+
+ this->stream->video_out->close( this->stream->video_out, this->stream );
+
+ free( this->sequence.picture.slices );
+ free( this->sequence.buf );
+ free( this_gen );
+}
+
+/*
+ * This function allocates, initializes, and returns a private video
+ * decoder structure.
+ */
+static video_decoder_t *open_plugin (video_decoder_class_t *class_gen, xine_stream_t *stream) {
+
+  vdpau_mpeg12_decoder_t *this;
+
+ lprintf( "open_plugin\n" );
+
+ /* the videoout must be vdpau-capable to support this decoder */
+ if ( !(stream->video_driver->get_capabilities(stream->video_driver) & VO_CAP_VDPAU_MPEG12) )
+ return NULL;
+
+ /* now check if vdpau has free decoder resource */
+ vo_frame_t *img = stream->video_out->get_frame( stream->video_out, 1920, 1080, 1, XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS );
+ vdpau_accel_t *accel = (vdpau_accel_t*)img->accel_data;
+ int runtime_nr = accel->vdp_runtime_nr;
+ img->free(img);
+ VdpDecoder decoder;
+ VdpStatus st = accel->vdp_decoder_create( accel->vdp_device, VDP_DECODER_PROFILE_MPEG2_MAIN, 1920, 1080, 2, &decoder );
+ if ( st!=VDP_STATUS_OK ) {
+ lprintf( "can't create vdpau decoder.\n" );
+ return NULL;
+ }
+
+ accel->vdp_decoder_destroy( decoder );
+
+ this = (vdpau_mpeg12_decoder_t *) calloc(1, sizeof(vdpau_mpeg12_decoder_t));
+
+ this->video_decoder.decode_data = vdpau_mpeg12_decode_data;
+ this->video_decoder.flush = vdpau_mpeg12_flush;
+ this->video_decoder.reset = vdpau_mpeg12_reset;
+ this->video_decoder.discontinuity = vdpau_mpeg12_discontinuity;
+ this->video_decoder.dispose = vdpau_mpeg12_dispose;
+
+ this->stream = stream;
+ this->class = (vdpau_mpeg12_class_t *) class_gen;
+
+ this->sequence.bufsize = 1024;
+ this->sequence.buf = (uint8_t*)malloc(this->sequence.bufsize);
+ this->sequence.forward_ref = 0;
+ this->sequence.backward_ref = 0;
+ this->sequence.vdp_runtime_nr = runtime_nr;
+ free_sequence( &this->sequence );
+ this->sequence.ratio = 1;
+ this->sequence.reset = VO_NEW_SEQUENCE_FLAG;
+
+ init_picture( &this->sequence.picture );
+
+ this->decoder = VDP_INVALID_HANDLE;
+ this->sequence.accel_vdpau = NULL;
+
+ (stream->video_out->open)(stream->video_out, stream);
+
+#ifdef MAKE_DAT
+ outfile = fopen( "/tmp/mpg.dat","w");
+ nframes = 0;
+#endif
+
+ return &this->video_decoder;
+}
+
+/*
+ * This function allocates a private video decoder class and initializes
+ * the class's member functions.
+ */
+static void *init_plugin (xine_t *xine, void *data) {
+
+ vdpau_mpeg12_class_t *this;
+
+ this = (vdpau_mpeg12_class_t *) calloc(1, sizeof(vdpau_mpeg12_class_t));
+
+ this->decoder_class.open_plugin = open_plugin;
+ this->decoder_class.identifier = "vdpau_mpeg12";
+ this->decoder_class.description =
+ N_("vdpau_mpeg12: mpeg1/2 decoder plugin using VDPAU hardware decoding.\n"
+ "Must be used along with video_out_vdpau.");
+ this->decoder_class.dispose = default_video_decoder_class_dispose;
+
+ return this;
+}
+
+/*
+ * This is a list of all of the internal xine video buffer types that
+ * this decoder is able to handle. Check src/xine-engine/buffer.h for a
+ * list of valid buffer types (and add a new one if the one you need does
+ * not exist). Terminate the list with a 0.
+ */
+static const uint32_t video_types[] = {
+ BUF_VIDEO_MPEG,
+ 0
+};
+
+/*
+ * This data structure combines the list of supported xine buffer types and
+ * the priority that the plugin should be given with respect to other
+ * plugins that handle the same buffer type. A plugin with priority (n+1)
+ * will be used instead of a plugin with priority (n).
+ */
+static const decoder_info_t dec_info_video = {
+ video_types, /* supported types */
+ 8 /* priority */
+};
+
+/*
+ * The plugin catalog entry. This is the only information that this plugin
+ * will export to the public.
+ */
+const plugin_info_t xine_plugin_info[] EXPORTED = {
+ /* { type, API, "name", version, special_info, init_function } */
+ { PLUGIN_VIDEO_DECODER, 19, "vdpau_mpeg12", XINE_VERSION_CODE, &dec_info_video, init_plugin },
+ { PLUGIN_NONE, 0, "", 0, NULL, NULL }
+};
diff --git a/src/video_dec/libvdpau/vdpau_mpeg4.c b/src/video_dec/libvdpau/vdpau_mpeg4.c
new file mode 100644
index 000000000..4d7dee1ed
--- /dev/null
+++ b/src/video_dec/libvdpau/vdpau_mpeg4.c
@@ -0,0 +1,1194 @@
+/*
+ * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; remove-trailing-space on;
+ *
+ * Copyright (C) 2010 the xine project
+ * Copyright (C) 2010 Christophe Thommeret <hftom@free.fr>
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * vdpau_mpeg4.c, a mpeg4-part-2 video stream parser using VDPAU hardware decoder
+ *
+ */
+
+/*#define LOG*/
+#define LOG_MODULE "vdpau_mpeg4"
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <xine/xine_internal.h>
+#include <xine/video_out.h>
+#include <xine/buffer.h>
+#include <xine/xineutils.h>
+#include "accel_vdpau.h"
+#include "bits_reader.h"
+
+#include <vdpau/vdpau.h>
+
+#define begin_vo_start_code 0x00
+#define end_vo_start_code 0x1f
+#define begin_vol_start_code 0x20
+#define end_vol_start_code 0x2f
+#define viso_sequence_start_code 0xb0
+#define viso_sequence_end_code 0xb1
+#define viso_start_code 0xb5
+#define group_start_code 0xb3
+#define user_data_start_code 0xb2
+#define vop_start_code 0xb6
+
+#define I_FRAME 0
+#define P_FRAME 1
+#define B_FRAME 2
+
+#define PICTURE_TOP 1
+#define PICTURE_BOTTOM 2
+#define PICTURE_FRAME 3
+
+#define SHAPE_RECT 0
+#define SHAPE_BIN 1
+#define SHAPE_BINONLY 2
+#define SHAPE_GRAY 3
+
+#define SPRITE_STATIC 1
+#define SPRITE_GMC 2
+
+static int nframe;
+
+/*#define MAKE_DAT*/ /* do NOT define this unless you know what you are doing */
+#ifdef MAKE_DAT
+static int nframes;
+static FILE *outfile;
+#endif
+
+
+
+/* default intra quant matrix, in zig-zag order */
+static const uint8_t default_intra_quantizer_matrix[64] = {
+ 8,
+ 17, 17,
+ 20, 18, 18,
+ 19, 19, 21, 21,
+ 22, 22, 22, 21, 21,
+ 23, 23, 23, 23, 23, 23,
+ 25, 24, 24, 24, 24, 25, 25,
+ 27, 27, 26, 26, 26, 26, 26, 27,
+ 28, 28, 28, 28, 28, 28, 28,
+ 30, 30, 30, 30, 30, 30,
+ 32, 32, 32, 32, 32,
+ 35, 35, 35, 35,
+ 38, 38, 38,
+ 41, 41,
+ 45
+};
+
+/* default non intra quant matrix, in zig-zag order */
+static const uint8_t default_non_intra_quantizer_matrix[64] = {
+ 16,
+ 17, 17,
+ 18, 18, 18,
+ 19, 19, 19, 19,
+ 20, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 25, 24, 24, 24,
+ 25, 26, 26, 26, 26, 25,
+ 27, 27, 27, 27, 27,
+ 28, 28, 28, 28,
+ 30, 30, 30,
+ 31, 31,
+ 33
+};
+
+uint8_t mpeg_scan_norm[64] = {
+ /* Zig-Zag scan pattern */
+ 0, 1, 8,16, 9, 2, 3,10,
+ 17,24,32,25,18,11, 4, 5,
+ 12,19,26,33,40,48,41,34,
+ 27,20,13, 6, 7,14,21,28,
+ 35,42,49,56,57,50,43,36,
+ 29,22,15,23,30,37,44,51,
+ 58,59,52,45,38,31,39,46,
+ 53,60,61,54,47,55,62,63
+};
+
+
+
+typedef struct {
+ VdpPictureInfoMPEG4Part2 vdp_infos; /* first field, also used for frame */
+
+ int viso_verid;
+ int newpred_enable;
+ int reduced_resolution_vop_enable;
+ int vol_shape;
+ int complexity_estimation_disable;
+ int sprite_enable;
+ int quant_precision;
+
+ int progressive_frame;
+} picture_t;
+
+
+
+typedef struct {
+ uint32_t coded_width;
+ uint32_t coded_height;
+
+ uint64_t video_step; /* frame duration in pts units */
+ double ratio;
+ VdpDecoderProfile profile;
+ int chroma;
+ int top_field_first;
+
+ int have_header;
+
+ uint8_t *buf; /* accumulate data */
+ int bufseek;
+ uint32_t bufsize;
+ uint32_t bufpos;
+ int start;
+
+ picture_t picture;
+ vo_frame_t *forward_ref;
+ vo_frame_t *backward_ref;
+
+ int64_t cur_pts, seq_pts;
+
+ vdpau_accel_t *accel_vdpau;
+
+ VdpColorStandard color_standard;
+
+ bits_reader_t br;
+
+ int vdp_runtime_nr;
+ int reset;
+
+ int have_codec_name;
+ char codec_name[256];
+
+ int fixed_vop_time_increment;
+ int time_increment_bits;
+ int last_time_base;
+ int time_base;
+ int time;
+ int last_non_b_time;
+ int t_frame;
+
+} sequence_t;
+
+
+
+typedef struct {
+ video_decoder_class_t decoder_class;
+} vdpau_mpeg4_class_t;
+
+
+
+typedef struct vdpau_mpeg4_decoder_s {
+ video_decoder_t video_decoder; /* parent video decoder structure */
+
+ vdpau_mpeg4_class_t *class;
+ xine_stream_t *stream;
+
+ sequence_t sequence;
+
+ VdpDecoder decoder;
+ VdpDecoderProfile decoder_profile;
+ uint32_t decoder_width;
+ uint32_t decoder_height;
+
+} vdpau_mpeg4_decoder_t;
+
+
+
+static void reset_picture( picture_t *pic )
+{
+ lprintf( "reset_picture\n" );
+ pic->vdp_infos.vop_coding_type = 0;
+ pic->vdp_infos.alternate_vertical_scan_flag = 0;
+ pic->vdp_infos.quant_type = 0;
+ pic->vdp_infos.vop_time_increment_resolution = 0;
+ pic->vdp_infos.vop_fcode_forward = 1;
+ pic->vdp_infos.vop_fcode_backward = 1;
+ pic->vdp_infos.resync_marker_disable = 0;
+ pic->vdp_infos.interlaced = 0;
+ pic->vdp_infos.quarter_sample = 0;
+ pic->vdp_infos.short_video_header = 0;
+ pic->vdp_infos.rounding_control = 0;
+ pic->vdp_infos.top_field_first = 1;
+ pic->progressive_frame = 1;
+ pic->viso_verid = 1;
+ pic->newpred_enable = 0;
+ pic->reduced_resolution_vop_enable = 0;
+ pic->complexity_estimation_disable = 1;
+ pic->vol_shape = SHAPE_RECT;
+ pic->quant_precision = 5;
+ pic->vdp_infos.trd[0] = pic->vdp_infos.trd[1] = 0;
+ pic->vdp_infos.trb[0] = pic->vdp_infos.trb[1] = 0;
+}
+
+
+
+static void init_picture( picture_t *pic )
+{
+ reset_picture( pic );
+}
+
+
+
+static void reset_sequence( sequence_t *sequence, int free_refs )
+{
+ sequence->cur_pts = sequence->seq_pts = 0;
+ if ( sequence->forward_ref )
+ sequence->forward_ref->pts = 0;
+ if ( sequence->backward_ref )
+ sequence->backward_ref->pts = 0;
+
+ if ( !free_refs )
+ return;
+
+ sequence->bufpos = 0;
+ sequence->bufseek = 0;
+ sequence->start = -1;
+ if ( sequence->forward_ref )
+ sequence->forward_ref->free( sequence->forward_ref );
+ sequence->forward_ref = NULL;
+ if ( sequence->backward_ref )
+ sequence->backward_ref->free( sequence->backward_ref );
+ sequence->backward_ref = NULL;
+ sequence->top_field_first = 0;
+ sequence->reset = VO_NEW_SEQUENCE_FLAG;
+ sequence->color_standard = VDP_COLOR_STANDARD_ITUR_BT_709;
+
+ sequence->last_time_base = 0;
+ sequence->time_base = 0;
+ sequence->time = 0;
+ sequence->last_non_b_time = 0;
+ sequence->t_frame = 0;
+}
+
+
+
+static void free_sequence( sequence_t *sequence )
+{
+ lprintf( "init_sequence\n" );
+ sequence->have_header = 0;
+ sequence->profile = VDP_DECODER_PROFILE_MPEG4_PART2_ASP;
+ sequence->chroma = 0;
+ sequence->video_step = 3600;
+ sequence->have_codec_name = 0;
+ strcpy( sequence->codec_name, "MPEG4 / XviD / DivX (vdpau)" );
+ reset_sequence( sequence, 1 );
+}
+
+
+
+static void update_metadata( vdpau_mpeg4_decoder_t *this_gen )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_WIDTH, sequence->coded_width );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_HEIGHT, sequence->coded_height );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_RATIO, ((double)10000*sequence->ratio) );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_FRAME_DURATION, sequence->video_step );
+ _x_meta_info_set_utf8( this_gen->stream, XINE_META_INFO_VIDEOCODEC, sequence->codec_name );
+ xine_event_t event;
+ xine_format_change_data_t data;
+ event.type = XINE_EVENT_FRAME_FORMAT_CHANGE;
+ event.stream = this_gen->stream;
+ event.data = &data;
+ event.data_length = sizeof(data);
+ data.width = sequence->coded_width;
+ data.height = sequence->coded_height;
+ data.aspect = sequence->ratio;
+ xine_event_send( this_gen->stream, &event );
+}
+
+
+
+static void visual_object( vdpau_mpeg4_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+ picture_t *picture = (picture_t*)&sequence->picture;
+ bits_reader_set( &sequence->br, buf, len );
+
+ if ( read_bits( &sequence->br, 1 ) ) {
+ picture->viso_verid = read_bits( &sequence->br, 4 );
+ lprintf("visual_object_verid: %d\n", picture->viso_verid);
+ skip_bits( &sequence->br, 3 );
+ }
+ if ( read_bits( &sequence->br, 4 ) == 1 ) {
+ if ( read_bits( &sequence->br, 1 ) ) {
+ skip_bits( &sequence->br, 4 );
+ if ( read_bits( &sequence->br, 1 ) ) {
+ if ( read_bits( &sequence->br, 8 ) == 7 ) {
+ lprintf("color_standard: smpte_240M\n");
+ sequence->color_standard = VDP_COLOR_STANDARD_SMPTE_240M;
+ }
+ skip_bits( &sequence->br, 16 );
+ }
+ }
+ }
+}
+
+
+
+static void video_object_layer( vdpau_mpeg4_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+ picture_t *picture = (picture_t*)&sequence->picture;
+ bits_reader_set( &sequence->br, buf, len );
+
+ int vol_verid = 1;
+
+ picture->vdp_infos.short_video_header = 0;
+ sequence->t_frame = 0;
+
+ skip_bits( &sequence->br, 9 );
+ if ( read_bits( &sequence->br, 1 ) ) {
+ vol_verid = read_bits( &sequence->br, 4 );
+ lprintf("video_object_layer_verid: %d\n", vol_verid);
+ skip_bits( &sequence->br, 3 );
+ }
+ double parw=1, parh=1;
+ int ar = read_bits( &sequence->br, 4 );
+ lprintf("aspect_ratio_info: %d\n", ar);
+ switch ( ar ) {
+ case 1: parw = parh = 1; break;
+ case 2: parw = 12; parh = 11; break;
+ case 3: parw = 10; parh = 11; break;
+ case 4: parw = 16; parh = 11; break;
+ case 5: parw = 40; parh = 33; break;
+ case 15: {
+ parw = read_bits( &sequence->br, 8 );
+ parh = read_bits( &sequence->br, 8 );
+ break;
+ }
+ }
+ lprintf("parw: %f, parh: %f\n", parw, parh);
+ if ( read_bits( &sequence->br, 1 ) ) {
+ skip_bits( &sequence->br, 3 );
+ if ( read_bits( &sequence->br, 1 ) ) {
+ read_bits( &sequence->br, 16 );
+ read_bits( &sequence->br, 16 );
+ read_bits( &sequence->br, 16 );
+ read_bits( &sequence->br, 15 );
+ read_bits( &sequence->br, 16 );
+ }
+ }
+
+ picture->vol_shape = read_bits( &sequence->br, 2 );
+ if ( (picture->vol_shape == SHAPE_GRAY) && (vol_verid != 1) ) {
+ skip_bits( &sequence->br, 4 );
+ fprintf(stderr, "vdpau_mpeg4: unsupported SHAPE_GRAY!\n");
+ }
+ skip_bits( &sequence->br, 1 );
+ picture->vdp_infos.vop_time_increment_resolution = read_bits( &sequence->br, 16 );
+ lprintf("vop_time_increment_resolution: %d\n", picture->vdp_infos.vop_time_increment_resolution);
+ int length=1, max=2;
+ while ( (max - 1) < picture->vdp_infos.vop_time_increment_resolution ) {
+ ++length;
+ max *= 2;
+ }
+ sequence->time_increment_bits = length;
+ if ( sequence->time_increment_bits < 1 )
+ sequence->time_increment_bits = 1;
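+  /* time_increment_bits is the number of bits needed to code values up to
+     vop_time_increment_resolution - 1; e.g. a resolution of 25 needs 5 bits. */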
+ skip_bits( &sequence->br, 1 );
+
+ if ( read_bits( &sequence->br, 1 ) ) {
+ sequence->fixed_vop_time_increment = read_bits( &sequence->br, sequence->time_increment_bits );
+ }
+ else
+ sequence->fixed_vop_time_increment = 1;
+
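+  /* frame duration in 90kHz pts ticks; e.g. a resolution of 25 with a fixed
+     increment of 1 gives 90000 / 25 = 3600 ticks per frame. */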
+ sequence->video_step = 90000 / (picture->vdp_infos.vop_time_increment_resolution / sequence->fixed_vop_time_increment);
+ lprintf("fixed_vop_time_increment: %d\n", sequence->fixed_vop_time_increment);
+ lprintf("video_step: %d\n", (int)sequence->video_step);
+
+ if ( picture->vol_shape != SHAPE_BINONLY ) {
+ if ( picture->vol_shape == SHAPE_RECT ) {
+ skip_bits( &sequence->br, 1 );
+ sequence->coded_width = read_bits( &sequence->br, 13 );
+ lprintf("vol_width: %d\n", sequence->coded_width);
+ skip_bits( &sequence->br, 1 );
+ sequence->coded_height = read_bits( &sequence->br, 13 );
+ lprintf("vol_height: %d\n", sequence->coded_height);
+ skip_bits( &sequence->br, 1 );
+ }
+ sequence->ratio = ((double)sequence->coded_width * parw) / ((double)sequence->coded_height * parh);
+ lprintf("aspect_ratio: %f\n", sequence->ratio);
+ picture->vdp_infos.interlaced = read_bits( &sequence->br, 1 );
+ skip_bits( &sequence->br, 1 );
+
+ picture->sprite_enable = 0;
+ if ( vol_verid == 1 )
+ picture->sprite_enable = read_bits( &sequence->br, 1 );
+ else
+ picture->sprite_enable = read_bits( &sequence->br, 2 );
+
+ if ( (picture->sprite_enable == SPRITE_STATIC) || (picture->sprite_enable == SPRITE_GMC) ) {
+ if ( picture->sprite_enable != SPRITE_GMC ) {
+ skip_bits( &sequence->br, 14 );
+ skip_bits( &sequence->br, 14 );
+ skip_bits( &sequence->br, 14 );
+ skip_bits( &sequence->br, 14 );
+ }
+ skip_bits( &sequence->br, 9 );
+ if ( picture->sprite_enable != SPRITE_GMC )
+ skip_bits( &sequence->br, 1 );
+ }
+ if ( (vol_verid != 1) && (picture->vol_shape != SHAPE_RECT) )
+ skip_bits( &sequence->br, 1 );
+
+ if ( read_bits( &sequence->br, 1 ) ) {
+ picture->quant_precision = read_bits( &sequence->br, 4 );
+ skip_bits( &sequence->br, 4 );
+ }
+ else
+ picture->quant_precision = 5;
+
+ if ( picture->vol_shape == SHAPE_GRAY )
+ skip_bits( &sequence->br, 3 );
+
+ picture->vdp_infos.quant_type = read_bits( &sequence->br, 1 );
+
+ /* load default matrices */
+ int j;
+ for ( j=0; j<64; ++j ) {
+ sequence->picture.vdp_infos.intra_quantizer_matrix[mpeg_scan_norm[j]] = default_intra_quantizer_matrix[j];
+ sequence->picture.vdp_infos.non_intra_quantizer_matrix[mpeg_scan_norm[j]] = default_non_intra_quantizer_matrix[j];
+ }
+ if ( picture->vdp_infos.quant_type ) {
+ int val, last = 0;
+ if ( read_bits( &sequence->br, 1 ) ) { /* load_intra_quant_matrix */
+ lprintf("load_intra_quant_matrix\n");
+ for ( j=0; j<64; ++j ) {
+ val = read_bits( &sequence->br, 8 );
+ if ( !val )
+ break;
+ last = sequence->picture.vdp_infos.intra_quantizer_matrix[j] = val;
+ }
+ for ( ; j<64; ++j )
+ sequence->picture.vdp_infos.intra_quantizer_matrix[j] = last;
+ }
+ if ( read_bits( &sequence->br, 1 ) ) { /* load_non_intra_quant_matrix */
+ lprintf("load_non_intra_quant_matrix\n");
+ for ( j=0; j<64; ++j ) {
+ val = read_bits( &sequence->br, 8 );
+ if ( !val )
+ break;
+ last = sequence->picture.vdp_infos.non_intra_quantizer_matrix[j] = val;
+ }
+ for ( ; j<64; ++j )
+ sequence->picture.vdp_infos.non_intra_quantizer_matrix[j] = last;
+ }
+ if ( picture->vol_shape == SHAPE_GRAY ) { /* FIXME */
+ fprintf(stderr, "vdpau_mpeg4: grayscale shape not supported!\n");
+ return;
+ }
+ }
+ if ( vol_verid != 1 )
+ sequence->picture.vdp_infos.quarter_sample = read_bits( &sequence->br, 1 );
+ else
+ sequence->picture.vdp_infos.quarter_sample = 0;
+
+ picture->complexity_estimation_disable = read_bits( &sequence->br, 1 );
+ if ( !picture->complexity_estimation_disable ) { /* define_vop_complexity_estimation_header */
+ int estimation_method = read_bits( &sequence->br, 2 );
+ if ( (estimation_method == 0) || (estimation_method == 1) ){
+ if ( !read_bits( &sequence->br, 1 ) )
+ skip_bits( &sequence->br, 6 );
+ if ( !read_bits( &sequence->br, 1 ) )
+ skip_bits( &sequence->br, 4 );
+ skip_bits( &sequence->br, 1 );
+ if ( !read_bits( &sequence->br, 1 ) )
+ skip_bits( &sequence->br, 4 );
+ if ( !read_bits( &sequence->br, 1 ) )
+ skip_bits( &sequence->br, 6 );
+ skip_bits( &sequence->br, 1 );
+ if ( estimation_method == 1 ) {
+ if ( !read_bits( &sequence->br, 1 ) )
+ skip_bits( &sequence->br, 2 );
+ }
+ }
+ }
+
+ picture->vdp_infos.resync_marker_disable = read_bits( &sequence->br, 1 );
+
+ if ( read_bits( &sequence->br, 1 ) )
+ skip_bits( &sequence->br, 1 );
+ if ( vol_verid != 1 ) {
+ picture->newpred_enable = read_bits( &sequence->br, 1 );
+ if ( picture->newpred_enable )
+ skip_bits( &sequence->br, 3 );
+ picture->reduced_resolution_vop_enable = read_bits( &sequence->br, 1 );
+ }
+ else {
+ picture->newpred_enable = 0;
+ picture->reduced_resolution_vop_enable = 0;
+ }
+ /* .... */
+ }
+ else {
+ if ( vol_verid != 1 ) {
+ if ( read_bits( &sequence->br, 1 ) )
+ skip_bits( &sequence->br, 24 );
+ }
+ picture->vdp_infos.resync_marker_disable = read_bits( &sequence->br, 1 );
+ }
+
+ if ( !sequence->have_header ) {
+ update_metadata( this_gen );
+ sequence->have_header = 1;
+ }
+}
+
+
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+
+static void video_object_plane( vdpau_mpeg4_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+ picture_t *picture = (picture_t*)&sequence->picture;
+ bits_reader_set( &sequence->br, buf, len );
+ int time_inc=0, time_increment;
+
+ sequence->seq_pts = sequence->cur_pts;
+ sequence->cur_pts = 0;
+
+ picture->vdp_infos.vop_coding_type = read_bits( &sequence->br, 2 );
+ while ( read_bits( &sequence->br, 1 ) )
+ ++time_inc;
+
+ skip_bits( &sequence->br, 1 );
+
+ if ( sequence->time_increment_bits == 0 || !(get_bits( &sequence->br, sequence->time_increment_bits + 1) & 1) ) {
+ for ( sequence->time_increment_bits = 1; sequence->time_increment_bits < 16; ++sequence->time_increment_bits ) {
+ if ( picture->vdp_infos.vop_coding_type == P_FRAME ) {
+ if ( (get_bits( &sequence->br, sequence->time_increment_bits + 6 ) & 0x37) == 0x30 )
+ break;
+ }
+ else {
+ if ( (get_bits( &sequence->br, sequence->time_increment_bits + 5 ) & 0x1f) == 0x18 )
+ break;
+ }
+ fprintf(stderr, "Headers are not complete, guessing time_increment_bits: %d\n", sequence->time_increment_bits);
+ }
+ }
+
+ time_increment = read_bits( &sequence->br, sequence->time_increment_bits );
+
+ if ( picture->vdp_infos.vop_coding_type != B_FRAME ) {
+ sequence->last_time_base = sequence->time_base;
+ sequence->time_base += time_inc;
+ sequence->time = sequence->time_base * picture->vdp_infos.vop_time_increment_resolution + time_increment;
+ if ( sequence->time < sequence->last_non_b_time ) {
+ ++sequence->time_base;
+ sequence->time += picture->vdp_infos.vop_time_increment_resolution;
+ }
+ picture->vdp_infos.trd[0] = sequence->time - sequence->last_non_b_time;
+ sequence->last_non_b_time = sequence->time;
+ }
+ else {
+ sequence->time = (sequence->last_time_base + time_inc) * picture->vdp_infos.vop_time_increment_resolution + time_increment;
+ picture->vdp_infos.trb[0] = picture->vdp_infos.trd[0] - (sequence->last_non_b_time - sequence->time);
+ if ( (picture->vdp_infos.trd[0] <= picture->vdp_infos.trb[0] ) || (picture->vdp_infos.trd[0] <= (picture->vdp_infos.trd[0] - picture->vdp_infos.trb[0])) || (picture->vdp_infos.trd[0] <= 0) ) {
+ /* FIXME */
+ }
+ if ( sequence->t_frame == 0 )
+ sequence->t_frame = picture->vdp_infos.trb[0];
+ if ( sequence->t_frame == 0 )
+ sequence->t_frame = 1;
+ picture->vdp_infos.trd[1] = ( ROUNDED_DIV(sequence->last_non_b_time, sequence->t_frame) - ROUNDED_DIV(sequence->last_non_b_time - picture->vdp_infos.trd[0], sequence->t_frame));
+ picture->vdp_infos.trb[1] = ( ROUNDED_DIV(sequence->time, sequence->t_frame) - ROUNDED_DIV(sequence->last_non_b_time - picture->vdp_infos.trd[0], sequence->t_frame));
+ if ( picture->vdp_infos.interlaced ) {
+ /* FIXME */
+ }
+ }
+
+ /*if ( sequence->fixed_vop_time_increment )
+ sequence->seq_pts = ( sequence->time + sequence->fixed_vop_time_increment/2 ) / sequence->fixed_vop_time_increment;*/
+
+ skip_bits( &sequence->br, 1 );
+ if ( !read_bits( &sequence->br, 1 ) )
+ return; /* vop_coded == 0 */
+
+ if ( picture->newpred_enable ) { /* FIXME */
+ fprintf(stderr, "vdpau_mpeg4: newpred_enable, dunno what to do !!!\n");
+ return;
+ }
+
+ if ( (picture->vol_shape != SHAPE_BINONLY) && (picture->vdp_infos.vop_coding_type == P_FRAME) )
+ picture->vdp_infos.rounding_control = read_bits( &sequence->br, 1 );
+ else
+ picture->vdp_infos.rounding_control = 0;
+
+ if ( picture->reduced_resolution_vop_enable && (picture->vol_shape == SHAPE_RECT) && (picture->vdp_infos.vop_coding_type != B_FRAME) )
+ skip_bits( &sequence->br, 1 );
+ if ( picture->vol_shape != SHAPE_RECT ) { /* FIXME */
+ fprintf(stderr, "vdpau_mpeg4: vol_shape != SHAPE_RECT, return\n");
+ return;
+ }
+
+ if ( picture->vol_shape != SHAPE_BINONLY ) {
+ if ( !picture->complexity_estimation_disable ) { /* FIXME */
+ fprintf(stderr, "vdpau_mpeg4: TODO: read_vop_complexity_estimation_header\n");
+ return;
+ }
+ }
+
+ if ( picture->vol_shape != SHAPE_BINONLY ) {
+ skip_bits( &sequence->br, 3 );
+ if ( picture->vdp_infos.interlaced ) {
+ picture->vdp_infos.top_field_first = read_bits( &sequence->br, 1 );
+ picture->vdp_infos.alternate_vertical_scan_flag = read_bits( &sequence->br, 1 );
+ }
+ }
+
+ if ( picture->vol_shape != SHAPE_BINONLY ) {
+ skip_bits( &sequence->br, picture->quant_precision );
+ if ( picture->vol_shape == SHAPE_GRAY ) { /* FIXME */
+ fprintf(stderr, "vdpau_mpeg4: unsupported SHAPE_GRAY!\n");
+ return;
+ }
+ if ( picture->vdp_infos.vop_coding_type != I_FRAME )
+ picture->vdp_infos.vop_fcode_forward = read_bits( &sequence->br, 3 );
+ if ( picture->vdp_infos.vop_coding_type == B_FRAME )
+ picture->vdp_infos.vop_fcode_backward = read_bits( &sequence->br, 3 );
+ }
+}
+
+
+
+static void gop_header( vdpau_mpeg4_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ int h, m, s;
+
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+ bits_reader_set( &sequence->br, buf, len );
+
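+  /* time_code: 5-bit hours, 6-bit minutes, marker bit, 6-bit seconds */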
+ h = read_bits( &sequence->br, 5 );
+ m = read_bits( &sequence->br, 6 );
+ skip_bits( &sequence->br, 1 );
+ s = read_bits( &sequence->br, 6 );
+
+ sequence->time_base = s + (60 * (m + (60 * h)));
+}
+
+
+
+static void user_data( vdpau_mpeg4_decoder_t *this_gen, uint8_t *buffer, int len )
+{
+ /* code from ffmpeg's mpeg4videodec.c */
+
+ char buf[256];
+ int i, e, ver = 0, build = 0, ver2 = 0, ver3 = 0;
+ char last;
+
+ if ( this_gen->sequence.have_codec_name )
+ return;
+
+ for( i=0; i<255 && i<len; i++ ) {
+ if ( buffer[i] == 0 )
+ break;
+ buf[i]= buffer[i];
+ }
+ buf[i]=0;
+
+ /* divx detection */
+ e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last);
+ if ( e < 2 )
+ e=sscanf(buf, "DivX%db%d%c", &ver, &build, &last);
+ if ( e >= 2 ) {
+ strcpy( this_gen->sequence.codec_name, "MPEG4 / DivX " );
+ sprintf( buf, "%d", ver );
+ strcat( this_gen->sequence.codec_name, " (vdpau)" );
+ this_gen->sequence.have_codec_name = 1;
+ }
+
+ /* ffmpeg detection */
+ e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3;
+ if ( e != 4 )
+ e=sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build);
+ if ( e != 4 ) {
+ e=sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3)+1;
+ if ( e > 1 )
+ build= (ver<<16) + (ver2<<8) + ver3;
+ }
+ if ( e == 4 ) {
+ strcpy( this_gen->sequence.codec_name, "MPEG4 / FFmpeg " );
+ sprintf( buf, "%d", build );
+ strcat( this_gen->sequence.codec_name, " (vdpau)" );
+ this_gen->sequence.have_codec_name = 1;
+ }
+ else {
+ if(strcmp(buf, "ffmpeg")==0) {
+ strcpy( this_gen->sequence.codec_name, "MPEG4 / FFmpeg " );
+ strcpy( this_gen->sequence.codec_name, "4600" );
+ strcat( this_gen->sequence.codec_name, " (vdpau)" );
+ this_gen->sequence.have_codec_name = 1;
+ }
+ }
+
+ /* Xvid detection */
+ e = sscanf(buf, "XviD%d", &build);
+ if ( e == 1 ) {
+ strcpy( this_gen->sequence.codec_name, "MPEG4 / XviD " );
+ sprintf( buf, "%d", build );
+ strcat( this_gen->sequence.codec_name, " (vdpau)" );
+ this_gen->sequence.have_codec_name = 1;
+ }
+
+ update_metadata( this_gen );
+}
+
+
+
+static int parse_code( vdpau_mpeg4_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ if ( (buf[3] >= begin_vo_start_code) && (buf[3] <= end_vo_start_code) ) {
+ lprintf( " ----------- vo_start_code\n" );
+ return 0;
+ }
+
+ if ( (buf[3] >= begin_vol_start_code) && (buf[3] <= end_vol_start_code) ) {
+ lprintf( " ----------- vol_start_code\n" );
+ video_object_layer( this_gen, buf+4, len-4);
+ return 0;
+ }
+
+ switch ( buf[3] ) {
+ case viso_sequence_start_code:
+ lprintf( " ----------- viso_sequence_start_code\n" );
+ break;
+ case viso_sequence_end_code:
+ lprintf( " ----------- viso_sequence_end_code\n" );
+ break;
+ case viso_start_code:
+ lprintf( " ----------- viso_start_code\n" );
+ visual_object( this_gen, buf+4, len-4 );
+ break;
+ }
+
+ if ( !sequence->have_header )
+ return 0;
+
+ switch ( buf[3] ) {
+ case group_start_code:
+ lprintf( " ----------- group_start_code\n" );
+ gop_header( this_gen, buf+4, len-4 );
+ break;
+ case user_data_start_code:
+ lprintf( " ----------- user_data_start_code\n" );
+ user_data( this_gen, buf+4, len-4 );
+ break;
+ case vop_start_code:
+ lprintf( " ----------- vop_start_code\n" );
+ video_object_plane( this_gen, buf+4, len-4 );
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+
+
+static void decode_render( vdpau_mpeg4_decoder_t *vd, vdpau_accel_t *accel, uint8_t *buf, int len )
+{
+ sequence_t *seq = (sequence_t*)&vd->sequence;
+ picture_t *pic = (picture_t*)&seq->picture;
+
+ VdpStatus st;
+ if ( vd->decoder==VDP_INVALID_HANDLE || vd->decoder_profile!=seq->profile || vd->decoder_width!=seq->coded_width || vd->decoder_height!=seq->coded_height ) {
+ if ( vd->decoder!=VDP_INVALID_HANDLE ) {
+ accel->vdp_decoder_destroy( vd->decoder );
+ vd->decoder = VDP_INVALID_HANDLE;
+ }
+ st = accel->vdp_decoder_create( accel->vdp_device, seq->profile, seq->coded_width, seq->coded_height, 2, &vd->decoder);
+ if ( st!=VDP_STATUS_OK )
+ fprintf(stderr, "vdpau_mpeg4: failed to create decoder !! %s\n", accel->vdp_get_error_string( st ) );
+ else {
+ lprintf( "decoder created.\n" );
+ vd->decoder_profile = seq->profile;
+ vd->decoder_width = seq->coded_width;
+ vd->decoder_height = seq->coded_height;
+ seq->vdp_runtime_nr = accel->vdp_runtime_nr;
+ }
+ }
+
+ VdpPictureInfoMPEG4Part2 *infos = (VdpPictureInfoMPEG4Part2*)&pic->vdp_infos;
+ printf("%d: %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n", ++nframe, infos->vop_coding_type,infos->vop_time_increment_resolution, infos->vop_fcode_forward, infos->vop_fcode_backward, infos->resync_marker_disable, infos->interlaced, infos->quant_type, infos->quarter_sample, infos->short_video_header, infos->rounding_control, infos->alternate_vertical_scan_flag, len, infos->trd[0], infos->trd[1], infos->trb[0], infos->trb[1]);
+
+ VdpBitstreamBuffer vbit;
+ vbit.struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+ vbit.bitstream = buf;
+ vbit.bitstream_bytes = len;
+ st = accel->vdp_decoder_render( vd->decoder, accel->surface, (VdpPictureInfo*)&pic->vdp_infos, 1, &vbit );
+ if ( st!=VDP_STATUS_OK )
+ fprintf(stderr, "vdpau_mpeg4: decoder failed : %d!! %s\n", st, accel->vdp_get_error_string( st ) );
+ else {
+ lprintf( "DECODER SUCCESS : vop_coding_type=%d, bytes=%d, current=%d, forwref:%d, backref:%d, pts:%lld\n",
+ pic->vdp_infos.vop_coding_type, vbit.bitstream_bytes, accel->surface, pic->vdp_infos.forward_reference, pic->vdp_infos.backward_reference, seq->seq_pts );
+ }
+}
+
+
+
+static void decode_picture( vdpau_mpeg4_decoder_t *vd )
+{
+ sequence_t *seq = (sequence_t*)&vd->sequence;
+ picture_t *pic = (picture_t*)&seq->picture;
+ vdpau_accel_t *ref_accel;
+
+ uint8_t *buf = seq->buf;
+ int len = seq->bufpos;
+
+ pic->vdp_infos.forward_reference = VDP_INVALID_HANDLE;
+ pic->vdp_infos.backward_reference = VDP_INVALID_HANDLE;
+
+ if ( pic->vdp_infos.vop_coding_type == P_FRAME ) {
+ if ( seq->backward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->backward_ref->accel_data;
+ pic->vdp_infos.forward_reference = ref_accel->surface;
+ }
+ else {
+ /* reset_picture( &seq->picture ); */
+ return;
+ }
+ }
+ else if ( pic->vdp_infos.vop_coding_type == B_FRAME ) {
+ if ( seq->forward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->forward_ref->accel_data;
+ pic->vdp_infos.forward_reference = ref_accel->surface;
+ }
+ else {
+ /* reset_picture( &seq->picture ); */
+ return;
+ }
+ if ( seq->backward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->backward_ref->accel_data;
+ pic->vdp_infos.backward_reference = ref_accel->surface;
+ }
+ else {
+ /* reset_picture( &seq->picture );*/
+ return;
+ }
+ }
+
+ vo_frame_t *img = vd->stream->video_out->get_frame( vd->stream->video_out, seq->coded_width, seq->coded_height, seq->ratio, XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS );
+ vdpau_accel_t *accel = (vdpau_accel_t*)img->accel_data;
+ if ( !seq->accel_vdpau )
+ seq->accel_vdpau = accel;
+
+ if( seq->vdp_runtime_nr != *(seq->accel_vdpau->current_vdp_runtime_nr) ) {
+ seq->accel_vdpau = accel;
+ if ( seq->forward_ref )
+ seq->forward_ref->free( seq->forward_ref );
+ seq->forward_ref = NULL;
+ if ( seq->backward_ref )
+ seq->backward_ref->free( seq->backward_ref );
+ seq->backward_ref = NULL;
+ vd->decoder = VDP_INVALID_HANDLE;
+ }
+
+ decode_render( vd, accel, buf, len );
+
+
+#ifdef MAKE_DAT
+ if ( nframes==0 ) {
+ fwrite( &seq->coded_width, 1, sizeof(seq->coded_width), outfile );
+ fwrite( &seq->coded_height, 1, sizeof(seq->coded_height), outfile );
+ fwrite( &seq->ratio, 1, sizeof(seq->ratio), outfile );
+ fwrite( &seq->profile, 1, sizeof(seq->profile), outfile );
+ }
+
+ if ( nframes++ < 25 ) {
+ fwrite( &pic->vdp_infos, 1, sizeof(pic->vdp_infos), outfile );
+ fwrite( &len, 1, sizeof(len), outfile );
+ fwrite( buf, 1, len, outfile );
+ printf( "picture_type = %d\n", pic->vdp_infos.picture_type);
+ }
+#endif
+
+ if ( pic->vdp_infos.interlaced ) {
+ img->progressive_frame = 0;
+ img->top_field_first = pic->vdp_infos.top_field_first;
+ }
+ else {
+ img->progressive_frame = -1; /* set to -1 to let the vo know that it MUST NOT deinterlace */
+ img->top_field_first = 1;
+ }
+ img->pts = seq->seq_pts;
+ img->bad_frame = 0;
+ if ( seq->video_step > 900 ) /* some buggy streams */
+ img->duration = seq->video_step;
+ accel->color_standard = seq->color_standard;
+
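+  /* Reference bookkeeping: an I/P frame becomes the new backward reference and
+     is only drawn once the next reference arrives (the very first I frame is
+     drawn immediately with pts 0 so playback can start); B frames are drawn
+     and freed right away. */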
+ if ( pic->vdp_infos.vop_coding_type < B_FRAME ) {
+ if ( pic->vdp_infos.vop_coding_type == I_FRAME && !seq->backward_ref ) {
+ img->pts = 0;
+ img->draw( img, vd->stream );
+ ++img->drawn;
+ }
+ if ( seq->forward_ref ) {
+ seq->forward_ref->drawn = 0;
+ seq->forward_ref->free( seq->forward_ref );
+ }
+ seq->forward_ref = seq->backward_ref;
+ if ( seq->forward_ref && !seq->forward_ref->drawn ) {
+ seq->forward_ref->draw( seq->forward_ref, vd->stream );
+ }
+ seq->backward_ref = img;
+ }
+ else {
+ img->draw( img, vd->stream );
+ img->free( img );
+ }
+}
+
+
+
+
+/*
+ * This function receives a buffer of data from the demuxer layer and
+ * figures out how to handle it based on its header flags.
+ */
+static void vdpau_mpeg4_decode_data (video_decoder_t *this_gen, buf_element_t *buf)
+{
+ vdpau_mpeg4_decoder_t *this = (vdpau_mpeg4_decoder_t *) this_gen;
+ sequence_t *seq = (sequence_t*)&this->sequence;
+
+ /* preview buffers shall not be decoded and drawn -- use them only to supply stream information */
+ if (buf->decoder_flags & BUF_FLAG_PREVIEW)
+ return;
+
+ if ( !buf->size )
+ return;
+
+ if ( buf->pts )
+ seq->cur_pts = buf->pts;
+
+ int size = seq->bufpos+buf->size;
+ if ( seq->bufsize < size ) {
+ seq->bufsize = size+1024;
+ seq->buf = realloc( seq->buf, seq->bufsize );
+ }
+ xine_fast_memcpy( seq->buf+seq->bufpos, buf->content, buf->size );
+ seq->bufpos += buf->size;
+
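+  /* scan the accumulated data for 00 00 01 start codes; each unit runs from one
+     start code to the next. When parse_code() reports a complete VOP, decode it,
+     then move the unconsumed tail to the front of the buffer. */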
+ while ( seq->bufseek <= seq->bufpos-4 ) {
+ uint8_t *buffer = seq->buf+seq->bufseek;
+ if ( buffer[0]==0 && buffer[1]==0 && buffer[2]==1 ) {
+ if ( seq->start<0 ) {
+ seq->start = seq->bufseek;
+ }
+ else {
+ if ( parse_code( this, seq->buf+seq->start, seq->bufseek-seq->start ) ) {
+ decode_picture( this );
+ }
+ uint8_t *tmp = (uint8_t*)malloc(seq->bufsize);
+ xine_fast_memcpy( tmp, seq->buf+seq->bufseek, seq->bufpos-seq->bufseek );
+ seq->bufpos -= seq->bufseek;
+ seq->start = -1;
+ seq->bufseek = -1;
+ free( seq->buf );
+ seq->buf = tmp;
+ }
+ }
+ ++seq->bufseek;
+ }
+}
+
+/*
+ * This function is called when xine needs to flush the system.
+ */
+static void vdpau_mpeg4_flush (video_decoder_t *this_gen) {
+ vdpau_mpeg4_decoder_t *this = (vdpau_mpeg4_decoder_t *) this_gen;
+
+ lprintf( "vdpau_mpeg4_flush\n" );
+}
+
+/*
+ * This function resets the video decoder.
+ */
+static void vdpau_mpeg4_reset (video_decoder_t *this_gen) {
+ vdpau_mpeg4_decoder_t *this = (vdpau_mpeg4_decoder_t *) this_gen;
+
+ lprintf( "vdpau_mpeg4_reset\n" );
+ reset_sequence( &this->sequence, 1 );
+}
+
+/*
+ * The decoder should forget any stored pts values here.
+ */
+static void vdpau_mpeg4_discontinuity (video_decoder_t *this_gen) {
+ vdpau_mpeg4_decoder_t *this = (vdpau_mpeg4_decoder_t *) this_gen;
+
+ lprintf( "vdpau_mpeg4_discontinuity\n" );
+ reset_sequence( &this->sequence, 0 );
+}
+
+/*
+ * This function frees the private video decoder instance allocated by open_plugin().
+ */
+static void vdpau_mpeg4_dispose (video_decoder_t *this_gen) {
+
+ vdpau_mpeg4_decoder_t *this = (vdpau_mpeg4_decoder_t *) this_gen;
+
+ lprintf( "vdpau_mpeg4_dispose\n" );
+
+ if ( this->decoder!=VDP_INVALID_HANDLE && this->sequence.accel_vdpau ) {
+ this->sequence.accel_vdpau->vdp_decoder_destroy( this->decoder );
+ this->decoder = VDP_INVALID_HANDLE;
+ }
+
+ free_sequence( &this->sequence );
+
+ this->stream->video_out->close( this->stream->video_out, this->stream );
+
+ free( this->sequence.buf );
+ free( this_gen );
+}
+
+/*
+ * This function allocates, initializes, and returns a private video
+ * decoder structure.
+ */
+static video_decoder_t *open_plugin (video_decoder_class_t *class_gen, xine_stream_t *stream) {
+
+ vdpau_mpeg4_decoder_t *this ;
+
+ lprintf( "open_plugin\n" );
+
+ /* the videoout must be vdpau-capable to support this decoder */
+ if ( !(stream->video_driver->get_capabilities(stream->video_driver) & VO_CAP_VDPAU_MPEG4) )
+ return NULL;
+
+ /* now check if vdpau has free decoder resource */
+ vo_frame_t *img = stream->video_out->get_frame( stream->video_out, 1920, 1080, 1, XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS );
+ vdpau_accel_t *accel = (vdpau_accel_t*)img->accel_data;
+ int runtime_nr = accel->vdp_runtime_nr;
+ img->free(img);
+ VdpDecoder decoder;
+ VdpStatus st = accel->vdp_decoder_create( accel->vdp_device, VDP_DECODER_PROFILE_MPEG4_PART2_ASP, 1920, 1080, 2, &decoder );
+ if ( st!=VDP_STATUS_OK ) {
+ lprintf( "can't create vdpau decoder.\n" );
+ return NULL;
+ }
+
+ accel->vdp_decoder_destroy( decoder );
+
+ this = (vdpau_mpeg4_decoder_t *) calloc(1, sizeof(vdpau_mpeg4_decoder_t));
+
+ this->video_decoder.decode_data = vdpau_mpeg4_decode_data;
+ this->video_decoder.flush = vdpau_mpeg4_flush;
+ this->video_decoder.reset = vdpau_mpeg4_reset;
+ this->video_decoder.discontinuity = vdpau_mpeg4_discontinuity;
+ this->video_decoder.dispose = vdpau_mpeg4_dispose;
+
+ this->stream = stream;
+ this->class = (vdpau_mpeg4_class_t *) class_gen;
+
+ this->sequence.bufsize = 1024;
+ this->sequence.buf = (uint8_t*)malloc(this->sequence.bufsize);
+ this->sequence.forward_ref = 0;
+ this->sequence.backward_ref = 0;
+ this->sequence.vdp_runtime_nr = runtime_nr;
+ free_sequence( &this->sequence );
+ this->sequence.ratio = 1;
+ this->sequence.reset = VO_NEW_SEQUENCE_FLAG;
+
+ init_picture( &this->sequence.picture );
+
+ this->decoder = VDP_INVALID_HANDLE;
+ this->sequence.accel_vdpau = NULL;
+
+ (stream->video_out->open)(stream->video_out, stream);
+
+#ifdef MAKE_DAT
+ outfile = fopen( "/tmp/mpeg4.dat","w");
+ nframes = 0;
+#endif
+ nframe = 0;
+
+ return &this->video_decoder;
+}
+
+/*
+ * This function allocates a private video decoder class and initializes
+ * the class's member functions.
+ */
+static void *init_plugin (xine_t *xine, void *data) {
+
+ vdpau_mpeg4_class_t *this;
+
+ this = (vdpau_mpeg4_class_t *) calloc(1, sizeof(vdpau_mpeg4_class_t));
+
+ this->decoder_class.open_plugin = open_plugin;
+ this->decoder_class.identifier = "vdpau_mpeg4";
+ this->decoder_class.description =
+ N_("vdpau_mpeg4: mpeg4 part 2 decoder plugin using VDPAU hardware decoding.\n"
+ "Must be used along with video_out_vdpau.");
+ this->decoder_class.dispose = default_video_decoder_class_dispose;
+
+ return this;
+}
+
+/*
+ * This is a list of all of the internal xine video buffer types that
+ * this decoder is able to handle. Check src/xine-engine/buffer.h for a
+ * list of valid buffer types (and add a new one if the one you need does
+ * not exist). Terminate the list with a 0.
+ */
+static const uint32_t video_types[] = {
+ BUF_VIDEO_MPEG4,
+ BUF_VIDEO_XVID,
+ BUF_VIDEO_DIVX5,
+ BUF_VIDEO_3IVX,
+ 0
+};
+
+/*
+ * This data structure combines the list of supported xine buffer types and
+ * the priority that the plugin should be given with respect to other
+ * plugins that handle the same buffer type. A plugin with priority (n+1)
+ * will be used instead of a plugin with priority (n).
+ */
+static const decoder_info_t dec_info_video = {
+ video_types, /* supported types */
+ 0 /* priority */
+};
+
+/*
+ * The plugin catalog entry. This is the only information that this plugin
+ * will export to the public.
+ */
+const plugin_info_t xine_plugin_info[] EXPORTED = {
+ /* { type, API, "name", version, special_info, init_function } */
+ { PLUGIN_VIDEO_DECODER, 19, "vdpau_mpeg4", XINE_VERSION_CODE, &dec_info_video, init_plugin },
+ { PLUGIN_NONE, 0, "", 0, NULL, NULL }
+};
diff --git a/src/video_dec/libvdpau/vdpau_vc1.c b/src/video_dec/libvdpau/vdpau_vc1.c
new file mode 100644
index 000000000..fe6ce26b4
--- /dev/null
+++ b/src/video_dec/libvdpau/vdpau_vc1.c
@@ -0,0 +1,1176 @@
+/*
+ * Copyright (C) 2008 the xine project
+ * Copyright (C) 2008 Christophe Thommeret <hftom@free.fr>
+ *
+ * This file is part of xine, a free video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * vdpau_vc1.c, a vc1 video stream parser using VDPAU hardware decoder
+ *
+ */
+
+/*#define LOG*/
+#define LOG_MODULE "vdpau_vc1"
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <xine/xine_internal.h>
+#include <xine/video_out.h>
+#include <xine/buffer.h>
+#include <xine/xineutils.h>
+#include "accel_vdpau.h"
+#include "bits_reader.h"
+
+#include <vdpau/vdpau.h>
+
+#define sequence_header_code 0x0f
+#define sequence_end_code 0x0a
+#define entry_point_code 0x0e
+#define frame_start_code 0x0d
+#define field_start_code 0x0c
+#define slice_start_code 0x0b
+
+#define PICTURE_FRAME 0
+#define PICTURE_FRAME_INTERLACE 2
+#define PICTURE_FIELD_INTERLACE 3
+
+#define I_FRAME 0
+#define P_FRAME 1
+#define B_FRAME 3
+#define BI_FRAME 4
+
+#define FIELDS_I_I 0
+#define FIELDS_I_P 1
+#define FIELDS_P_I 2
+#define FIELDS_P_P 3
+#define FIELDS_B_B 4
+#define FIELDS_B_BI 5
+#define FIELDS_BI_B 6
+#define FIELDS_BI_BI 7
+
+#define MODE_STARTCODE 0
+#define MODE_FRAME 1
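+/* MODE_FRAME: the demuxer delivers complete frames (BUF_FLAG_FRAME_START/END);
+   MODE_STARTCODE: raw stream, start codes are located by the decoder itself
+   (see vdpau_vc1_decode_data()). */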
+
+/*#define MAKE_DAT*/ /* do NOT define this unless you know what you are doing */
+#ifdef MAKE_DAT
+static int nframes;
+static FILE *outfile;
+#endif
+
+
+
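+/* pixel aspect ratios indexed by the 4-bit aspect ratio code read in
+   sequence_header_advanced(); code 15 carries an explicit width/height pair
+   instead, codes 0 and 14 are ignored. */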
+const double aspect_ratio[] = {
+ 0.0,
+ 1.0,
+ 12./11.,
+ 10./11.,
+ 16./11.,
+ 40./33.,
+ 24./11.,
+ 20./11.,
+ 32./11.,
+ 80./33.,
+ 18./11.,
+ 15./11.,
+ 64./33.,
+ 160./99.
+};
+
+
+
+typedef struct {
+ VdpPictureInfoVC1 vdp_infos;
+ int slices;
+ int fptype;
+ int field;
+ int header_size;
+ int hrd_param_flag;
+ int hrd_num_leaky_buckets;
+ int repeat_first_field;
+ int top_field_first;
+ int skipped;
+} picture_t;
+
+
+
+typedef struct {
+ uint32_t coded_width;
+ uint32_t coded_height;
+
+ uint64_t video_step; /* frame duration in pts units */
+ uint64_t reported_video_step; /* frame duration in pts units */
+ double ratio;
+ VdpDecoderProfile profile;
+
+ int mode;
+ int have_header;
+
+ uint8_t *buf; /* accumulate data */
+ int bufseek;
+ int start;
+ int code_start, current_code;
+ uint32_t bufsize;
+ uint32_t bufpos;
+
+ picture_t picture;
+ vo_frame_t *forward_ref;
+ vo_frame_t *backward_ref;
+
+ int64_t seq_pts;
+ int64_t cur_pts;
+
+ vdpau_accel_t *accel_vdpau;
+
+ bits_reader_t br;
+
+ int vdp_runtime_nr;
+
+} sequence_t;
+
+
+
+typedef struct {
+ video_decoder_class_t decoder_class;
+} vdpau_vc1_class_t;
+
+
+
+typedef struct vdpau_vc1_decoder_s {
+ video_decoder_t video_decoder; /* parent video decoder structure */
+
+ vdpau_vc1_class_t *class;
+ xine_stream_t *stream;
+
+ sequence_t sequence;
+
+ VdpDecoder decoder;
+ VdpDecoderProfile decoder_profile;
+ uint32_t decoder_width;
+ uint32_t decoder_height;
+
+} vdpau_vc1_decoder_t;
+
+
+
+static void init_picture( picture_t *pic )
+{
+ memset( pic, 0, sizeof( picture_t ) );
+}
+
+
+
+static void reset_picture( picture_t *pic )
+{
+ pic->slices = 1;
+}
+
+
+
+static void reset_sequence( sequence_t *sequence )
+{
+ lprintf( "reset_sequence\n" );
+ sequence->bufpos = 0;
+ sequence->bufseek = 0;
+ sequence->start = -1;
+ sequence->code_start = sequence->current_code = 0;
+ sequence->seq_pts = sequence->cur_pts = 0;
+ if ( sequence->forward_ref )
+ sequence->forward_ref->free( sequence->forward_ref );
+ sequence->forward_ref = NULL;
+ if ( sequence->backward_ref )
+ sequence->backward_ref->free( sequence->backward_ref );
+ sequence->backward_ref = NULL;
+ reset_picture( &sequence->picture );
+}
+
+
+
+static void init_sequence( sequence_t *sequence )
+{
+ lprintf( "init_sequence\n" );
+ sequence->have_header = 0;
+ sequence->profile = VDP_DECODER_PROFILE_VC1_SIMPLE;
+ sequence->ratio = 0;
+ sequence->video_step = 0;
+ sequence->picture.hrd_param_flag = 0;
+ reset_sequence( sequence );
+}
+
+
+
+static void update_metadata( vdpau_vc1_decoder_t *this_gen )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ if ( !sequence->have_header ) {
+ sequence->have_header = 1;
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_WIDTH, sequence->coded_width );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_HEIGHT, sequence->coded_height );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_VIDEO_RATIO, ((double)10000*sequence->ratio) );
+ _x_stream_info_set( this_gen->stream, XINE_STREAM_INFO_FRAME_DURATION, (sequence->reported_video_step = sequence->video_step) );
+ _x_meta_info_set_utf8( this_gen->stream, XINE_META_INFO_VIDEOCODEC, "VC1/WMV9 (vdpau)" );
+ xine_event_t event;
+ xine_format_change_data_t data;
+ event.type = XINE_EVENT_FRAME_FORMAT_CHANGE;
+ event.stream = this_gen->stream;
+ event.data = &data;
+ event.data_length = sizeof(data);
+ data.width = sequence->coded_width;
+ data.height = sequence->coded_height;
+ data.aspect = sequence->ratio;
+ xine_event_send( this_gen->stream, &event );
+ }
+}
+
+
+
+static void sequence_header_advanced( vdpau_vc1_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ lprintf( "sequence_header_advanced\n" );
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ if ( len < 5 )
+ return;
+
+ sequence->profile = VDP_DECODER_PROFILE_VC1_ADVANCED;
+ lprintf("VDP_DECODER_PROFILE_VC1_ADVANCED\n");
+ bits_reader_set( &sequence->br, buf, len );
+ skip_bits( &sequence->br, 15 );
+ sequence->picture.vdp_infos.postprocflag = read_bits( &sequence->br, 1 );
+ sequence->coded_width = (read_bits( &sequence->br, 12 )+1)<<1;
+ sequence->coded_height = (read_bits( &sequence->br, 12 )+1)<<1;
+ sequence->picture.vdp_infos.pulldown = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.interlace = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.tfcntrflag = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.finterpflag = read_bits( &sequence->br, 1 );
+ skip_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.psf = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.maxbframes = 7;
+ if ( read_bits( &sequence->br, 1 ) ) {
+ double w, h;
+ int ar=0;
+ w = read_bits( &sequence->br, 14 )+1;
+ h = read_bits( &sequence->br, 14 )+1;
+ if ( read_bits( &sequence->br, 1 ) ) {
+ ar = read_bits( &sequence->br, 4 );
+ }
+ if ( ar==15 ) {
+ w = read_bits( &sequence->br, 8 );
+ h = read_bits( &sequence->br, 8 );
+ sequence->ratio = w/h;
+ lprintf("aspect_ratio (w/h) = %f\n", sequence->ratio);
+ }
+ else if ( ar && ar<14 ) {
+ sequence->ratio = sequence->coded_width*aspect_ratio[ar]/sequence->coded_height;
+ lprintf("aspect_ratio = %f\n", sequence->ratio);
+ }
+
+ if ( read_bits( &sequence->br, 1 ) ) {
+ if ( read_bits( &sequence->br, 1 ) ) {
+ int exp = read_bits( &sequence->br, 16 );
+ lprintf("framerate exp = %d\n", exp);
+ }
+ else {
+ double nr = read_bits( &sequence->br, 8 );
+ switch ((int)nr) {
+ case 1: nr = 24000; break;
+ case 2: nr = 25000; break;
+ case 3: nr = 30000; break;
+ case 4: nr = 50000; break;
+ case 5: nr = 60000; break;
+ default: nr = 0;
+ }
+ double dr = read_bits( &sequence->br, 4 );
+ switch ((int)dr) {
+ case 2: dr = 1001; break;
+ default: dr = 1000;
+ }
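+        /* illustrative: frame rate code 3 (30000) with divisor code 2 (1001)
+           gives ~29.97 fps, i.e. video_step = 90000*1001/30000 = 3003 pts per frame */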
+        if ( nr > 0 )
+          sequence->video_step = 90000/(nr/dr);
+        lprintf("framerate = %f video_step = %d\n", nr/dr, (int)sequence->video_step);
+ }
+ }
+ if ( read_bits( &sequence->br, 1 ) ) {
+ int col = read_bits( &sequence->br, 8 );
+ lprintf("color_standard = %d\n", col);
+ skip_bits( &sequence->br, 16 );
+ }
+ }
+ sequence->picture.hrd_param_flag = read_bits( &sequence->br, 1 );
+ if ( sequence->picture.hrd_param_flag )
+ sequence->picture.hrd_num_leaky_buckets = read_bits( &sequence->br, 5 );
+
+ update_metadata( this_gen );
+}
+
+
+
+static void sequence_header( vdpau_vc1_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ lprintf( "sequence_header\n" );
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ if ( len < 4 )
+ return;
+
+ bits_reader_set( &sequence->br, buf, len );
+ switch ( read_bits( &sequence->br, 2 ) ) {
+ case 0: sequence->profile = VDP_DECODER_PROFILE_VC1_SIMPLE; lprintf("VDP_DECODER_PROFILE_VC1_SIMPLE\n"); break;
+ case 1: sequence->profile = VDP_DECODER_PROFILE_VC1_MAIN; lprintf("VDP_DECODER_PROFILE_VC1_MAIN\n"); break;
+    case 2: sequence->profile = VDP_DECODER_PROFILE_VC1_MAIN; fprintf(stderr, "vdpau_vc1: vc1_complex profile not supported by vdpau, forcing vc1_main, expect corruption!\n"); break;
+ case 3: return sequence_header_advanced( this_gen, buf, len ); break;
+ default: return; /* illegal value, broken header? */
+ }
+ skip_bits( &sequence->br, 10 );
+ sequence->picture.vdp_infos.loopfilter = read_bits( &sequence->br, 1 );
+ skip_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.multires = read_bits( &sequence->br, 1 );
+ skip_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.fastuvmc = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.extended_mv = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.dquant = read_bits( &sequence->br, 2 );
+ sequence->picture.vdp_infos.vstransform = read_bits( &sequence->br, 1 );
+ skip_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.overlap = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.syncmarker = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.rangered = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.maxbframes = read_bits( &sequence->br, 3 );
+ sequence->picture.vdp_infos.quantizer = read_bits( &sequence->br, 2 );
+ sequence->picture.vdp_infos.finterpflag = read_bits( &sequence->br, 1 );
+
+ update_metadata( this_gen );
+}
+
+
+
+static void entry_point( vdpau_vc1_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ lprintf( "entry_point\n" );
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ bits_reader_set( &sequence->br, buf, len );
+ skip_bits( &sequence->br, 2 );
+ sequence->picture.vdp_infos.panscan_flag = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.refdist_flag = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.loopfilter = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.fastuvmc = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.extended_mv = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.dquant = read_bits( &sequence->br, 2 );
+ sequence->picture.vdp_infos.vstransform = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.overlap = read_bits( &sequence->br, 1 );
+ sequence->picture.vdp_infos.quantizer = read_bits( &sequence->br, 2 );
+
+ if ( sequence->picture.hrd_param_flag ) {
+ int i;
+ for ( i=0; i<sequence->picture.hrd_num_leaky_buckets; ++i )
+ skip_bits( &sequence->br, 8 );
+ }
+
+ if ( read_bits( &sequence->br, 1 ) ) {
+ sequence->coded_width = (read_bits( &sequence->br, 12 )+1)<<1;
+ sequence->coded_height = (read_bits( &sequence->br, 12 )+1)<<1;
+ }
+
+ if ( sequence->picture.vdp_infos.extended_mv )
+ sequence->picture.vdp_infos.extended_dmv = read_bits( &sequence->br, 1 );
+
+ sequence->picture.vdp_infos.range_mapy_flag = read_bits( &sequence->br, 1 );
+ if ( sequence->picture.vdp_infos.range_mapy_flag ) {
+ sequence->picture.vdp_infos.range_mapy = read_bits( &sequence->br, 3 );
+ }
+ sequence->picture.vdp_infos.range_mapuv_flag = read_bits( &sequence->br, 1 );
+ if ( sequence->picture.vdp_infos.range_mapuv_flag ) {
+ sequence->picture.vdp_infos.range_mapuv = read_bits( &sequence->br, 3 );
+ }
+}
+
+
+
+static void picture_header( vdpau_vc1_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+ picture_t *pic = (picture_t*)&sequence->picture;
+ VdpPictureInfoVC1 *info = &(sequence->picture.vdp_infos);
+ int tmp;
+
+ lprintf("picture_header\n");
+
+ bits_reader_set( &sequence->br, buf, len );
+ skip_bits( &sequence->br, 2 );
+
+ if ( info->finterpflag )
+ skip_bits( &sequence->br, 1 );
+ if ( info->rangered ) {
+ /*info->rangered &= ~2;
+ info->rangered |= get_bits( buf,off++,1 ) << 1;*/
+ info->rangered = (read_bits( &sequence->br, 1 ) << 1) +1;
+ }
+ if ( !info->maxbframes ) {
+ if ( read_bits( &sequence->br, 1 ) )
+ info->picture_type = P_FRAME;
+ else
+ info->picture_type = I_FRAME;
+ }
+ else {
+ if ( read_bits( &sequence->br, 1 ) )
+ info->picture_type = P_FRAME;
+ else {
+ if ( read_bits( &sequence->br, 1 ) )
+ info->picture_type = I_FRAME;
+ else
+ info->picture_type = B_FRAME;
+ }
+ }
+ if ( info->picture_type == B_FRAME ) {
+ tmp = read_bits( &sequence->br, 3 );
+ if ( tmp==7 ) {
+ tmp = (tmp<<4) | read_bits( &sequence->br, 4 );
+ if ( tmp==127 )
+ info->picture_type = BI_FRAME;
+ }
+ }
+}
+
+
+
+static void picture_header_advanced( vdpau_vc1_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+ picture_t *pic = (picture_t*)&sequence->picture;
+ VdpPictureInfoVC1 *info = &(sequence->picture.vdp_infos);
+
+ lprintf("picture_header_advanced\n");
+
+ bits_reader_set( &sequence->br, buf, len );
+
+ if ( info->interlace ) {
+ lprintf("frame->interlace=1\n");
+ if ( !read_bits( &sequence->br, 1 ) ) {
+ lprintf("progressive frame\n");
+ info->frame_coding_mode = PICTURE_FRAME;
+ }
+ else {
+ if ( !read_bits( &sequence->br, 1 ) ) {
+ lprintf("frame interlaced\n");
+ info->frame_coding_mode = PICTURE_FRAME_INTERLACE;
+ }
+ else {
+ lprintf("field interlaced\n");
+ info->frame_coding_mode = PICTURE_FIELD_INTERLACE;
+ }
+ }
+ }
+ if ( info->interlace && info->frame_coding_mode == PICTURE_FIELD_INTERLACE ) {
+ pic->fptype = read_bits( &sequence->br, 3 );
+ switch ( pic->fptype ) {
+ case FIELDS_I_I:
+ case FIELDS_I_P:
+ info->picture_type = I_FRAME; break;
+ case FIELDS_P_I:
+ case FIELDS_P_P:
+ info->picture_type = P_FRAME; break;
+ case FIELDS_B_B:
+ case FIELDS_B_BI:
+ info->picture_type = B_FRAME; break;
+ default:
+ info->picture_type = BI_FRAME;
+ }
+ }
+ else {
+ if ( !read_bits( &sequence->br, 1 ) )
+ info->picture_type = P_FRAME;
+ else {
+ if ( !read_bits( &sequence->br, 1 ) )
+ info->picture_type = B_FRAME;
+ else {
+ if ( !read_bits( &sequence->br, 1 ) )
+ info->picture_type = I_FRAME;
+ else {
+ if ( !read_bits( &sequence->br, 1 ) )
+ info->picture_type = BI_FRAME;
+ else {
+ info->picture_type = P_FRAME;
+ pic->skipped = 1;
+ }
+ }
+ }
+ }
+ }
+ if ( info->tfcntrflag ) {
+ lprintf("tfcntrflag=1\n");
+ skip_bits( &sequence->br, 8 );
+ }
+ if ( info->pulldown && info->interlace ) {
+ pic->top_field_first = read_bits( &sequence->br, 1 );
+ pic->repeat_first_field = read_bits( &sequence->br, 1 );
+ }
+}
+
+
+
+static void parse_header( vdpau_vc1_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+ int off=0;
+
+ while ( off < (len-4) ) {
+ uint8_t *buffer = buf+off;
+ if ( buffer[0]==0 && buffer[1]==0 && buffer[2]==1 ) {
+ switch ( buffer[3] ) {
+ case sequence_header_code: sequence_header( this_gen, buf+off+4, len-off-4 ); break;
+ case entry_point_code: entry_point( this_gen, buf+off+4, len-off-4 ); break;
+ }
+ }
+ ++off;
+ }
+ if ( !sequence->have_header )
+ sequence_header( this_gen, buf, len );
+}
+
+
+
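+/* Drop the 0x03 emulation-prevention byte that follows two zero bytes,
+   e.g. 00 00 03 0B becomes 00 00 0B. dst must be at least src_len bytes;
+   *dst_len receives the shortened length. */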
+static void remove_emulation_prevention( uint8_t *src, uint8_t *dst, int src_len, int *dst_len )
+{
+ int i;
+ int len = 0;
+ int removed = 0;
+
+ for ( i=0; i<src_len-3; ++i ) {
+ if ( src[i]==0 && src[i+1]==0 && src[i+2]==3 ) {
+ lprintf("removed emulation prevention byte\n");
+ dst[len++] = src[i];
+ dst[len++] = src[i+1];
+ i += 2;
+ ++removed;
+ }
+    else {
+      dst[len++] = src[i];
+    }
+ }
+ for ( ; i<src_len; ++i )
+ dst[len++] = src[i];
+ *dst_len = src_len-removed;
+}
+
+
+
+static int parse_code( vdpau_vc1_decoder_t *this_gen, uint8_t *buf, int len )
+{
+ sequence_t *sequence = (sequence_t*)&this_gen->sequence;
+
+ if ( !sequence->have_header && buf[3]!=sequence_header_code )
+ return 0;
+
+ if ( sequence->code_start == frame_start_code ) {
+ if ( sequence->current_code==field_start_code || sequence->current_code==slice_start_code ) {
+ sequence->picture.slices++;
+ return -1;
+ }
+ return 1; /* frame complete, decode */
+ }
+
+ switch ( buf[3] ) {
+ int dst_len;
+ uint8_t *tmp;
+ case sequence_header_code:
+ lprintf("sequence_header_code\n");
+ tmp = malloc( len );
+ remove_emulation_prevention( buf, tmp, len, &dst_len );
+ sequence_header( this_gen, tmp+4, dst_len-4 );
+ free( tmp );
+ break;
+ case entry_point_code:
+ lprintf("entry_point_code\n");
+ tmp = malloc( len );
+ remove_emulation_prevention( buf, tmp, len, &dst_len );
+ entry_point( this_gen, tmp+4, dst_len-4 );
+ free( tmp );
+ break;
+ case sequence_end_code:
+ lprintf("sequence_end_code\n");
+ break;
+ case frame_start_code:
+ lprintf("frame_start_code, len=%d\n", len);
+ break;
+ case field_start_code:
+ lprintf("field_start_code\n");
+ break;
+ case slice_start_code:
+ lprintf("slice_start_code, len=%d\n", len);
+ break;
+ }
+ return 0;
+}
+
+
+
+static void decode_render( vdpau_vc1_decoder_t *vd, vdpau_accel_t *accel, uint8_t *buf, int len )
+{
+ sequence_t *seq = (sequence_t*)&vd->sequence;
+ picture_t *pic = (picture_t*)&seq->picture;
+
+ VdpStatus st;
+ if ( vd->decoder==VDP_INVALID_HANDLE || vd->decoder_profile!=seq->profile || vd->decoder_width!=seq->coded_width || vd->decoder_height!=seq->coded_height ) {
+ if ( vd->decoder!=VDP_INVALID_HANDLE ) {
+ accel->vdp_decoder_destroy( vd->decoder );
+ vd->decoder = VDP_INVALID_HANDLE;
+ }
+ st = accel->vdp_decoder_create( accel->vdp_device, seq->profile, seq->coded_width, seq->coded_height, 2, &vd->decoder);
+ if ( st!=VDP_STATUS_OK )
+ fprintf(stderr, "vdpau_vc1: failed to create decoder !! %s\n", accel->vdp_get_error_string( st ) );
+ else {
+ lprintf( "decoder created.\n" );
+ vd->decoder_profile = seq->profile;
+ vd->decoder_width = seq->coded_width;
+ vd->decoder_height = seq->coded_height;
+ seq->vdp_runtime_nr = accel->vdp_runtime_nr;
+ }
+ }
+
+ VdpBitstreamBuffer vbit;
+ vbit.struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+ vbit.bitstream = buf;
+ vbit.bitstream_bytes = len;
+ if ( pic->field )
+ vbit.bitstream_bytes = pic->field;
+ st = accel->vdp_decoder_render( vd->decoder, accel->surface, (VdpPictureInfo*)&pic->vdp_infos, 1, &vbit );
+ if ( st!=VDP_STATUS_OK )
+ fprintf(stderr, "vdpau_vc1: decoder failed : %d!! %s\n", st, accel->vdp_get_error_string( st ) );
+ else {
+ lprintf( "DECODER SUCCESS : slices=%d, slices_bytes=%d, current=%d, forwref:%d, backref:%d, pts:%lld\n",
+ pic->vdp_infos.slice_count, vbit.bitstream_bytes, accel->surface, pic->vdp_infos.forward_reference, pic->vdp_infos.backward_reference, seq->seq_pts );
+ }
+ VdpPictureInfoVC1 *info = &(seq->picture.vdp_infos);
+ lprintf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n", info->slice_count, info->picture_type, info->frame_coding_mode,
+ info->postprocflag, info->pulldown, info->interlace, info->tfcntrflag, info->finterpflag, info->psf, info->dquant, info->panscan_flag, info->refdist_flag,
+ info->quantizer, info->extended_mv, info->extended_dmv, info->overlap, info->vstransform, info->loopfilter, info->fastuvmc, info->range_mapy_flag, info->range_mapy,
+ info->range_mapuv_flag, info->range_mapuv, info->multires, info->syncmarker, info->rangered, info->maxbframes, info->deblockEnable, info->pquant );
+
+ if ( pic->field ) {
+ int old_type = pic->vdp_infos.picture_type;
+ switch ( pic->fptype ) {
+ case FIELDS_I_I:
+ case FIELDS_P_I:
+ pic->vdp_infos.picture_type = I_FRAME;
+ pic->vdp_infos.backward_reference = VDP_INVALID_HANDLE;
+ pic->vdp_infos.forward_reference = VDP_INVALID_HANDLE;
+ break;
+ case FIELDS_I_P:
+ pic->vdp_infos.forward_reference = accel->surface;
+ pic->vdp_infos.picture_type = P_FRAME;
+ break;
+ case FIELDS_P_P:
+ if ( seq->backward_ref )
+ pic->vdp_infos.forward_reference = ((vdpau_accel_t*)seq->backward_ref->accel_data)->surface;
+ pic->vdp_infos.picture_type = P_FRAME;
+ break;
+ case FIELDS_B_B:
+ case FIELDS_BI_B:
+ pic->vdp_infos.picture_type = B_FRAME;
+ break;
+ default:
+ pic->vdp_infos.picture_type = BI_FRAME;
+ }
+ vbit.bitstream = buf+pic->field+4;
+ vbit.bitstream_bytes = len-pic->field-4;
+ st = accel->vdp_decoder_render( vd->decoder, accel->surface, (VdpPictureInfo*)&pic->vdp_infos, 1, &vbit );
+ if ( st!=VDP_STATUS_OK )
+ fprintf(stderr, "vdpau_vc1: decoder failed : %d!! %s\n", st, accel->vdp_get_error_string( st ) );
+ else {
+ lprintf( "DECODER SUCCESS (second field): slices=%d, slices_bytes=%d, current=%d, forwref:%d, backref:%d, pts:%lld\n",
+ pic->vdp_infos.slice_count, vbit.bitstream_bytes, accel->surface, pic->vdp_infos.forward_reference, pic->vdp_infos.backward_reference, seq->seq_pts );
+ }
+ VdpPictureInfoVC1 *info = &(seq->picture.vdp_infos);
+ lprintf("%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n", info->slice_count, info->picture_type, info->frame_coding_mode,
+ info->postprocflag, info->pulldown, info->interlace, info->tfcntrflag, info->finterpflag, info->psf, info->dquant, info->panscan_flag, info->refdist_flag,
+ info->quantizer, info->extended_mv, info->extended_dmv, info->overlap, info->vstransform, info->loopfilter, info->fastuvmc, info->range_mapy_flag, info->range_mapy,
+ info->range_mapuv_flag, info->range_mapuv, info->multires, info->syncmarker, info->rangered, info->maxbframes, info->deblockEnable, info->pquant );
+
+ pic->vdp_infos.picture_type = old_type;
+ }
+}
+
+
+
+static int search_field( vdpau_vc1_decoder_t *vd, uint8_t *buf, int len )
+{
+ int i;
+ lprintf("search_fields, len=%d\n", len);
+ for ( i=0; i<len-4; ++i ) {
+ if ( buf[i]==0 && buf[i+1]==0 && buf[i+2]==1 && buf[i+3]==field_start_code ) {
+ lprintf("found field_start_code at %d\n", i);
+ return i;
+ }
+ }
+ return 0;
+}
+
+
+
+static void decode_picture( vdpau_vc1_decoder_t *vd )
+{
+ sequence_t *seq = (sequence_t*)&vd->sequence;
+ picture_t *pic = (picture_t*)&seq->picture;
+ vdpau_accel_t *ref_accel;
+ int field;
+
+ uint8_t *buf;
+ int len;
+
+ pic->skipped = 0;
+ pic->field = 0;
+
+ if ( seq->mode == MODE_FRAME ) {
+ buf = seq->buf;
+ len = seq->bufpos;
+ if ( seq->profile==VDP_DECODER_PROFILE_VC1_ADVANCED )
+ picture_header_advanced( vd, buf, len );
+ else
+ picture_header( vd, buf, len );
+
+ if ( len < 2 )
+ pic->skipped = 1;
+ }
+ else {
+ seq->picture.vdp_infos.slice_count = seq->picture.slices;
+ buf = seq->buf+seq->start+4;
+ len = seq->bufseek-seq->start-4;
+ if ( seq->profile==VDP_DECODER_PROFILE_VC1_ADVANCED ) {
+ int tmplen = (len>50) ? 50 : len;
+ uint8_t *tmp = malloc( tmplen );
+ remove_emulation_prevention( buf, tmp, tmplen, &tmplen );
+ picture_header_advanced( vd, tmp, tmplen );
+ free( tmp );
+ }
+ else
+ picture_header( vd, buf, len );
+
+ if ( len < 2 )
+ pic->skipped = 1;
+ }
+
+ if ( pic->skipped )
+ pic->vdp_infos.picture_type = P_FRAME;
+
+ if ( pic->vdp_infos.interlace && pic->vdp_infos.frame_coding_mode == PICTURE_FIELD_INTERLACE ) {
+ if ( !(field = search_field( vd, buf, len )) )
+ lprintf("error, no fields found!\n");
+ else
+ pic->field = field;
+ }
+
+ pic->vdp_infos.forward_reference = VDP_INVALID_HANDLE;
+ pic->vdp_infos.backward_reference = VDP_INVALID_HANDLE;
+
+ if ( pic->vdp_infos.picture_type==P_FRAME ) {
+ if ( seq->backward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->backward_ref->accel_data;
+ pic->vdp_infos.forward_reference = ref_accel->surface;
+ }
+ else {
+ reset_picture( &seq->picture );
+ return;
+ }
+ }
+ else if ( pic->vdp_infos.picture_type>=B_FRAME ) {
+ if ( seq->forward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->forward_ref->accel_data;
+ pic->vdp_infos.forward_reference = ref_accel->surface;
+ }
+ else {
+ reset_picture( &seq->picture );
+ return;
+ }
+ if ( seq->backward_ref ) {
+ ref_accel = (vdpau_accel_t*)seq->backward_ref->accel_data;
+ pic->vdp_infos.backward_reference = ref_accel->surface;
+ }
+ else {
+ reset_picture( &seq->picture );
+ return;
+ }
+ }
+
+ vo_frame_t *img = vd->stream->video_out->get_frame( vd->stream->video_out, seq->coded_width, seq->coded_height,
+ seq->ratio, XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS );
+ vdpau_accel_t *accel = (vdpau_accel_t*)img->accel_data;
+ if ( !seq->accel_vdpau )
+ seq->accel_vdpau = accel;
+
+ if( seq->vdp_runtime_nr != *(seq->accel_vdpau->current_vdp_runtime_nr) ) {
+ seq->accel_vdpau = accel;
+ if ( seq->forward_ref )
+ seq->forward_ref->free( seq->forward_ref );
+ seq->forward_ref = NULL;
+ if ( seq->backward_ref )
+ seq->backward_ref->free( seq->backward_ref );
+ seq->backward_ref = NULL;
+ vd->decoder = VDP_INVALID_HANDLE;
+ }
+
+ decode_render( vd, accel, buf, len );
+
+
+#ifdef MAKE_DAT
+ if ( nframes==0 ) {
+ fwrite( &seq->coded_width, 1, sizeof(seq->coded_width), outfile );
+ fwrite( &seq->coded_height, 1, sizeof(seq->coded_height), outfile );
+ fwrite( &seq->ratio, 1, sizeof(seq->ratio), outfile );
+ fwrite( &seq->profile, 1, sizeof(seq->profile), outfile );
+ }
+
+ if ( nframes++ < 25 ) {
+ fwrite( &pic->vdp_infos, 1, sizeof(pic->vdp_infos), outfile );
+ fwrite( &len, 1, sizeof(len), outfile );
+ fwrite( buf, 1, len, outfile );
+ printf( "picture_type = %d\n", pic->vdp_infos.picture_type);
+ }
+#endif
+
+ if ( pic->vdp_infos.interlace && pic->vdp_infos.frame_coding_mode ) {
+ img->progressive_frame = 0;
+ img->top_field_first = pic->top_field_first;
+ }
+ else {
+ img->progressive_frame = 1;
+ img->top_field_first = 1;
+ }
+ img->pts = seq->seq_pts;
+ img->bad_frame = 0;
+ img->duration = seq->video_step;
+ accel->color_standard = VDP_COLOR_STANDARD_ITUR_BT_709;
+
+ if ( pic->vdp_infos.picture_type<B_FRAME ) {
+ if ( pic->vdp_infos.picture_type==I_FRAME && !seq->backward_ref ) {
+ img->pts = 0;
+ img->draw( img, vd->stream );
+ ++img->drawn;
+ }
+ if ( seq->forward_ref ) {
+ seq->forward_ref->drawn = 0;
+ seq->forward_ref->free( seq->forward_ref );
+ }
+ seq->forward_ref = seq->backward_ref;
+ if ( seq->forward_ref && !seq->forward_ref->drawn ) {
+ seq->forward_ref->draw( seq->forward_ref, vd->stream );
+ }
+ seq->backward_ref = img;
+ }
+ else {
+ img->draw( img, vd->stream );
+ img->free( img );
+ }
+
+ seq->seq_pts +=seq->video_step;
+
+ reset_picture( &seq->picture );
+}
+
+
+
+/*
+ * This function receives a buffer of data from the demuxer layer and
+ * figures out how to handle it based on its header flags.
+ */
+static void vdpau_vc1_decode_data (video_decoder_t *this_gen, buf_element_t *buf)
+{
+ vdpau_vc1_decoder_t *this = (vdpau_vc1_decoder_t *) this_gen;
+ sequence_t *seq = (sequence_t*)&this->sequence;
+
+  /* preview buffers carry no data this decoder acts on; just log them */
+ if (buf->decoder_flags & BUF_FLAG_PREVIEW) {
+ lprintf("BUF_FLAG_PREVIEW\n");
+ }
+
+ if (buf->decoder_flags & BUF_FLAG_FRAMERATE) {
+ lprintf("BUF_FLAG_FRAMERATE=%d\n", buf->decoder_info[0]);
+ if ( buf->decoder_info[0] > 0 ) {
+ this->sequence.video_step = buf->decoder_info[0];
+ _x_stream_info_set(this->stream, XINE_STREAM_INFO_FRAME_DURATION, this->sequence.video_step);
+ }
+ }
+
+ if (this->sequence.reported_video_step != this->sequence.video_step){
+ _x_stream_info_set(this->stream, XINE_STREAM_INFO_FRAME_DURATION, (this->sequence.reported_video_step = this->sequence.video_step));
+ }
+
+ if (buf->decoder_flags & BUF_FLAG_HEADER) {
+ lprintf("BUF_FLAG_HEADER\n");
+ }
+
+ if (buf->decoder_flags & BUF_FLAG_ASPECT) {
+ lprintf("BUF_FLAG_ASPECT\n");
+ seq->ratio = (double)buf->decoder_info[1]/(double)buf->decoder_info[2];
+ lprintf("arx=%d ary=%d ratio=%f\n", buf->decoder_info[1], buf->decoder_info[2], seq->ratio);
+ }
+
+ if ( !buf->size )
+ return;
+
+ seq->cur_pts = buf->pts;
+
+ if (buf->decoder_flags & BUF_FLAG_STDHEADER) {
+ lprintf("BUF_FLAG_STDHEADER\n");
+ xine_bmiheader *bih = (xine_bmiheader *) buf->content;
+ int bs = sizeof( xine_bmiheader );
+ seq->coded_width = bih->biWidth;
+ seq->coded_height = bih->biHeight;
+ lprintf( "width=%d height=%d\n", bih->biWidth, bih->biHeight );
+ if ( buf->size > bs ) {
+ seq->mode = MODE_FRAME;
+ parse_header( this, buf->content+bs, buf->size-bs );
+ }
+ return;
+ }
+
+ int size = seq->bufpos+buf->size;
+ if ( seq->bufsize < size ) {
+ seq->bufsize = size+10000;
+ seq->buf = realloc( seq->buf, seq->bufsize );
+ lprintf("sequence buffer realloced = %d\n", seq->bufsize );
+ }
+ xine_fast_memcpy( seq->buf+seq->bufpos, buf->content, buf->size );
+ seq->bufpos += buf->size;
+
+ if (buf->decoder_flags & BUF_FLAG_FRAME_START) {
+ lprintf("BUF_FLAG_FRAME_START\n");
+ seq->seq_pts = buf->pts;
+ seq->mode = MODE_FRAME;
+ if ( seq->bufpos > 3 ) {
+ if ( seq->buf[0]==0 && seq->buf[1]==0 && seq->buf[2]==1 ) {
+ seq->mode = MODE_STARTCODE;
+ }
+ }
+ }
+
+ if ( seq->mode == MODE_FRAME ) {
+ if ( buf->decoder_flags & BUF_FLAG_FRAME_END ) {
+ lprintf("BUF_FLAG_FRAME_END\n");
+ decode_picture( this );
+ seq->bufpos = 0;
+ }
+ return;
+ }
+
+ int res, startcode=0;
+ while ( seq->bufseek <= seq->bufpos-4 ) {
+ uint8_t *buffer = seq->buf+seq->bufseek;
+ if ( buffer[0]==0 && buffer[1]==0 && buffer[2]==1 ) {
+ startcode = 1;
+ seq->current_code = buffer[3];
+ lprintf("current_code = %d\n", seq->current_code);
+ if ( seq->start<0 ) {
+ seq->start = seq->bufseek;
+ seq->code_start = buffer[3];
+ lprintf("code_start = %d\n", seq->code_start);
+ if ( seq->cur_pts )
+ seq->seq_pts = seq->cur_pts;
+ }
+ else {
+ res = parse_code( this, seq->buf+seq->start, seq->bufseek-seq->start );
+ if ( res==1 ) {
+ seq->mode = MODE_STARTCODE;
+ decode_picture( this );
+ parse_code( this, seq->buf+seq->start, seq->bufseek-seq->start );
+ }
+ if ( res!=-1 ) {
+ uint8_t *tmp = (uint8_t*)malloc(seq->bufsize);
+ xine_fast_memcpy( tmp, seq->buf+seq->bufseek, seq->bufpos-seq->bufseek );
+ seq->bufpos -= seq->bufseek;
+ seq->start = -1;
+ seq->bufseek = -1;
+ free( seq->buf );
+ seq->buf = tmp;
+ }
+ }
+ }
+ ++seq->bufseek;
+ }
+}
+
+
+
+/*
+ * This function is called when xine needs to flush the system.
+ */
+static void vdpau_vc1_flush (video_decoder_t *this_gen) {
+ vdpau_vc1_decoder_t *this = (vdpau_vc1_decoder_t *) this_gen;
+
+ lprintf( "vdpau_vc1_flush\n" );
+}
+
+/*
+ * This function resets the video decoder.
+ */
+static void vdpau_vc1_reset (video_decoder_t *this_gen) {
+ vdpau_vc1_decoder_t *this = (vdpau_vc1_decoder_t *) this_gen;
+
+ lprintf( "vdpau_vc1_reset\n" );
+ reset_sequence( &this->sequence );
+}
+
+/*
+ * The decoder should forget any stored pts values here.
+ */
+static void vdpau_vc1_discontinuity (video_decoder_t *this_gen) {
+ vdpau_vc1_decoder_t *this = (vdpau_vc1_decoder_t *) this_gen;
+
+ lprintf( "vdpau_vc1_discontinuity\n" );
+}
+
+/*
+ * This function frees the private video decoder instance allocated by open_plugin().
+ */
+static void vdpau_vc1_dispose (video_decoder_t *this_gen) {
+
+ vdpau_vc1_decoder_t *this = (vdpau_vc1_decoder_t *) this_gen;
+
+ lprintf( "vdpau_vc1_dispose\n" );
+
+ if ( this->decoder!=VDP_INVALID_HANDLE && this->sequence.accel_vdpau ) {
+ this->sequence.accel_vdpau->vdp_decoder_destroy( this->decoder );
+ this->decoder = VDP_INVALID_HANDLE;
+ }
+
+ reset_sequence( &this->sequence );
+
+ this->stream->video_out->close( this->stream->video_out, this->stream );
+
+ free( this->sequence.buf );
+ free( this_gen );
+}
+
+/*
+ * This function allocates, initializes, and returns a private video
+ * decoder structure.
+ */
+static video_decoder_t *open_plugin (video_decoder_class_t *class_gen, xine_stream_t *stream) {
+
+ vdpau_vc1_decoder_t *this ;
+
+ lprintf( "open_plugin\n" );
+
+ /* the videoout must be vdpau-capable to support this decoder */
+ if ( !(stream->video_driver->get_capabilities(stream->video_driver) & VO_CAP_VDPAU_VC1) )
+ return NULL;
+
+ /* now check if vdpau has free decoder resource */
+ vo_frame_t *img = stream->video_out->get_frame( stream->video_out, 1920, 1080, 1, XINE_IMGFMT_VDPAU, VO_BOTH_FIELDS );
+ vdpau_accel_t *accel = (vdpau_accel_t*)img->accel_data;
+ int runtime_nr = accel->vdp_runtime_nr;
+ img->free(img);
+ VdpDecoder decoder;
+ VdpStatus st = accel->vdp_decoder_create( accel->vdp_device, VDP_DECODER_PROFILE_VC1_MAIN, 1920, 1080, 2, &decoder );
+ if ( st!=VDP_STATUS_OK ) {
+ lprintf( "can't create vdpau decoder.\n" );
+ return NULL;
+ }
+
+ accel->vdp_decoder_destroy( decoder );
+
+ this = (vdpau_vc1_decoder_t *) calloc(1, sizeof(vdpau_vc1_decoder_t));
+
+ this->video_decoder.decode_data = vdpau_vc1_decode_data;
+ this->video_decoder.flush = vdpau_vc1_flush;
+ this->video_decoder.reset = vdpau_vc1_reset;
+ this->video_decoder.discontinuity = vdpau_vc1_discontinuity;
+ this->video_decoder.dispose = vdpau_vc1_dispose;
+
+ this->stream = stream;
+ this->class = (vdpau_vc1_class_t *) class_gen;
+
+ this->sequence.bufsize = 10000;
+ this->sequence.buf = (uint8_t*)malloc(this->sequence.bufsize);
+ this->sequence.forward_ref = 0;
+ this->sequence.backward_ref = 0;
+ this->sequence.vdp_runtime_nr = runtime_nr;
+ init_sequence( &this->sequence );
+
+ init_picture( &this->sequence.picture );
+
+ this->decoder = VDP_INVALID_HANDLE;
+ this->sequence.accel_vdpau = NULL;
+ this->sequence.mode = MODE_STARTCODE;
+
+ (stream->video_out->open)(stream->video_out, stream);
+
+#ifdef MAKE_DAT
+ outfile = fopen( "/tmp/vc1.dat","w");
+ nframes = 0;
+#endif
+
+ return &this->video_decoder;
+}
+
+/*
+ * This function allocates a private video decoder class and initializes
+ * the class's member functions.
+ */
+static void *init_plugin (xine_t *xine, void *data) {
+
+ vdpau_vc1_class_t *this;
+
+ this = (vdpau_vc1_class_t *) calloc(1, sizeof(vdpau_vc1_class_t));
+
+ this->decoder_class.open_plugin = open_plugin;
+ this->decoder_class.identifier = "vdpau_vc1";
+ this->decoder_class.description =
+ N_("vdpau_vc1: vc1 decoder plugin using VDPAU hardware decoding.\n"
+ "Must be used along with video_out_vdpau.");
+ this->decoder_class.dispose = default_video_decoder_class_dispose;
+
+ return this;
+}
+
+/*
+ * This is a list of all of the internal xine video buffer types that
+ * this decoder is able to handle. Check src/xine-engine/buffer.h for a
+ * list of valid buffer types (and add a new one if the one you need does
+ * not exist). Terminate the list with a 0.
+ */
+static const uint32_t video_types[] = {
+ BUF_VIDEO_VC1, BUF_VIDEO_WMV9,
+ 0
+};
+
+/*
+ * This data structure combines the list of supported xine buffer types and
+ * the priority that the plugin should be given with respect to other
+ * plugins that handle the same buffer type. A plugin with priority (n+1)
+ * will be used instead of a plugin with priority (n).
+ */
+static const decoder_info_t dec_info_video = {
+ video_types, /* supported types */
+ 8 /* priority */
+};
+
+/*
+ * The plugin catalog entry. This is the only information that this plugin
+ * will export to the public.
+ */
+const plugin_info_t xine_plugin_info[] EXPORTED = {
+ /* { type, API, "name", version, special_info, init_function } */
+ { PLUGIN_VIDEO_DECODER, 19, "vdpau_vc1", XINE_VERSION_CODE, &dec_info_video, init_plugin },
+ { PLUGIN_NONE, 0, "", 0, NULL, NULL }
+};