/* libavcodec/mpegvideo_xvmc.c */

/*
 * XVideo Motion Compensation
 * Copyright (c) 2003 Ivan Kalvachev
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <limits.h>
#include <X11/extensions/XvMC.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

#undef NDEBUG
#include <assert.h>

#include "xvmc.h"
#include "xvmc_internal.h"

/**
 * Initialize the block field of the MpegEncContext pointer passed as
 * parameter after making sure that the data is not corrupted.
 * In order to implement something like direct rendering instead of decoding
 * coefficients in s->blocks and then copying them, copy them directly
 * into the data_blocks array provided by xvmc.
 */
void ff_xvmc_init_block(MpegEncContext *s)
{
    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
    assert(render && render->xvmc_id == AV_XVMC_ID);
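
    /* Each DCT block is 64 int16_t coefficients, so the next free block
     * starts at data_blocks + next_free_data_block_num * 64. */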
    s->block = (int16_t (*)[64])(render->data_blocks + render->next_free_data_block_num * 64);
}

/**
 * Fill individual block pointers, so there are no gaps in the data_block array
 * in case not all blocks in the macroblock are coded.
 */
void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
{
    int i, j = 0;
    const int mb_block_count = 4 + (1 << s->chroma_format);
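
    /* cbp arrives with the coded-block flag for block 0 in its most
     * significant of mb_block_count bits. The shift below moves that flag
     * into bit 11, and "cbp += cbp" advances to the next block's flag on
     * each iteration. For 4:2:0 (mb_block_count == 6), a cbp of 0b101010
     * maps pblocks[0], [2] and [4] onto s->block[0..2] and leaves the
     * other pointers NULL. */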
    cbp <<= 12 - mb_block_count;
    for (i = 0; i < mb_block_count; i++) {
        if (cbp & (1 << 11))
            s->pblocks[i] = &s->block[j++];
        else
            s->pblocks[i] = NULL;
        cbp += cbp;
    }
}

/**
 * Find and store the surfaces that are used as reference frames.
 * This function should be called for every new field and/or frame.
 * It should be safe to call the function a few times for the same field.
 */
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
{
    struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
    const int mb_block_count = 4 + (1 << s->chroma_format);

    assert(avctx);
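    /* Reject render tokens that are missing fields; the INT_MAX bounds keep
     * the later multiplications by 64 coefficients and by the per-macroblock
     * block count (here and in ff_xvmc_decode_mb()) from overflowing int. */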
    if (!render || render->xvmc_id != AV_XVMC_ID ||
        !render->data_blocks || !render->mv_blocks ||
        (unsigned int)render->allocated_mv_blocks   > INT_MAX/(64*6) ||
        (unsigned int)render->allocated_data_blocks > INT_MAX/64     ||
        !render->p_surface) {
        av_log(avctx, AV_LOG_ERROR,
               "Render token doesn't look as expected.\n");
        return -1; // make sure that this is a render packet
    }

    if (render->filled_mv_blocks_num) {
        av_log(avctx, AV_LOG_ERROR,
               "Rendering surface contains %i unprocessed blocks.\n",
               render->filled_mv_blocks_num);
        return -1;
    }
    if (render->allocated_mv_blocks   <  1 ||
        render->allocated_data_blocks <  render->allocated_mv_blocks*mb_block_count ||
        render->start_mv_blocks_num   >= render->allocated_mv_blocks ||
        render->next_free_data_block_num >
                        render->allocated_data_blocks -
                        mb_block_count*(render->allocated_mv_blocks - render->start_mv_blocks_num)) {
        av_log(avctx, AV_LOG_ERROR,
               "Rendering surface doesn't provide enough block structures to work with.\n");
        return -1;
    }

    render->picture_structure = s->picture_structure;
    render->flags             = s->first_field ? 0 : XVMC_SECOND_FIELD;
    render->p_future_surface  = NULL;
    render->p_past_surface    = NULL;

    switch(s->pict_type) {
        case AV_PICTURE_TYPE_I:
            return 0; // no prediction from other frames
        case AV_PICTURE_TYPE_B:
            next = (struct xvmc_pix_fmt*)s->next_picture.f.data[2];
            if (!next)
                return -1;
            if (next->xvmc_id != AV_XVMC_ID)
                return -1;
            render->p_future_surface = next->p_surface;
            // no return here, going to set forward prediction
        case AV_PICTURE_TYPE_P:
            last = (struct xvmc_pix_fmt*)s->last_picture.f.data[2];
            if (!last)
                last = render; // predict second field from the first
            if (last->xvmc_id != AV_XVMC_ID)
                return -1;
            render->p_past_surface = last->p_surface;
            return 0;
    }

    return -1;
}

/**
 * Complete frame/field rendering by passing any remaining blocks.
 * Normally ff_draw_horiz_band() is called for each slice, however,
 * some leftover blocks, for example from error_resilience(), may remain.
 * It should be safe to call the function a few times for the same field.
 */
void ff_xvmc_field_end(MpegEncContext *s)
{
    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
    assert(render);

    if (render->filled_mv_blocks_num > 0)
        ff_draw_horiz_band(s, 0, 0);
}

/**
 * Synthesize the data needed by XvMC to render one macroblock of data.
 * Fill all relevant fields, if necessary do IDCT.
 */
void ff_xvmc_decode_mb(MpegEncContext *s)
{
    XvMCMacroBlock *mv_block;
    struct xvmc_pix_fmt *render;
    int i, cbp, blocks_per_mb;

    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    if (s->encoding) {
        av_log(s->avctx, AV_LOG_ERROR, "XVMC doesn't support encoding!!!\n");
        return;
    }

    // from MPV_decode_mb(), update DC predictors for P macroblocks
    if (!s->mb_intra) {
        s->last_dc[0] =
        s->last_dc[1] =
        s->last_dc[2] = 128 << s->intra_dc_precision;
    }

    // MC doesn't skip blocks
    s->mb_skipped = 0;

    // Do I need to export quant when I could not perform postprocessing?
    // Anyway, it doesn't hurt.
    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    // start of XVMC-specific code
    render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
    assert(render);
    assert(render->xvmc_id == AV_XVMC_ID);
    assert(render->mv_blocks);

    // take the next free macroblock
    mv_block = &render->mv_blocks[render->start_mv_blocks_num +
                                  render->filled_mv_blocks_num];

    mv_block->x        = s->mb_x;
    mv_block->y        = s->mb_y;
    mv_block->dct_type = s->interlaced_dct; // XVMC_DCT_TYPE_FRAME/FIELD
    if (s->mb_intra) {
        mv_block->macroblock_type = XVMC_MB_TYPE_INTRA; // no MC, all done
    } else {
        mv_block->macroblock_type = XVMC_MB_TYPE_PATTERN;

        if (s->mv_dir & MV_DIR_FORWARD) {
            mv_block->macroblock_type |= XVMC_MB_TYPE_MOTION_FORWARD;
            // PMV[n][dir][xy] = mv[dir][n][xy]
            mv_block->PMV[0][0][0] = s->mv[0][0][0];
            mv_block->PMV[0][0][1] = s->mv[0][0][1];
            mv_block->PMV[1][0][0] = s->mv[0][1][0];
            mv_block->PMV[1][0][1] = s->mv[0][1][1];
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            mv_block->macroblock_type |= XVMC_MB_TYPE_MOTION_BACKWARD;
            mv_block->PMV[0][1][0] = s->mv[1][0][0];
            mv_block->PMV[0][1][1] = s->mv[1][0][1];
            mv_block->PMV[1][1][0] = s->mv[1][1][0];
            mv_block->PMV[1][1][1] = s->mv[1][1][1];
        }

        switch(s->mv_type) {
            case MV_TYPE_16X16:
                mv_block->motion_type = XVMC_PREDICTION_FRAME;
                break;
            case MV_TYPE_16X8:
                mv_block->motion_type = XVMC_PREDICTION_16x8;
                break;
            case MV_TYPE_FIELD:
                mv_block->motion_type = XVMC_PREDICTION_FIELD;
                if (s->picture_structure == PICT_FRAME) {
                    mv_block->PMV[0][0][1] <<= 1;
                    mv_block->PMV[1][0][1] <<= 1;
                    mv_block->PMV[0][1][1] <<= 1;
                    mv_block->PMV[1][1][1] <<= 1;
                }
                break;
            case MV_TYPE_DMV:
                mv_block->motion_type = XVMC_PREDICTION_DUAL_PRIME;
                if (s->picture_structure == PICT_FRAME) {

                    mv_block->PMV[0][0][0] = s->mv[0][0][0];      // top from top
                    mv_block->PMV[0][0][1] = s->mv[0][0][1] << 1;

                    mv_block->PMV[0][1][0] = s->mv[0][0][0];      // bottom from bottom
                    mv_block->PMV[0][1][1] = s->mv[0][0][1] << 1;

                    mv_block->PMV[1][0][0] = s->mv[0][2][0];      // dmv00, top from bottom
                    mv_block->PMV[1][0][1] = s->mv[0][2][1] << 1; // dmv01

                    mv_block->PMV[1][1][0] = s->mv[0][3][0];      // dmv10, bottom from top
                    mv_block->PMV[1][1][1] = s->mv[0][3][1] << 1; // dmv11

                } else {
                    mv_block->PMV[0][1][0] = s->mv[0][2][0];      // dmv00
                    mv_block->PMV[0][1][1] = s->mv[0][2][1];      // dmv01
                }
                break;
            default:
                assert(0);
        }

        mv_block->motion_vertical_field_select = 0;

        // set correct field references
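        // Bits 0-3 pack field_select for (forward, first MV), (backward, first MV),
        // (forward, second MV) and (backward, second MV), respectively.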
        if (s->mv_type == MV_TYPE_FIELD || s->mv_type == MV_TYPE_16X8) {
            mv_block->motion_vertical_field_select |= s->field_select[0][0];
            mv_block->motion_vertical_field_select |= s->field_select[1][0] << 1;
            mv_block->motion_vertical_field_select |= s->field_select[0][1] << 2;
            mv_block->motion_vertical_field_select |= s->field_select[1][1] << 3;
        }
    } // !intra
    // time to handle data blocks
    mv_block->index = render->next_free_data_block_num;

    blocks_per_mb = 6;
    if (s->chroma_format >= 2) {
        blocks_per_mb = 4 + (1 << s->chroma_format);
    }
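    // blocks_per_mb: 6 for 4:2:0 (the default above), 8 for 4:2:2, 12 for 4:4:4.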

    // calculate cbp
    cbp = 0;
    for (i = 0; i < blocks_per_mb; i++) {
        cbp += cbp;
        if (s->block_last_index[i] >= 0)
            cbp++;
    }
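    // cbp now holds one coded-block flag per block, with block 0 in the most
    // significant of the blocks_per_mb bits, the layout ff_xvmc_pack_pblocks()
    // expects.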

    if (s->flags & CODEC_FLAG_GRAY) {
        if (s->mb_intra) {                                   // intra frames are always full chroma blocks
            for (i = 4; i < blocks_per_mb; i++) {
                memset(s->pblocks[i], 0, sizeof(*s->pblocks[i])); // so we need to clear them
                if (!render->unsigned_intra)
                    *s->pblocks[i][0] = 1 << 10;
            }
        } else {
            cbp &= 0xf << (blocks_per_mb - 4);
            blocks_per_mb = 4;                               // luminance blocks only
        }
    }
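    // With signed intra data the preset DC of 1 << 10 above cancels against the
    // 1 << 10 subtracted from intra blocks below, leaving the cleared chroma
    // blocks at zero (neutral chroma).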
    mv_block->coded_block_pattern = cbp;
    if (cbp == 0)
        mv_block->macroblock_type &= ~XVMC_MB_TYPE_PATTERN;

    for (i = 0; i < blocks_per_mb; i++) {
        if (s->block_last_index[i] >= 0) {
            // I do not have unsigned_intra MOCO to test, hope it is OK.
            if (s->mb_intra && (render->idct || !render->unsigned_intra))
                *s->pblocks[i][0] -= 1 << 10;
            if (!render->idct) {
                s->dsp.idct(*s->pblocks[i]);
                /* It is unclear if MC hardware requires pixel diff values to be
                 * in the range [-255;255]. TODO: Clipping if such hardware is
                 * ever found. As of now it would only be an unnecessary
                 * slowdown. */
            }
            // copy blocks only if the codec doesn't support pblocks reordering
            if (s->avctx->xvmc_acceleration == 1) {
                memcpy(&render->data_blocks[render->next_free_data_block_num*64],
                       s->pblocks[i], sizeof(*s->pblocks[i]));
            }
            render->next_free_data_block_num++;
        }
    }
    render->filled_mv_blocks_num++;

    assert(render->filled_mv_blocks_num     <= render->allocated_mv_blocks);
    assert(render->next_free_data_block_num <= render->allocated_data_blocks);
    /* The above conditions should not be able to fail as long as this function
     * is used and the following 'if ()' automatically calls a callback to free
     * blocks. */

    if (render->filled_mv_blocks_num == render->allocated_mv_blocks)
        ff_draw_horiz_band(s, 0, 0);
}