/*
 * VC-1 and WMV3 decoder
 * Copyright (c) 2011 Mashiat Sarker Shakkhar
 * Copyright (c) 2006-2007 Konstantin Shishkov
 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 decoder
 */

#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h264chroma.h"
#include "vc1.h"
#include "vc1data.h"
#include "vc1acdata.h"
#include "msmpeg4data.h"
#include "unary.h"
#include "mathops.h"
#include "vdpau_internal.h"

#undef NDEBUG
#include <assert.h>

#define MB_INTRA_VLC_BITS 9
#define DC_VLC_BITS       9
// offset tables for interlaced picture MVDATA decoding
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
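/* Editor's illustrative note (not part of the original source): these tables
 * hold the base offsets added to the extra bits read in
 * get_mvdata_interlaced() below. For example, with no differential-MV range
 * extension, index1 == 3 reads 3 extra bits; val == 5 then gives
 *     dmv = (sign ^ ((5 >> 1) + offset_table1[3])) - sign = -(2 + 4) = -6
 * because the sign bit (val & 1) is set. When the range is extended,
 * offset_table2 is used instead. */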
/***********************************************************************/
/**
 * @name VC-1 Bitplane decoding
 * @see 8.7, p56
 * @{
 */

/**
 * Imode types
 * @{
 */
enum Imode {
    IMODE_RAW,
    IMODE_NORM2,
    IMODE_DIFF2,
    IMODE_NORM6,
    IMODE_DIFF6,
    IMODE_ROWSKIP,
    IMODE_COLSKIP
};
/** @} */ //imode defines

/** @} */ //Bitplane group
79 static void vc1_put_signed_blocks_clamped(VC1Context *v)
81 MpegEncContext *s = &v->s;
82 int topleft_mb_pos, top_mb_pos;
83 int stride_y, fieldtx;
84 int v_dist;
    /* The put pixels loop is always one MB row behind the decoding loop,
     * because we can only put pixels when overlap filtering is done, and
     * for filtering of the bottom edge of an MB, we need the next MB row
     * present as well.
     * Within the row, the put pixels loop is also one MB col behind the
     * decoding loop, again because filtering of the right MB edge needs
     * the next MB to be present. */
93 if (!s->first_slice_line) {
94 if (s->mb_x) {
95 topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
96 fieldtx = v->fieldtx_plane[topleft_mb_pos];
97 stride_y = s->linesize << fieldtx;
98 v_dist = (16 - fieldtx) >> (fieldtx == 0);
99 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
100 s->dest[0] - 16 * s->linesize - 16,
101 stride_y);
102 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
103 s->dest[0] - 16 * s->linesize - 8,
104 stride_y);
105 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
106 s->dest[0] - v_dist * s->linesize - 16,
107 stride_y);
108 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
109 s->dest[0] - v_dist * s->linesize - 8,
110 stride_y);
111 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
112 s->dest[1] - 8 * s->uvlinesize - 8,
113 s->uvlinesize);
114 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
115 s->dest[2] - 8 * s->uvlinesize - 8,
116 s->uvlinesize);
118 if (s->mb_x == s->mb_width - 1) {
119 top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
120 fieldtx = v->fieldtx_plane[top_mb_pos];
121 stride_y = s->linesize << fieldtx;
122 v_dist = fieldtx ? 15 : 8;
123 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
124 s->dest[0] - 16 * s->linesize,
125 stride_y);
126 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
127 s->dest[0] - 16 * s->linesize + 8,
128 stride_y);
129 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
130 s->dest[0] - v_dist * s->linesize,
131 stride_y);
132 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
133 s->dest[0] - v_dist * s->linesize + 8,
134 stride_y);
135 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
136 s->dest[1] - 8 * s->uvlinesize,
137 s->uvlinesize);
138 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
139 s->dest[2] - 8 * s->uvlinesize,
140 s->uvlinesize);
144 #define inc_blk_idx(idx) do { \
145 idx++; \
146 if (idx >= v->n_allocated_blks) \
147 idx = 0; \
148 } while (0)
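    /* Editor's illustrative note (not in the original source): the four
     * block-index variables advanced below walk a small ring buffer of
     * v->n_allocated_blks block sets; e.g. with n_allocated_blks == 3 an
     * index advances 0, 1, 2, 0, 1, ... so the cur/left/top/topleft indices
     * keep their relative offsets within the ring. */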
150 inc_blk_idx(v->topleft_blk_idx);
151 inc_blk_idx(v->top_blk_idx);
152 inc_blk_idx(v->left_blk_idx);
153 inc_blk_idx(v->cur_blk_idx);
156 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
158 MpegEncContext *s = &v->s;
159 int j;
160 if (!s->first_slice_line) {
161 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
162 if (s->mb_x)
163 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
164 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
165 for (j = 0; j < 2; j++) {
166 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
167 if (s->mb_x)
168 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
171 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
173 if (s->mb_y == s->end_mb_y - 1) {
174 if (s->mb_x) {
175 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
176 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
177 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
179 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
183 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
185 MpegEncContext *s = &v->s;
186 int j;
188 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
189 * means it runs two rows/cols behind the decoding loop. */
190 if (!s->first_slice_line) {
191 if (s->mb_x) {
192 if (s->mb_y >= s->start_mb_y + 2) {
193 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
195 if (s->mb_x >= 2)
196 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
197 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
198 for (j = 0; j < 2; j++) {
199 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
200 if (s->mb_x >= 2) {
201 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
205 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
208 if (s->mb_x == s->mb_width - 1) {
209 if (s->mb_y >= s->start_mb_y + 2) {
210 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
212 if (s->mb_x)
213 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
214 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
215 for (j = 0; j < 2; j++) {
216 v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
217 if (s->mb_x >= 2) {
218 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
222 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
225 if (s->mb_y == s->end_mb_y) {
226 if (s->mb_x) {
227 if (s->mb_x >= 2)
228 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
229 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
230 if (s->mb_x >= 2) {
231 for (j = 0; j < 2; j++) {
232 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
237 if (s->mb_x == s->mb_width - 1) {
238 if (s->mb_x)
239 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
240 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
241 if (s->mb_x) {
242 for (j = 0; j < 2; j++) {
243 v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
251 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
253 MpegEncContext *s = &v->s;
254 int mb_pos;
256 if (v->condover == CONDOVER_NONE)
257 return;
259 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
261 /* Within a MB, the horizontal overlap always runs before the vertical.
262 * To accomplish that, we run the H on left and internal borders of the
263 * currently decoded MB. Then, we wait for the next overlap iteration
264 * to do H overlap on the right edge of this MB, before moving over and
265 * running the V overlap. Therefore, the V overlap makes us trail by one
266 * MB col and the H overlap filter makes us trail by one MB row. This
267 * is reflected in the time at which we run the put_pixels loop. */
268 if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
269 if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
270 v->over_flags_plane[mb_pos - 1])) {
271 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
272 v->block[v->cur_blk_idx][0]);
273 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
274 v->block[v->cur_blk_idx][2]);
275 if (!(s->flags & CODEC_FLAG_GRAY)) {
276 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
277 v->block[v->cur_blk_idx][4]);
278 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
279 v->block[v->cur_blk_idx][5]);
282 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
283 v->block[v->cur_blk_idx][1]);
284 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
285 v->block[v->cur_blk_idx][3]);
287 if (s->mb_x == s->mb_width - 1) {
288 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
289 v->over_flags_plane[mb_pos - s->mb_stride])) {
290 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
291 v->block[v->cur_blk_idx][0]);
292 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
293 v->block[v->cur_blk_idx][1]);
294 if (!(s->flags & CODEC_FLAG_GRAY)) {
295 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
296 v->block[v->cur_blk_idx][4]);
297 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
298 v->block[v->cur_blk_idx][5]);
301 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
302 v->block[v->cur_blk_idx][2]);
303 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
304 v->block[v->cur_blk_idx][3]);
307 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
308 if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
309 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
310 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
311 v->block[v->left_blk_idx][0]);
312 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
313 v->block[v->left_blk_idx][1]);
314 if (!(s->flags & CODEC_FLAG_GRAY)) {
315 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
316 v->block[v->left_blk_idx][4]);
317 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
318 v->block[v->left_blk_idx][5]);
321 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
322 v->block[v->left_blk_idx][2]);
323 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
324 v->block[v->left_blk_idx][3]);
/** Do motion compensation over 1 macroblock
 * Mostly adapted from hpel_motion() and qpel_motion() in mpegvideo.c
 */
331 static void vc1_mc_1mv(VC1Context *v, int dir)
333 MpegEncContext *s = &v->s;
334 DSPContext *dsp = &v->s.dsp;
335 H264ChromaContext *h264chroma = &v->h264chroma;
336 uint8_t *srcY, *srcU, *srcV;
337 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
338 int off, off_uv;
339 int v_edge_pos = s->v_edge_pos >> v->field_mode;
341 if ((!v->field_mode ||
342 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
343 !v->s.last_picture.f.data[0])
344 return;
346 mx = s->mv[dir][0][0];
347 my = s->mv[dir][0][1];
349 // store motion vectors for further use in B frames
350 if (s->pict_type == AV_PICTURE_TYPE_P) {
351 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
352 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
355 uvmx = (mx + ((mx & 3) == 3)) >> 1;
356 uvmy = (my + ((my & 3) == 3)) >> 1;
357 v->luma_mv[s->mb_x][0] = uvmx;
358 v->luma_mv[s->mb_x][1] = uvmy;
360 if (v->field_mode &&
361 v->cur_field_type != v->ref_field_type[dir]) {
362 my = my - 2 + 4 * v->cur_field_type;
363 uvmy = uvmy - 2 + 4 * v->cur_field_type;
    // fastuvmc shall be ignored for interlaced frame pictures
367 if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
368 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
369 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
371 if (v->field_mode) { // interlaced field picture
372 if (!dir) {
373 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
374 srcY = s->current_picture.f.data[0];
375 srcU = s->current_picture.f.data[1];
376 srcV = s->current_picture.f.data[2];
377 } else {
378 srcY = s->last_picture.f.data[0];
379 srcU = s->last_picture.f.data[1];
380 srcV = s->last_picture.f.data[2];
382 } else {
383 srcY = s->next_picture.f.data[0];
384 srcU = s->next_picture.f.data[1];
385 srcV = s->next_picture.f.data[2];
387 } else {
388 if (!dir) {
389 srcY = s->last_picture.f.data[0];
390 srcU = s->last_picture.f.data[1];
391 srcV = s->last_picture.f.data[2];
392 } else {
393 srcY = s->next_picture.f.data[0];
394 srcU = s->next_picture.f.data[1];
395 srcV = s->next_picture.f.data[2];
399 src_x = s->mb_x * 16 + (mx >> 2);
400 src_y = s->mb_y * 16 + (my >> 2);
401 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
402 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
404 if (v->profile != PROFILE_ADVANCED) {
405 src_x = av_clip( src_x, -16, s->mb_width * 16);
406 src_y = av_clip( src_y, -16, s->mb_height * 16);
407 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
408 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
409 } else {
410 src_x = av_clip( src_x, -17, s->avctx->coded_width);
411 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
412 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
413 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
416 srcY += src_y * s->linesize + src_x;
417 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
418 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
420 if (v->field_mode && v->ref_field_type[dir]) {
421 srcY += s->current_picture_ptr->f.linesize[0];
422 srcU += s->current_picture_ptr->f.linesize[1];
423 srcV += s->current_picture_ptr->f.linesize[2];
426 /* for grayscale we should not try to read from unknown area */
427 if (s->flags & CODEC_FLAG_GRAY) {
428 srcU = s->edge_emu_buffer + 18 * s->linesize;
429 srcV = s->edge_emu_buffer + 18 * s->linesize;
432 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
433 || s->h_edge_pos < 22 || v_edge_pos < 22
434 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
435 || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
436 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
438 srcY -= s->mspel * (1 + s->linesize);
439 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
440 17 + s->mspel * 2, 17 + s->mspel * 2,
441 src_x - s->mspel, src_y - s->mspel,
442 s->h_edge_pos, v_edge_pos);
443 srcY = s->edge_emu_buffer;
444 s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
445 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
446 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
447 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
448 srcU = uvbuf;
449 srcV = uvbuf + 16;
450 /* if we deal with range reduction we need to scale source blocks */
451 if (v->rangeredfrm) {
452 int i, j;
453 uint8_t *src, *src2;
455 src = srcY;
456 for (j = 0; j < 17 + s->mspel * 2; j++) {
457 for (i = 0; i < 17 + s->mspel * 2; i++)
458 src[i] = ((src[i] - 128) >> 1) + 128;
459 src += s->linesize;
461 src = srcU;
462 src2 = srcV;
463 for (j = 0; j < 9; j++) {
464 for (i = 0; i < 9; i++) {
465 src[i] = ((src[i] - 128) >> 1) + 128;
466 src2[i] = ((src2[i] - 128) >> 1) + 128;
468 src += s->uvlinesize;
469 src2 += s->uvlinesize;
472 /* if we deal with intensity compensation we need to scale source blocks */
473 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
474 int i, j;
475 uint8_t *src, *src2;
477 src = srcY;
478 for (j = 0; j < 17 + s->mspel * 2; j++) {
479 for (i = 0; i < 17 + s->mspel * 2; i++)
480 src[i] = v->luty[src[i]];
481 src += s->linesize;
483 src = srcU;
484 src2 = srcV;
485 for (j = 0; j < 9; j++) {
486 for (i = 0; i < 9; i++) {
487 src[i] = v->lutuv[src[i]];
488 src2[i] = v->lutuv[src2[i]];
490 src += s->uvlinesize;
491 src2 += s->uvlinesize;
494 srcY += s->mspel * (1 + s->linesize);
497 if (v->field_mode && v->cur_field_type) {
498 off = s->current_picture_ptr->f.linesize[0];
499 off_uv = s->current_picture_ptr->f.linesize[1];
500 } else {
501 off = 0;
502 off_uv = 0;
504 if (s->mspel) {
505 dxy = ((my & 3) << 2) | (mx & 3);
506 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
507 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
508 srcY += s->linesize * 8;
509 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
510 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
511 } else { // hpel mc - always used for luma
512 dxy = (my & 2) | ((mx & 2) >> 1);
513 if (!v->rnd)
514 dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
515 else
516 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
519 if (s->flags & CODEC_FLAG_GRAY) return;
520 /* Chroma MC always uses qpel bilinear */
521 uvmx = (uvmx & 3) << 1;
522 uvmy = (uvmy & 3) << 1;
523 if (!v->rnd) {
524 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
525 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
526 } else {
527 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
528 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
532 static inline int median4(int a, int b, int c, int d)
534 if (a < b) {
535 if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
536 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
537 } else {
538 if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
539 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
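/* Editor's illustrative example (not from the original source):
 *     median4(1, 9, 4, 7) == (FFMIN(9, 7) + FFMAX(1, 4)) / 2 == (7 + 4) / 2 == 5
 * i.e. the two middle values of the four inputs are averaged with truncation. */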
/** Do motion compensation for 4-MV macroblock - luminance block
 */
545 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
547 MpegEncContext *s = &v->s;
548 DSPContext *dsp = &v->s.dsp;
549 uint8_t *srcY;
550 int dxy, mx, my, src_x, src_y;
551 int off;
552 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
553 int v_edge_pos = s->v_edge_pos >> v->field_mode;
555 if ((!v->field_mode ||
556 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
557 !v->s.last_picture.f.data[0])
558 return;
560 mx = s->mv[dir][n][0];
561 my = s->mv[dir][n][1];
563 if (!dir) {
564 if (v->field_mode) {
565 if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
566 srcY = s->current_picture.f.data[0];
567 else
568 srcY = s->last_picture.f.data[0];
569 } else
570 srcY = s->last_picture.f.data[0];
571 } else
572 srcY = s->next_picture.f.data[0];
574 if (v->field_mode) {
575 if (v->cur_field_type != v->ref_field_type[dir])
576 my = my - 2 + 4 * v->cur_field_type;
579 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
580 int same_count = 0, opp_count = 0, k;
581 int chosen_mv[2][4][2], f;
582 int tx, ty;
583 for (k = 0; k < 4; k++) {
584 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
585 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
586 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
587 opp_count += f;
588 same_count += 1 - f;
590 f = opp_count > same_count;
591 switch (f ? opp_count : same_count) {
592 case 4:
593 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
594 chosen_mv[f][2][0], chosen_mv[f][3][0]);
595 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
596 chosen_mv[f][2][1], chosen_mv[f][3][1]);
597 break;
598 case 3:
599 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
600 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
601 break;
602 case 2:
603 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
604 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
605 break;
607 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
608 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
609 for (k = 0; k < 4; k++)
610 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
613 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
614 int qx, qy;
615 int width = s->avctx->coded_width;
616 int height = s->avctx->coded_height >> 1;
617 qx = (s->mb_x * 16) + (mx >> 2);
618 qy = (s->mb_y * 8) + (my >> 3);
620 if (qx < -17)
621 mx -= 4 * (qx + 17);
622 else if (qx > width)
623 mx -= 4 * (qx - width);
624 if (qy < -18)
625 my -= 8 * (qy + 18);
626 else if (qy > height + 1)
627 my -= 8 * (qy - height - 1);
630 if ((v->fcm == ILACE_FRAME) && fieldmv)
631 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
632 else
633 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
634 if (v->field_mode && v->cur_field_type)
635 off += s->current_picture_ptr->f.linesize[0];
637 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
638 if (!fieldmv)
639 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
640 else
641 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
643 if (v->profile != PROFILE_ADVANCED) {
644 src_x = av_clip(src_x, -16, s->mb_width * 16);
645 src_y = av_clip(src_y, -16, s->mb_height * 16);
646 } else {
647 src_x = av_clip(src_x, -17, s->avctx->coded_width);
648 if (v->fcm == ILACE_FRAME) {
649 if (src_y & 1)
650 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
651 else
652 src_y = av_clip(src_y, -18, s->avctx->coded_height);
653 } else {
654 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
658 srcY += src_y * s->linesize + src_x;
659 if (v->field_mode && v->ref_field_type[dir])
660 srcY += s->current_picture_ptr->f.linesize[0];
662 if (fieldmv && !(src_y & 1))
663 v_edge_pos--;
664 if (fieldmv && (src_y & 1) && src_y < 4)
665 src_y--;
666 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
667 || s->h_edge_pos < 13 || v_edge_pos < 23
668 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
669 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
670 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
671 /* check emulate edge stride and offset */
672 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
673 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
674 src_x - s->mspel, src_y - (s->mspel << fieldmv),
675 s->h_edge_pos, v_edge_pos);
676 srcY = s->edge_emu_buffer;
677 /* if we deal with range reduction we need to scale source blocks */
678 if (v->rangeredfrm) {
679 int i, j;
680 uint8_t *src;
682 src = srcY;
683 for (j = 0; j < 9 + s->mspel * 2; j++) {
684 for (i = 0; i < 9 + s->mspel * 2; i++)
685 src[i] = ((src[i] - 128) >> 1) + 128;
686 src += s->linesize << fieldmv;
689 /* if we deal with intensity compensation we need to scale source blocks */
690 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
691 int i, j;
692 uint8_t *src;
694 src = srcY;
695 for (j = 0; j < 9 + s->mspel * 2; j++) {
696 for (i = 0; i < 9 + s->mspel * 2; i++)
697 src[i] = v->luty[src[i]];
698 src += s->linesize << fieldmv;
701 srcY += s->mspel * (1 + (s->linesize << fieldmv));
704 if (s->mspel) {
705 dxy = ((my & 3) << 2) | (mx & 3);
706 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
707 } else { // hpel mc - always used for luma
708 dxy = (my & 2) | ((mx & 2) >> 1);
709 if (!v->rnd)
710 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
711 else
712 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
716 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
718 int idx, i;
719 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
721 idx = ((a[3] != flag) << 3)
722 | ((a[2] != flag) << 2)
723 | ((a[1] != flag) << 1)
724 | (a[0] != flag);
725 if (!idx) {
726 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
727 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
728 return 4;
729 } else if (count[idx] == 1) {
730 switch (idx) {
731 case 0x1:
732 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
733 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
734 return 3;
735 case 0x2:
736 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
737 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
738 return 3;
739 case 0x4:
740 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
741 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
742 return 3;
743 case 0x8:
744 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
745 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
746 return 3;
748 } else if (count[idx] == 2) {
749 int t1 = 0, t2 = 0;
750 for (i = 0; i < 3; i++)
751 if (!a[i]) {
752 t1 = i;
753 break;
755 for (i = t1 + 1; i < 4; i++)
756 if (!a[i]) {
757 t2 = i;
758 break;
760 *tx = (mvx[t1] + mvx[t2]) / 2;
761 *ty = (mvy[t1] + mvy[t2]) / 2;
762 return 2;
763 } else {
764 return 0;
766 return -1;
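/* Editor's illustrative example (not from the original source): for a P
 * macroblock where only luma block 1 is intra, the caller passes
 * a[] = {0, 1, 0, 0} and flag == 0, so idx == 0x2 and count[idx] == 1;
 * the chroma MV is then mid_pred() of the three non-intra luma MVs
 * (blocks 0, 2 and 3). */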
/** Do motion compensation for 4-MV macroblock - both chroma blocks
 */
771 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
773 MpegEncContext *s = &v->s;
774 H264ChromaContext *h264chroma = &v->h264chroma;
775 uint8_t *srcU, *srcV;
776 int uvmx, uvmy, uvsrc_x, uvsrc_y;
777 int k, tx = 0, ty = 0;
778 int mvx[4], mvy[4], intra[4], mv_f[4];
779 int valid_count;
780 int chroma_ref_type = v->cur_field_type, off = 0;
781 int v_edge_pos = s->v_edge_pos >> v->field_mode;
783 if (!v->field_mode && !v->s.last_picture.f.data[0])
784 return;
785 if (s->flags & CODEC_FLAG_GRAY)
786 return;
788 for (k = 0; k < 4; k++) {
789 mvx[k] = s->mv[dir][k][0];
790 mvy[k] = s->mv[dir][k][1];
791 intra[k] = v->mb_type[0][s->block_index[k]];
792 if (v->field_mode)
793 mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
796 /* calculate chroma MV vector from four luma MVs */
797 if (!v->field_mode || (v->field_mode && !v->numref)) {
798 valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
799 chroma_ref_type = v->reffield;
800 if (!valid_count) {
801 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
802 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
803 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
804 return; //no need to do MC for intra blocks
806 } else {
807 int dominant = 0;
808 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
809 dominant = 1;
810 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
811 if (dominant)
812 chroma_ref_type = !v->cur_field_type;
814 if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
815 return;
816 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
817 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
818 uvmx = (tx + ((tx & 3) == 3)) >> 1;
819 uvmy = (ty + ((ty & 3) == 3)) >> 1;
821 v->luma_mv[s->mb_x][0] = uvmx;
822 v->luma_mv[s->mb_x][1] = uvmy;
824 if (v->fastuvmc) {
825 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
826 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
828 // Field conversion bias
829 if (v->cur_field_type != chroma_ref_type)
830 uvmy += 2 - 4 * chroma_ref_type;
832 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
833 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
835 if (v->profile != PROFILE_ADVANCED) {
836 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
837 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
838 } else {
839 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
840 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
843 if (!dir) {
844 if (v->field_mode) {
845 if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
846 srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
847 srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
848 } else {
849 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
850 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
852 } else {
853 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
854 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
856 } else {
857 srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
858 srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
861 if (v->field_mode) {
862 if (chroma_ref_type) {
863 srcU += s->current_picture_ptr->f.linesize[1];
864 srcV += s->current_picture_ptr->f.linesize[2];
866 off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
869 if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
870 || s->h_edge_pos < 18 || v_edge_pos < 18
871 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
872 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
873 s->vdsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
874 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
875 s->h_edge_pos >> 1, v_edge_pos >> 1);
876 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
877 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
878 s->h_edge_pos >> 1, v_edge_pos >> 1);
879 srcU = s->edge_emu_buffer;
880 srcV = s->edge_emu_buffer + 16;
882 /* if we deal with range reduction we need to scale source blocks */
883 if (v->rangeredfrm) {
884 int i, j;
885 uint8_t *src, *src2;
887 src = srcU;
888 src2 = srcV;
889 for (j = 0; j < 9; j++) {
890 for (i = 0; i < 9; i++) {
891 src[i] = ((src[i] - 128) >> 1) + 128;
892 src2[i] = ((src2[i] - 128) >> 1) + 128;
894 src += s->uvlinesize;
895 src2 += s->uvlinesize;
898 /* if we deal with intensity compensation we need to scale source blocks */
899 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
900 int i, j;
901 uint8_t *src, *src2;
903 src = srcU;
904 src2 = srcV;
905 for (j = 0; j < 9; j++) {
906 for (i = 0; i < 9; i++) {
907 src[i] = v->lutuv[src[i]];
908 src2[i] = v->lutuv[src2[i]];
910 src += s->uvlinesize;
911 src2 += s->uvlinesize;
916 /* Chroma MC always uses qpel bilinear */
917 uvmx = (uvmx & 3) << 1;
918 uvmy = (uvmy & 3) << 1;
919 if (!v->rnd) {
920 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
921 h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
922 } else {
923 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
924 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
/** Do motion compensation for 4-MV field chroma macroblock (both U and V)
 */
930 static void vc1_mc_4mv_chroma4(VC1Context *v)
932 MpegEncContext *s = &v->s;
933 H264ChromaContext *h264chroma = &v->h264chroma;
934 uint8_t *srcU, *srcV;
935 int uvsrc_x, uvsrc_y;
936 int uvmx_field[4], uvmy_field[4];
937 int i, off, tx, ty;
938 int fieldmv = v->blk_mv_type[s->block_index[0]];
939 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
940 int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
941 int v_edge_pos = s->v_edge_pos >> 1;
943 if (!v->s.last_picture.f.data[0])
944 return;
945 if (s->flags & CODEC_FLAG_GRAY)
946 return;
948 for (i = 0; i < 4; i++) {
949 tx = s->mv[0][i][0];
950 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
951 ty = s->mv[0][i][1];
952 if (fieldmv)
953 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
954 else
955 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
958 for (i = 0; i < 4; i++) {
959 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
960 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
961 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
962 // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
963 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
964 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
965 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
966 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
967 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
968 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
970 if (fieldmv && !(uvsrc_y & 1))
971 v_edge_pos--;
972 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
973 uvsrc_y--;
974 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
975 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
976 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
977 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
978 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
979 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
980 s->h_edge_pos >> 1, v_edge_pos);
981 s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
982 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
983 s->h_edge_pos >> 1, v_edge_pos);
984 srcU = s->edge_emu_buffer;
985 srcV = s->edge_emu_buffer + 16;
987 /* if we deal with intensity compensation we need to scale source blocks */
988 if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
989 int i, j;
990 uint8_t *src, *src2;
992 src = srcU;
993 src2 = srcV;
994 for (j = 0; j < 5; j++) {
995 for (i = 0; i < 5; i++) {
996 src[i] = v->lutuv[src[i]];
997 src2[i] = v->lutuv[src2[i]];
999 src += s->uvlinesize << 1;
1000 src2 += s->uvlinesize << 1;
1004 if (!v->rnd) {
1005 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1006 h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1007 } else {
1008 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1009 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
/***********************************************************************/
/**
 * @name VC-1 Block-level functions
 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
 * @{
 */
/**
 * @def GET_MQUANT
 * @brief Get macroblock-level quantizer scale
 */
1025 #define GET_MQUANT() \
1026 if (v->dquantfrm) { \
1027 int edges = 0; \
1028 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1029 if (v->dqbilevel) { \
1030 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1031 } else { \
1032 mqdiff = get_bits(gb, 3); \
1033 if (mqdiff != 7) \
1034 mquant = v->pq + mqdiff; \
1035 else \
1036 mquant = get_bits(gb, 5); \
1039 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1040 edges = 1 << v->dqsbedge; \
1041 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1042 edges = (3 << v->dqsbedge) % 15; \
1043 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1044 edges = 15; \
1045 if ((edges&1) && !s->mb_x) \
1046 mquant = v->altpq; \
1047 if ((edges&2) && s->first_slice_line) \
1048 mquant = v->altpq; \
1049 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1050 mquant = v->altpq; \
1051 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1052 mquant = v->altpq; \
1053 if (!mquant || mquant > 31) { \
1054 av_log(v->s.avctx, AV_LOG_ERROR, \
1055 "Overriding invalid mquant %d\n", mquant); \
1056 mquant = 1; \
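/* Editor's illustrative example (not from the original source): with
 * DQPROFILE_DOUBLE_EDGES, edges = (3 << dqsbedge) % 15 selects an adjacent
 * pair of picture edges; dqsbedge 0..3 gives 3 (left+top), 6 (top+right),
 * 12 (right+bottom) and 9 (bottom+left), and macroblocks on the selected
 * edges use v->altpq instead of v->pq. */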
/**
 * @def GET_MVDATA(_dmv_x, _dmv_y)
 * @brief Get MV differentials
 * @see MVDATA decoding from 8.3.5.2, p(1)20
 * @param _dmv_x Horizontal differential for decoded MV
 * @param _dmv_y Vertical differential for decoded MV
 */
1067 #define GET_MVDATA(_dmv_x, _dmv_y) \
1068 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1069 VC1_MV_DIFF_VLC_BITS, 2); \
1070 if (index > 36) { \
1071 mb_has_coeffs = 1; \
1072 index -= 37; \
1073 } else \
1074 mb_has_coeffs = 0; \
1075 s->mb_intra = 0; \
1076 if (!index) { \
1077 _dmv_x = _dmv_y = 0; \
1078 } else if (index == 35) { \
1079 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1080 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1081 } else if (index == 36) { \
1082 _dmv_x = 0; \
1083 _dmv_y = 0; \
1084 s->mb_intra = 1; \
1085 } else { \
1086 index1 = index % 6; \
1087 if (!s->quarter_sample && index1 == 5) val = 1; \
1088 else val = 0; \
1089 if (size_table[index1] - val > 0) \
1090 val = get_bits(gb, size_table[index1] - val); \
1091 else val = 0; \
1092 sign = 0 - (val&1); \
1093 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1095 index1 = index / 6; \
1096 if (!s->quarter_sample && index1 == 5) val = 1; \
1097 else val = 0; \
1098 if (size_table[index1] - val > 0) \
1099 val = get_bits(gb, size_table[index1] - val); \
1100 else val = 0; \
1101 sign = 0 - (val & 1); \
1102 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
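/* Editor's illustrative note (not from the original source): in GET_MVDATA,
 * index 0 codes a zero differential, 35 an escape where the differentials
 * are read as raw fixed-length bit fields (dependent on k_x/k_y), and 36 an
 * intra macroblock; any other index splits as index % 6 and index / 6 to
 * select the x and y size classes. size_table[] and offset_table[] are
 * expected to be in scope wherever the macro is expanded. */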
1105 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1106 int *dmv_y, int *pred_flag)
1108 int index, index1;
1109 int extend_x = 0, extend_y = 0;
1110 GetBitContext *gb = &v->s.gb;
1111 int bits, esc;
1112 int val, sign;
1113 const int* offs_tab;
1115 if (v->numref) {
1116 bits = VC1_2REF_MVDATA_VLC_BITS;
1117 esc = 125;
1118 } else {
1119 bits = VC1_1REF_MVDATA_VLC_BITS;
1120 esc = 71;
1122 switch (v->dmvrange) {
1123 case 1:
1124 extend_x = 1;
1125 break;
1126 case 2:
1127 extend_y = 1;
1128 break;
1129 case 3:
1130 extend_x = extend_y = 1;
1131 break;
1133 index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1134 if (index == esc) {
1135 *dmv_x = get_bits(gb, v->k_x);
1136 *dmv_y = get_bits(gb, v->k_y);
1137 if (v->numref) {
1138 if (pred_flag) {
1139 *pred_flag = *dmv_y & 1;
1140 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1141 } else {
1142 *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1146 else {
1147 if (extend_x)
1148 offs_tab = offset_table2;
1149 else
1150 offs_tab = offset_table1;
1151 index1 = (index + 1) % 9;
1152 if (index1 != 0) {
1153 val = get_bits(gb, index1 + extend_x);
1154 sign = 0 -(val & 1);
1155 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1156 } else
1157 *dmv_x = 0;
1158 if (extend_y)
1159 offs_tab = offset_table2;
1160 else
1161 offs_tab = offset_table1;
1162 index1 = (index + 1) / 9;
1163 if (index1 > v->numref) {
1164 val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1165 sign = 0 - (val & 1);
1166 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1167 } else
1168 *dmv_y = 0;
1169 if (v->numref && pred_flag)
1170 *pred_flag = index1 & 1;
1174 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1176 int scaledvalue, refdist;
1177 int scalesame1, scalesame2;
1178 int scalezone1_x, zone1offset_x;
1179 int table_index = dir ^ v->second_field;
1181 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1182 refdist = v->refdist;
1183 else
1184 refdist = dir ? v->brfd : v->frfd;
1185 if (refdist > 3)
1186 refdist = 3;
1187 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1188 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1189 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1190 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1192 if (FFABS(n) > 255)
1193 scaledvalue = n;
1194 else {
1195 if (FFABS(n) < scalezone1_x)
1196 scaledvalue = (n * scalesame1) >> 8;
1197 else {
1198 if (n < 0)
1199 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1200 else
1201 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1204 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1207 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1209 int scaledvalue, refdist;
1210 int scalesame1, scalesame2;
1211 int scalezone1_y, zone1offset_y;
1212 int table_index = dir ^ v->second_field;
1214 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1215 refdist = v->refdist;
1216 else
1217 refdist = dir ? v->brfd : v->frfd;
1218 if (refdist > 3)
1219 refdist = 3;
1220 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1221 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1222 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1223 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1225 if (FFABS(n) > 63)
1226 scaledvalue = n;
1227 else {
1228 if (FFABS(n) < scalezone1_y)
1229 scaledvalue = (n * scalesame1) >> 8;
1230 else {
1231 if (n < 0)
1232 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1233 else
1234 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1238 if (v->cur_field_type && !v->ref_field_type[dir])
1239 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1240 else
1241 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1244 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1246 int scalezone1_x, zone1offset_x;
1247 int scaleopp1, scaleopp2, brfd;
1248 int scaledvalue;
1250 brfd = FFMIN(v->brfd, 3);
1251 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1252 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1253 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1254 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1256 if (FFABS(n) > 255)
1257 scaledvalue = n;
1258 else {
1259 if (FFABS(n) < scalezone1_x)
1260 scaledvalue = (n * scaleopp1) >> 8;
1261 else {
1262 if (n < 0)
1263 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1264 else
1265 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1268 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1271 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1273 int scalezone1_y, zone1offset_y;
1274 int scaleopp1, scaleopp2, brfd;
1275 int scaledvalue;
1277 brfd = FFMIN(v->brfd, 3);
1278 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1279 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1280 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1281 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1283 if (FFABS(n) > 63)
1284 scaledvalue = n;
1285 else {
1286 if (FFABS(n) < scalezone1_y)
1287 scaledvalue = (n * scaleopp1) >> 8;
1288 else {
1289 if (n < 0)
1290 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1291 else
1292 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1295 if (v->cur_field_type && !v->ref_field_type[dir]) {
1296 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1297 } else {
1298 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1302 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1303 int dim, int dir)
1305 int brfd, scalesame;
1306 int hpel = 1 - v->s.quarter_sample;
1308 n >>= hpel;
1309 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1310 if (dim)
1311 n = scaleforsame_y(v, i, n, dir) << hpel;
1312 else
1313 n = scaleforsame_x(v, n, dir) << hpel;
1314 return n;
1316 brfd = FFMIN(v->brfd, 3);
1317 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1319 n = (n * scalesame >> 8) << hpel;
1320 return n;
1323 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1324 int dim, int dir)
1326 int refdist, scaleopp;
1327 int hpel = 1 - v->s.quarter_sample;
1329 n >>= hpel;
1330 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1331 if (dim)
1332 n = scaleforopp_y(v, n, dir) << hpel;
1333 else
1334 n = scaleforopp_x(v, n) << hpel;
1335 return n;
1337 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1338 refdist = FFMIN(v->refdist, 3);
1339 else
1340 refdist = dir ? v->brfd : v->frfd;
1341 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1343 n = (n * scaleopp >> 8) << hpel;
1344 return n;
/** Predict and set motion vector
 */
1349 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1350 int mv1, int r_x, int r_y, uint8_t* is_intra,
1351 int pred_flag, int dir)
1353 MpegEncContext *s = &v->s;
1354 int xy, wrap, off = 0;
1355 int16_t *A, *B, *C;
1356 int px, py;
1357 int sum;
1358 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1359 int opposite, a_f, b_f, c_f;
1360 int16_t field_predA[2];
1361 int16_t field_predB[2];
1362 int16_t field_predC[2];
1363 int a_valid, b_valid, c_valid;
1364 int hybridmv_thresh, y_bias = 0;
1366 if (v->mv_mode == MV_PMODE_MIXED_MV ||
1367 ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1368 mixedmv_pic = 1;
1369 else
1370 mixedmv_pic = 0;
1371 /* scale MV difference to be quad-pel */
1372 dmv_x <<= 1 - s->quarter_sample;
1373 dmv_y <<= 1 - s->quarter_sample;
1375 wrap = s->b8_stride;
1376 xy = s->block_index[n];
1378 if (s->mb_intra) {
1379 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1380 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1381 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1382 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1383 if (mv1) { /* duplicate motion data for 1-MV block */
1384 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1385 s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1386 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1387 s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1388 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1389 s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1390 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1391 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1392 s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1393 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1394 s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1395 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1396 s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1398 return;
1401 C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1402 A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1403 if (mv1) {
1404 if (v->field_mode && mixedmv_pic)
1405 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1406 else
1407 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1408 } else {
1409 //in 4-MV mode different blocks have different B predictor position
1410 switch (n) {
1411 case 0:
1412 off = (s->mb_x > 0) ? -1 : 1;
1413 break;
1414 case 1:
1415 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1416 break;
1417 case 2:
1418 off = 1;
1419 break;
1420 case 3:
1421 off = -1;
1424 B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
1426 a_valid = !s->first_slice_line || (n == 2 || n == 3);
1427 b_valid = a_valid && (s->mb_width > 1);
1428 c_valid = s->mb_x || (n == 1 || n == 3);
1429 if (v->field_mode) {
1430 a_valid = a_valid && !is_intra[xy - wrap];
1431 b_valid = b_valid && !is_intra[xy - wrap + off];
1432 c_valid = c_valid && !is_intra[xy - 1];
1435 if (a_valid) {
1436 a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1437 num_oppfield += a_f;
1438 num_samefield += 1 - a_f;
1439 field_predA[0] = A[0];
1440 field_predA[1] = A[1];
1441 } else {
1442 field_predA[0] = field_predA[1] = 0;
1443 a_f = 0;
1445 if (b_valid) {
1446 b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1447 num_oppfield += b_f;
1448 num_samefield += 1 - b_f;
1449 field_predB[0] = B[0];
1450 field_predB[1] = B[1];
1451 } else {
1452 field_predB[0] = field_predB[1] = 0;
1453 b_f = 0;
1455 if (c_valid) {
1456 c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1457 num_oppfield += c_f;
1458 num_samefield += 1 - c_f;
1459 field_predC[0] = C[0];
1460 field_predC[1] = C[1];
1461 } else {
1462 field_predC[0] = field_predC[1] = 0;
1463 c_f = 0;
1466 if (v->field_mode) {
1467 if (!v->numref)
1468 // REFFIELD determines if the last field or the second-last field is
1469 // to be used as reference
1470 opposite = 1 - v->reffield;
1471 else {
1472 if (num_samefield <= num_oppfield)
1473 opposite = 1 - pred_flag;
1474 else
1475 opposite = pred_flag;
1477 } else
1478 opposite = 0;
1479 if (opposite) {
1480 if (a_valid && !a_f) {
1481 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1482 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1484 if (b_valid && !b_f) {
1485 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1486 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1488 if (c_valid && !c_f) {
1489 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1490 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1492 v->mv_f[dir][xy + v->blocks_off] = 1;
1493 v->ref_field_type[dir] = !v->cur_field_type;
1494 } else {
1495 if (a_valid && a_f) {
1496 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1497 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1499 if (b_valid && b_f) {
1500 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1501 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1503 if (c_valid && c_f) {
1504 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1505 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1507 v->mv_f[dir][xy + v->blocks_off] = 0;
1508 v->ref_field_type[dir] = v->cur_field_type;
1511 if (a_valid) {
1512 px = field_predA[0];
1513 py = field_predA[1];
1514 } else if (c_valid) {
1515 px = field_predC[0];
1516 py = field_predC[1];
1517 } else if (b_valid) {
1518 px = field_predB[0];
1519 py = field_predB[1];
1520 } else {
1521 px = 0;
1522 py = 0;
1525 if (num_samefield + num_oppfield > 1) {
1526 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1527 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1530 /* Pullback MV as specified in 8.3.5.3.4 */
1531 if (!v->field_mode) {
1532 int qx, qy, X, Y;
1533 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1534 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1535 X = (s->mb_width << 6) - 4;
1536 Y = (s->mb_height << 6) - 4;
1537 if (mv1) {
1538 if (qx + px < -60) px = -60 - qx;
1539 if (qy + py < -60) py = -60 - qy;
1540 } else {
1541 if (qx + px < -28) px = -28 - qx;
1542 if (qy + py < -28) py = -28 - qy;
1544 if (qx + px > X) px = X - qx;
1545 if (qy + py > Y) py = Y - qy;
1548 if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1549 /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1550 hybridmv_thresh = 32;
1551 if (a_valid && c_valid) {
1552 if (is_intra[xy - wrap])
1553 sum = FFABS(px) + FFABS(py);
1554 else
1555 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1556 if (sum > hybridmv_thresh) {
1557 if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1558 px = field_predA[0];
1559 py = field_predA[1];
1560 } else {
1561 px = field_predC[0];
1562 py = field_predC[1];
1564 } else {
1565 if (is_intra[xy - 1])
1566 sum = FFABS(px) + FFABS(py);
1567 else
1568 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1569 if (sum > hybridmv_thresh) {
1570 if (get_bits1(&s->gb)) {
1571 px = field_predA[0];
1572 py = field_predA[1];
1573 } else {
1574 px = field_predC[0];
1575 py = field_predC[1];
1582 if (v->field_mode && v->numref)
1583 r_y >>= 1;
1584 if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1585 y_bias = 1;
1586 /* store MV using signed modulus of MV range defined in 4.11 */
1587 s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1588 s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1589 if (mv1) { /* duplicate motion data for 1-MV block */
1590 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1591 s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1592 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1593 s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1594 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1595 s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1596 v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1597 v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
/** Predict and set motion vector for interlaced frame picture MBs
 */
1603 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1604 int mvn, int r_x, int r_y, uint8_t* is_intra)
1606 MpegEncContext *s = &v->s;
1607 int xy, wrap, off = 0;
1608 int A[2], B[2], C[2];
1609 int px, py;
1610 int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
1612 int total_valid, num_samefield, num_oppfield;
1613 int pos_c, pos_b, n_adj;
1615 wrap = s->b8_stride;
1616 xy = s->block_index[n];
1618 if (s->mb_intra) {
1619 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1620 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1621 s->current_picture.f.motion_val[1][xy][0] = 0;
1622 s->current_picture.f.motion_val[1][xy][1] = 0;
1623 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1624 s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1625 s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1626 s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1627 s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1628 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1629 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1630 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1631 s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1632 s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1633 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1634 s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1635 s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1636 s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
1638 return;
1641 off = ((n == 0) || (n == 1)) ? 1 : -1;
1642 /* predict A */
1643 if (s->mb_x || (n == 1) || (n == 3)) {
1644 if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1645 || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1646 A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1647 A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1648 a_valid = 1;
1649 } else { // current block has frame mv and cand. has field MV (so average)
1650 A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1651 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1652 A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1653 + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
1654 a_valid = 1;
1656 if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1657 a_valid = 0;
1658 A[0] = A[1] = 0;
1660 } else
1661 A[0] = A[1] = 0;
1662 /* Predict B and C */
1663 B[0] = B[1] = C[0] = C[1] = 0;
1664 if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1665 if (!s->first_slice_line) {
1666 if (!v->is_intra[s->mb_x - s->mb_stride]) {
1667 b_valid = 1;
1668 n_adj = n | 2;
1669 pos_b = s->block_index[n_adj] - 2 * wrap;
1670 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1671 n_adj = (n & 2) | (n & 1);
1673 B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1674 B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
1675 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1676 B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1677 B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1680 if (s->mb_width > 1) {
1681 if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1682 c_valid = 1;
1683 n_adj = 2;
1684 pos_c = s->block_index[2] - 2 * wrap + 2;
1685 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1686 n_adj = n & 2;
1688 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1689 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1690 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1691 C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1692 C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1694 if (s->mb_x == s->mb_width - 1) {
1695 if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1696 c_valid = 1;
1697 n_adj = 3;
1698 pos_c = s->block_index[3] - 2 * wrap - 2;
1699 if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1700 n_adj = n | 1;
1702 C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1703 C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1704 if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1705 C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1706 C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1708 } else
1709 c_valid = 0;
1714 } else {
1715 pos_b = s->block_index[1];
1716 b_valid = 1;
1717 B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1718 B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1719 pos_c = s->block_index[0];
1720 c_valid = 1;
1721 C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1722 C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1725 total_valid = a_valid + b_valid + c_valid;
1726 // check if predictor A is out of bounds
1727 if (!s->mb_x && !(n == 1 || n == 3)) {
1728 A[0] = A[1] = 0;
1730 // check if predictor B is out of bounds
1731 if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1732 B[0] = B[1] = C[0] = C[1] = 0;
1734 if (!v->blk_mv_type[xy]) {
1735 if (s->mb_width == 1) {
1736 px = B[0];
1737 py = B[1];
1738 } else {
1739 if (total_valid >= 2) {
1740 px = mid_pred(A[0], B[0], C[0]);
1741 py = mid_pred(A[1], B[1], C[1]);
1742 } else if (total_valid) {
1743 if (a_valid) { px = A[0]; py = A[1]; }
1744 if (b_valid) { px = B[0]; py = B[1]; }
1745 if (c_valid) { px = C[0]; py = C[1]; }
1746 } else
1747 px = py = 0;
1749 } else {
1750 if (a_valid)
1751 field_a = (A[1] & 4) ? 1 : 0;
1752 else
1753 field_a = 0;
1754 if (b_valid)
1755 field_b = (B[1] & 4) ? 1 : 0;
1756 else
1757 field_b = 0;
1758 if (c_valid)
1759 field_c = (C[1] & 4) ? 1 : 0;
1760 else
1761 field_c = 0;
1763 num_oppfield = field_a + field_b + field_c;
1764 num_samefield = total_valid - num_oppfield;
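            /* Note: the (A[1] & 4) style tests above just record whether bit 2 of
             * each valid predictor's vertical component is set; num_samefield and
             * num_oppfield split the candidates into "same field" and "opposite
             * field" groups.  The selection below uses the median only when all
             * three candidates agree, and otherwise takes the first candidate, in
             * A/B/C priority order, from the majority group (same-field wins a
             * tie). */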
1765 if (total_valid == 3) {
1766 if ((num_samefield == 3) || (num_oppfield == 3)) {
1767 px = mid_pred(A[0], B[0], C[0]);
1768 py = mid_pred(A[1], B[1], C[1]);
1769 } else if (num_samefield >= num_oppfield) {
1770 /* take one MV from the same-field set depending on priority;
1771 the check for B may not be necessary */
1772 px = !field_a ? A[0] : B[0];
1773 py = !field_a ? A[1] : B[1];
1774 } else {
1775 px = field_a ? A[0] : B[0];
1776 py = field_a ? A[1] : B[1];
1778 } else if (total_valid == 2) {
1779 if (num_samefield >= num_oppfield) {
1780 if (!field_a && a_valid) {
1781 px = A[0];
1782 py = A[1];
1783 } else if (!field_b && b_valid) {
1784 px = B[0];
1785 py = B[1];
1786 } else if (c_valid) {
1787 px = C[0];
1788 py = C[1];
1789 } else px = py = 0;
1790 } else {
1791 if (field_a && a_valid) {
1792 px = A[0];
1793 py = A[1];
1794 } else if (field_b && b_valid) {
1795 px = B[0];
1796 py = B[1];
1797 } else if (c_valid) {
1798 px = C[0];
1799 py = C[1];
1802 } else if (total_valid == 1) {
1803 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1804 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1805 } else
1806 px = py = 0;
1809 /* store MV using signed modulus of MV range defined in 4.11 */
1810 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1811 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
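            /* The masked add above wraps the prediction plus differential into the
             * legal range [-r_x, r_x - 1] (r_x being a power of two) rather than
             * clipping it.  For example, with r_x = 64, px = 60 and dmv_x = 10:
             * (60 + 10 + 64) & 127 = 6, and 6 - 64 = -58, so the vector wraps
             * around the range boundary. */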
1812 if (mvn == 1) { /* duplicate motion data for 1-MV block */
1813 s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1814 s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1815 s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1816 s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1817 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1818 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1819 } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1820 s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1821 s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1822 s->mv[0][n + 1][0] = s->mv[0][n][0];
1823 s->mv[0][n + 1][1] = s->mv[0][n][1];
1827 /** Motion compensation for direct or interpolated blocks in B-frames
1829 static void vc1_interp_mc(VC1Context *v)
1831 MpegEncContext *s = &v->s;
1832 DSPContext *dsp = &v->s.dsp;
1833 H264ChromaContext *h264chroma = &v->h264chroma;
1834 uint8_t *srcY, *srcU, *srcV;
1835 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1836 int off, off_uv;
1837 int v_edge_pos = s->v_edge_pos >> v->field_mode;
1839 if (!v->field_mode && !v->s.next_picture.f.data[0])
1840 return;
1842 mx = s->mv[1][0][0];
1843 my = s->mv[1][0][1];
1844 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1845 uvmy = (my + ((my & 3) == 3)) >> 1;
1846 if (v->field_mode) {
1847 if (v->cur_field_type != v->ref_field_type[1])
1848 my = my - 2 + 4 * v->cur_field_type;
1849 uvmy = uvmy - 2 + 4 * v->cur_field_type;
1851 if (v->fastuvmc) {
1852 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1853 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
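    /* The uvmx/uvmy computation above halves the quarter-pel luma MV to get the
     * chroma MV, with the "(mx & 3) == 3" term rounding 3/4-pel positions up:
     * e.g. a luma mx of 7 (1.75 pel) becomes a chroma uvmx of 4 (1.0 pel).  With
     * FASTUVMC the result is then pushed away from zero to the nearest even
     * value, i.e. restricted to half-pel chroma positions. */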
1855 srcY = s->next_picture.f.data[0];
1856 srcU = s->next_picture.f.data[1];
1857 srcV = s->next_picture.f.data[2];
1859 src_x = s->mb_x * 16 + (mx >> 2);
1860 src_y = s->mb_y * 16 + (my >> 2);
1861 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1862 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1864 if (v->profile != PROFILE_ADVANCED) {
1865 src_x = av_clip( src_x, -16, s->mb_width * 16);
1866 src_y = av_clip( src_y, -16, s->mb_height * 16);
1867 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1868 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1869 } else {
1870 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1871 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1872 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1873 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1876 srcY += src_y * s->linesize + src_x;
1877 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1878 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1880 if (v->field_mode && v->ref_field_type[1]) {
1881 srcY += s->current_picture_ptr->f.linesize[0];
1882 srcU += s->current_picture_ptr->f.linesize[1];
1883 srcV += s->current_picture_ptr->f.linesize[2];
1886 /* for grayscale we should not try to read from unknown area */
1887 if (s->flags & CODEC_FLAG_GRAY) {
1888 srcU = s->edge_emu_buffer + 18 * s->linesize;
1889 srcV = s->edge_emu_buffer + 18 * s->linesize;
1892 if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
1893 || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1894 || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1895 uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1897 srcY -= s->mspel * (1 + s->linesize);
1898 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
1899 17 + s->mspel * 2, 17 + s->mspel * 2,
1900 src_x - s->mspel, src_y - s->mspel,
1901 s->h_edge_pos, v_edge_pos);
1902 srcY = s->edge_emu_buffer;
1903 s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
1904 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1905 s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
1906 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1907 srcU = uvbuf;
1908 srcV = uvbuf + 16;
1909 /* if we deal with range reduction we need to scale source blocks */
1910 if (v->rangeredfrm) {
1911 int i, j;
1912 uint8_t *src, *src2;
1914 src = srcY;
1915 for (j = 0; j < 17 + s->mspel * 2; j++) {
1916 for (i = 0; i < 17 + s->mspel * 2; i++)
1917 src[i] = ((src[i] - 128) >> 1) + 128;
1918 src += s->linesize;
1920 src = srcU;
1921 src2 = srcV;
1922 for (j = 0; j < 9; j++) {
1923 for (i = 0; i < 9; i++) {
1924 src[i] = ((src[i] - 128) >> 1) + 128;
1925 src2[i] = ((src2[i] - 128) >> 1) + 128;
1927 src += s->uvlinesize;
1928 src2 += s->uvlinesize;
1931 srcY += s->mspel * (1 + s->linesize);
1934 if (v->field_mode && v->cur_field_type) {
1935 off = s->current_picture_ptr->f.linesize[0];
1936 off_uv = s->current_picture_ptr->f.linesize[1];
1937 } else {
1938 off = 0;
1939 off_uv = 0;
1942 if (s->mspel) {
1943 dxy = ((my & 3) << 2) | (mx & 3);
1944 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
1945 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
1946 srcY += s->linesize * 8;
1947 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
1948 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
1949 } else { // hpel mc
1950 dxy = (my & 2) | ((mx & 2) >> 1);
1952 if (!v->rnd)
1953 dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1954 else
1955 dsp->avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
1958 if (s->flags & CODEC_FLAG_GRAY) return;
1959 /* Chroma MC always uses qpel bilinear */
1960 uvmx = (uvmx & 3) << 1;
1961 uvmy = (uvmy & 3) << 1;
1962 if (!v->rnd) {
1963 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1964 h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1965 } else {
1966 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1967 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1971 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
1973 int n = bfrac;
1975 #if B_FRACTION_DEN==256
1976 if (inv)
1977 n -= 256;
1978 if (!qs)
1979 return 2 * ((value * n + 255) >> 9);
1980 return (value * n + 128) >> 8;
1981 #else
1982 if (inv)
1983 n -= B_FRACTION_DEN;
1984 if (!qs)
1985 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
1986 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
1987 #endif
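/* Rough illustration of scale_mv(): assuming B_FRACTION_DEN is 256 (the #if
 * branch above), the co-located MV component from the next anchor picture is
 * scaled by the temporal position of the B frame -- about value * bfrac / 256
 * for the forward component and value * (bfrac - 256) / 256 for the backward
 * one, restricted to even values when quarter-pel sampling is off.  E.g.
 * bfrac = 64 and a co-located component of 16 give a forward prediction of 4
 * and a backward prediction of -12 in quarter-pel units. */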
1990 /** Reconstruct motion vector for B-frame and do motion compensation
1992 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
1993 int direct, int mode)
1995 if (v->use_ic) {
1996 v->mv_mode2 = v->mv_mode;
1997 v->mv_mode = MV_PMODE_INTENSITY_COMP;
1999 if (direct) {
2000 vc1_mc_1mv(v, 0);
2001 vc1_interp_mc(v);
2002 if (v->use_ic)
2003 v->mv_mode = v->mv_mode2;
2004 return;
2006 if (mode == BMV_TYPE_INTERPOLATED) {
2007 vc1_mc_1mv(v, 0);
2008 vc1_interp_mc(v);
2009 if (v->use_ic)
2010 v->mv_mode = v->mv_mode2;
2011 return;
2014 if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2015 v->mv_mode = v->mv_mode2;
2016 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2017 if (v->use_ic)
2018 v->mv_mode = v->mv_mode2;
2021 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2022 int direct, int mvtype)
2024 MpegEncContext *s = &v->s;
2025 int xy, wrap, off = 0;
2026 int16_t *A, *B, *C;
2027 int px, py;
2028 int sum;
2029 int r_x, r_y;
2030 const uint8_t *is_intra = v->mb_type[0];
2032 r_x = v->range_x;
2033 r_y = v->range_y;
2034 /* scale MV difference to be quad-pel */
2035 dmv_x[0] <<= 1 - s->quarter_sample;
2036 dmv_y[0] <<= 1 - s->quarter_sample;
2037 dmv_x[1] <<= 1 - s->quarter_sample;
2038 dmv_y[1] <<= 1 - s->quarter_sample;
2040 wrap = s->b8_stride;
2041 xy = s->block_index[0];
2043 if (s->mb_intra) {
2044 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2045 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2046 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2047 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2048 return;
2050 if (!v->field_mode) {
2051 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2052 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2053 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2054 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2056 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2057 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2058 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2059 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2060 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2062 if (direct) {
2063 s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2064 s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2065 s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2066 s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
2067 return;
2070 if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2071 C = s->current_picture.f.motion_val[0][xy - 2];
2072 A = s->current_picture.f.motion_val[0][xy - wrap * 2];
2073 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2074 B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2076 if (!s->mb_x) C[0] = C[1] = 0;
2077 if (!s->first_slice_line) { // predictor A is not out of bounds
2078 if (s->mb_width == 1) {
2079 px = A[0];
2080 py = A[1];
2081 } else {
2082 px = mid_pred(A[0], B[0], C[0]);
2083 py = mid_pred(A[1], B[1], C[1]);
2085 } else if (s->mb_x) { // predictor C is not out of bounds
2086 px = C[0];
2087 py = C[1];
2088 } else {
2089 px = py = 0;
2091 /* Pullback MV as specified in 8.3.5.3.4 */
2093 int qx, qy, X, Y;
2094 if (v->profile < PROFILE_ADVANCED) {
2095 qx = (s->mb_x << 5);
2096 qy = (s->mb_y << 5);
2097 X = (s->mb_width << 5) - 4;
2098 Y = (s->mb_height << 5) - 4;
2099 if (qx + px < -28) px = -28 - qx;
2100 if (qy + py < -28) py = -28 - qy;
2101 if (qx + px > X) px = X - qx;
2102 if (qy + py > Y) py = Y - qy;
2103 } else {
2104 qx = (s->mb_x << 6);
2105 qy = (s->mb_y << 6);
2106 X = (s->mb_width << 6) - 4;
2107 Y = (s->mb_height << 6) - 4;
2108 if (qx + px < -60) px = -60 - qx;
2109 if (qy + py < -60) py = -60 - qy;
2110 if (qx + px > X) px = X - qx;
2111 if (qy + py > Y) py = Y - qy;
2114 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2115 if (0 && !s->first_slice_line && s->mb_x) {
2116 if (is_intra[xy - wrap])
2117 sum = FFABS(px) + FFABS(py);
2118 else
2119 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2120 if (sum > 32) {
2121 if (get_bits1(&s->gb)) {
2122 px = A[0];
2123 py = A[1];
2124 } else {
2125 px = C[0];
2126 py = C[1];
2128 } else {
2129 if (is_intra[xy - 2])
2130 sum = FFABS(px) + FFABS(py);
2131 else
2132 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2133 if (sum > 32) {
2134 if (get_bits1(&s->gb)) {
2135 px = A[0];
2136 py = A[1];
2137 } else {
2138 px = C[0];
2139 py = C[1];
2144 /* store MV using signed modulus of MV range defined in 4.11 */
2145 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2146 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2148 if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2149 C = s->current_picture.f.motion_val[1][xy - 2];
2150 A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2151 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2152 B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2154 if (!s->mb_x)
2155 C[0] = C[1] = 0;
2156 if (!s->first_slice_line) { // predictor A is not out of bounds
2157 if (s->mb_width == 1) {
2158 px = A[0];
2159 py = A[1];
2160 } else {
2161 px = mid_pred(A[0], B[0], C[0]);
2162 py = mid_pred(A[1], B[1], C[1]);
2164 } else if (s->mb_x) { // predictor C is not out of bounds
2165 px = C[0];
2166 py = C[1];
2167 } else {
2168 px = py = 0;
2170 /* Pullback MV as specified in 8.3.5.3.4 */
2172 int qx, qy, X, Y;
2173 if (v->profile < PROFILE_ADVANCED) {
2174 qx = (s->mb_x << 5);
2175 qy = (s->mb_y << 5);
2176 X = (s->mb_width << 5) - 4;
2177 Y = (s->mb_height << 5) - 4;
2178 if (qx + px < -28) px = -28 - qx;
2179 if (qy + py < -28) py = -28 - qy;
2180 if (qx + px > X) px = X - qx;
2181 if (qy + py > Y) py = Y - qy;
2182 } else {
2183 qx = (s->mb_x << 6);
2184 qy = (s->mb_y << 6);
2185 X = (s->mb_width << 6) - 4;
2186 Y = (s->mb_height << 6) - 4;
2187 if (qx + px < -60) px = -60 - qx;
2188 if (qy + py < -60) py = -60 - qy;
2189 if (qx + px > X) px = X - qx;
2190 if (qy + py > Y) py = Y - qy;
2193 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2194 if (0 && !s->first_slice_line && s->mb_x) {
2195 if (is_intra[xy - wrap])
2196 sum = FFABS(px) + FFABS(py);
2197 else
2198 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2199 if (sum > 32) {
2200 if (get_bits1(&s->gb)) {
2201 px = A[0];
2202 py = A[1];
2203 } else {
2204 px = C[0];
2205 py = C[1];
2207 } else {
2208 if (is_intra[xy - 2])
2209 sum = FFABS(px) + FFABS(py);
2210 else
2211 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2212 if (sum > 32) {
2213 if (get_bits1(&s->gb)) {
2214 px = A[0];
2215 py = A[1];
2216 } else {
2217 px = C[0];
2218 py = C[1];
2223 /* store MV using signed modulus of MV range defined in 4.11 */
2225 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2226 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2228 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2229 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2230 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2231 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
2234 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2236 int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2237 MpegEncContext *s = &v->s;
2238 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2240 if (v->bmvtype == BMV_TYPE_DIRECT) {
2241 int total_opp, k, f;
2242 if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2243 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2244 v->bfraction, 0, s->quarter_sample);
2245 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2246 v->bfraction, 0, s->quarter_sample);
2247 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2248 v->bfraction, 1, s->quarter_sample);
2249 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2250 v->bfraction, 1, s->quarter_sample);
2252 total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2253 + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2254 + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2255 + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2256 f = (total_opp > 2) ? 1 : 0;
2257 } else {
2258 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2259 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2260 f = 0;
2262 v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2263 for (k = 0; k < 4; k++) {
2264 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2265 s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2266 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2267 s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2268 v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2269 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2271 return;
2273 if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2274 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2275 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2276 return;
2278 if (dir) { // backward
2279 vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2280 if (n == 3 || mv1) {
2281 vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2283 } else { // forward
2284 vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2285 if (n == 3 || mv1) {
2286 vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2291 /** Get predicted DC value for I-frames only
2292 * prediction dir: left=0, top=1
2293 * @param s MpegEncContext
2294 * @param overlap flag indicating that overlap filtering is used
2295 * @param pq integer part of picture quantizer
2296 * @param[in] n block index in the current MB
2297 * @param dc_val_ptr Pointer to DC predictor
2298 * @param dir_ptr Prediction direction for use in AC prediction
2300 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2301 int16_t **dc_val_ptr, int *dir_ptr)
2303 int a, b, c, wrap, pred, scale;
2304 int16_t *dc_val;
2305 static const uint16_t dcpred[32] = {
2306 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2307 114, 102, 93, 85, 79, 73, 68, 64,
2308 60, 57, 54, 51, 49, 47, 45, 43,
2309 41, 39, 38, 37, 35, 34, 33
2312 /* find prediction - wmv3_dc_scale always used here in fact */
2313 if (n < 4) scale = s->y_dc_scale;
2314 else scale = s->c_dc_scale;
2316 wrap = s->block_wrap[n];
2317 dc_val = s->dc_val[0] + s->block_index[n];
2319 /* B A
2320 * C X
2322 c = dc_val[ - 1];
2323 b = dc_val[ - 1 - wrap];
2324 a = dc_val[ - wrap];
2326 if (pq < 9 || !overlap) {
2327 /* Set outer values */
2328 if (s->first_slice_line && (n != 2 && n != 3))
2329 b = a = dcpred[scale];
2330 if (s->mb_x == 0 && (n != 1 && n != 3))
2331 b = c = dcpred[scale];
2332 } else {
2333 /* Set outer values */
2334 if (s->first_slice_line && (n != 2 && n != 3))
2335 b = a = 0;
2336 if (s->mb_x == 0 && (n != 1 && n != 3))
2337 b = c = 0;
2340 if (abs(a - b) <= abs(b - c)) {
2341 pred = c;
2342 *dir_ptr = 1; // left
2343 } else {
2344 pred = a;
2345 *dir_ptr = 0; // top
2348 /* update predictor */
2349 *dc_val_ptr = &dc_val[0];
2350 return pred;
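/* Example of the gradient test above: with a (top) = 100, b (top-left) = 90 and
 * c (left) = 80, abs(a - b) = abs(b - c) = 10, so the tie goes to the left
 * neighbour and the function returns 80 with *dir_ptr = 1. */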
2354 /** Get predicted DC value
2355 * prediction dir: left=0, top=1
2356 * @param s MpegEncContext
2357 * @param overlap flag indicating that overlap filtering is used
2358 * @param pq integer part of picture quantizer
2359 * @param[in] n block index in the current MB
2360 * @param a_avail flag indicating top block availability
2361 * @param c_avail flag indicating left block availability
2362 * @param dc_val_ptr Pointer to DC predictor
2363 * @param dir_ptr Prediction direction for use in AC prediction
2365 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2366 int a_avail, int c_avail,
2367 int16_t **dc_val_ptr, int *dir_ptr)
2369 int a, b, c, wrap, pred;
2370 int16_t *dc_val;
2371 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2372 int q1, q2 = 0;
2373 int dqscale_index;
2375 wrap = s->block_wrap[n];
2376 dc_val = s->dc_val[0] + s->block_index[n];
2378 /* B A
2379 * C X
2381 c = dc_val[ - 1];
2382 b = dc_val[ - 1 - wrap];
2383 a = dc_val[ - wrap];
2384 /* scale predictors if needed */
2385 q1 = s->current_picture.f.qscale_table[mb_pos];
2386 dqscale_index = s->y_dc_scale_table[q1] - 1;
2387 if (dqscale_index < 0)
2388 return 0;
2389 if (c_avail && (n != 1 && n != 3)) {
2390 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2391 if (q2 && q2 != q1)
2392 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2394 if (a_avail && (n != 2 && n != 3)) {
2395 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2396 if (q2 && q2 != q1)
2397 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2399 if (a_avail && c_avail && (n != 3)) {
2400 int off = mb_pos;
2401 if (n != 1)
2402 off--;
2403 if (n != 2)
2404 off -= s->mb_stride;
2405 q2 = s->current_picture.f.qscale_table[off];
2406 if (q2 && q2 != q1)
2407 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
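            /* The three rescales above map a neighbouring DC value coded with
             * quantiser q2 onto the current quantiser q1 without a division:
             * ff_vc1_dqscale[i] is roughly 0x40000 / (i + 1), so
             * x * scale(q2) * dqscale[scale(q1) - 1] >> 18 approximates
             * x * scale(q2) / scale(q1), with the 0x20000 term providing
             * rounding. */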
2410 if (a_avail && c_avail) {
2411 if (abs(a - b) <= abs(b - c)) {
2412 pred = c;
2413 *dir_ptr = 1; // left
2414 } else {
2415 pred = a;
2416 *dir_ptr = 0; // top
2418 } else if (a_avail) {
2419 pred = a;
2420 *dir_ptr = 0; // top
2421 } else if (c_avail) {
2422 pred = c;
2423 *dir_ptr = 1; // left
2424 } else {
2425 pred = 0;
2426 *dir_ptr = 1; // left
2429 /* update predictor */
2430 *dc_val_ptr = &dc_val[0];
2431 return pred;
2434 /** @} */ // Block group
2437 * @name VC1 Macroblock-level functions in Simple/Main Profiles
2438 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
2439 * @{
2442 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2443 uint8_t **coded_block_ptr)
2445 int xy, wrap, pred, a, b, c;
2447 xy = s->block_index[n];
2448 wrap = s->b8_stride;
2450 /* B C
2451 * A X
2453 a = s->coded_block[xy - 1 ];
2454 b = s->coded_block[xy - 1 - wrap];
2455 c = s->coded_block[xy - wrap];
2457 if (b == c) {
2458 pred = a;
2459 } else {
2460 pred = c;
2463 /* store value */
2464 *coded_block_ptr = &s->coded_block[xy];
2466 return pred;
2470 * Decode one AC coefficient
2471 * @param v The VC1 context
2472 * @param last Last coefficient
2473 * @param skip How many zero coefficients to skip
2474 * @param value Decoded AC coefficient value
2475 * @param codingset set of VLC to decode data
2476 * @see 8.1.3.4
2478 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2479 int *value, int codingset)
2481 GetBitContext *gb = &v->s.gb;
2482 int index, escape, run = 0, level = 0, lst = 0;
2484 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2485 if (index != ff_vc1_ac_sizes[codingset] - 1) {
2486 run = vc1_index_decode_table[codingset][index][0];
2487 level = vc1_index_decode_table[codingset][index][1];
2488 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2489 if (get_bits1(gb))
2490 level = -level;
2491 } else {
2492 escape = decode210(gb);
2493 if (escape != 2) {
2494 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2495 run = vc1_index_decode_table[codingset][index][0];
2496 level = vc1_index_decode_table[codingset][index][1];
2497 lst = index >= vc1_last_decode_table[codingset];
2498 if (escape == 0) {
2499 if (lst)
2500 level += vc1_last_delta_level_table[codingset][run];
2501 else
2502 level += vc1_delta_level_table[codingset][run];
2503 } else {
2504 if (lst)
2505 run += vc1_last_delta_run_table[codingset][level] + 1;
2506 else
2507 run += vc1_delta_run_table[codingset][level] + 1;
2509 if (get_bits1(gb))
2510 level = -level;
2511 } else {
2512 int sign;
2513 lst = get_bits1(gb);
2514 if (v->s.esc3_level_length == 0) {
2515 if (v->pq < 8 || v->dquantfrm) { // table 59
2516 v->s.esc3_level_length = get_bits(gb, 3);
2517 if (!v->s.esc3_level_length)
2518 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2519 } else { // table 60
2520 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2522 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2524 run = get_bits(gb, v->s.esc3_run_length);
2525 sign = get_bits1(gb);
2526 level = get_bits(gb, v->s.esc3_level_length);
2527 if (sign)
2528 level = -level;
2532 *last = lst;
2533 *skip = run;
2534 *value = level;
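    /* Summary of the paths above: a regular VLC symbol maps directly to a
     * (run, level, last) triple plus a sign bit.  The final table index is the
     * escape symbol; escape modes 0 and 1 reuse the VLC and then extend the level
     * or the run with a delta from the corresponding table, while escape mode 2
     * reads last, run, sign and level as fixed-width fields whose widths
     * (esc3_run_length / esc3_level_length) are established on first use. */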
2537 /** Decode intra block in intra frames - should be faster than decode_intra_block
2538 * @param v VC1Context
2539 * @param block block to decode
2540 * @param[in] n subblock index
2541 * @param coded whether AC coeffs are present or not
2542 * @param codingset set of VLC to decode data
2544 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2545 int coded, int codingset)
2547 GetBitContext *gb = &v->s.gb;
2548 MpegEncContext *s = &v->s;
2549 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2550 int i;
2551 int16_t *dc_val;
2552 int16_t *ac_val, *ac_val2;
2553 int dcdiff;
2555 /* Get DC differential */
2556 if (n < 4) {
2557 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2558 } else {
2559 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2561 if (dcdiff < 0) {
2562 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2563 return -1;
2565 if (dcdiff) {
2566 if (dcdiff == 119 /* ESC index value */) {
2567 /* TODO: Optimize */
2568 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2569 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2570 else dcdiff = get_bits(gb, 8);
2571 } else {
2572 if (v->pq == 1)
2573 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2574 else if (v->pq == 2)
2575 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2577 if (get_bits1(gb))
2578 dcdiff = -dcdiff;
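        /* A dcdiff of 119 above is the escape symbol: the actual differential is
         * then read as a fixed-width field whose size depends on the quantiser
         * (10, 9 or 8 bits for pq 1, 2 or larger).  Non-escaped symbols at pq 1
         * or 2 are refined with 2 or 1 extra low-order bits, and a final bit
         * gives the sign. */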
2581 /* Prediction */
2582 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2583 *dc_val = dcdiff;
2585 /* Store the quantized DC coeff, used for prediction */
2586 if (n < 4) {
2587 block[0] = dcdiff * s->y_dc_scale;
2588 } else {
2589 block[0] = dcdiff * s->c_dc_scale;
2591 /* Skip ? */
2592 if (!coded) {
2593 goto not_coded;
2596 // AC Decoding
2597 i = 1;
2600 int last = 0, skip, value;
2601 const uint8_t *zz_table;
2602 int scale;
2603 int k;
2605 scale = v->pq * 2 + v->halfpq;
2607 if (v->s.ac_pred) {
2608 if (!dc_pred_dir)
2609 zz_table = v->zz_8x8[2];
2610 else
2611 zz_table = v->zz_8x8[3];
2612 } else
2613 zz_table = v->zz_8x8[1];
2615 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2616 ac_val2 = ac_val;
2617 if (dc_pred_dir) // left
2618 ac_val -= 16;
2619 else // top
2620 ac_val -= 16 * s->block_wrap[n];
2622 while (!last) {
2623 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2624 i += skip;
2625 if (i > 63)
2626 break;
2627 block[zz_table[i++]] = value;
2630 /* apply AC prediction if needed */
2631 if (s->ac_pred) {
2632 if (dc_pred_dir) { // left
2633 for (k = 1; k < 8; k++)
2634 block[k << v->left_blk_sh] += ac_val[k];
2635 } else { // top
2636 for (k = 1; k < 8; k++)
2637 block[k << v->top_blk_sh] += ac_val[k + 8];
2640 /* save AC coeffs for further prediction */
2641 for (k = 1; k < 8; k++) {
2642 ac_val2[k] = block[k << v->left_blk_sh];
2643 ac_val2[k + 8] = block[k << v->top_blk_sh];
2646 /* scale AC coeffs */
2647 for (k = 1; k < 64; k++)
2648 if (block[k]) {
2649 block[k] *= scale;
2650 if (!v->pquantizer)
2651 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2654 if (s->ac_pred) i = 63;
2657 not_coded:
2658 if (!coded) {
2659 int k, scale;
2660 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2661 ac_val2 = ac_val;
2663 i = 0;
2664 scale = v->pq * 2 + v->halfpq;
2665 memset(ac_val2, 0, 16 * 2);
2666 if (dc_pred_dir) { // left
2667 ac_val -= 16;
2668 if (s->ac_pred)
2669 memcpy(ac_val2, ac_val, 8 * 2);
2670 } else { // top
2671 ac_val -= 16 * s->block_wrap[n];
2672 if (s->ac_pred)
2673 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2676 /* apply AC prediction if needed */
2677 if (s->ac_pred) {
2678 if (dc_pred_dir) { //left
2679 for (k = 1; k < 8; k++) {
2680 block[k << v->left_blk_sh] = ac_val[k] * scale;
2681 if (!v->pquantizer && block[k << v->left_blk_sh])
2682 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2684 } else { // top
2685 for (k = 1; k < 8; k++) {
2686 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2687 if (!v->pquantizer && block[k << v->top_blk_sh])
2688 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2691 i = 63;
2694 s->block_last_index[n] = i;
2696 return 0;
2699 /** Decode intra block in intra frames - should be faster than decode_intra_block
2700 * @param v VC1Context
2701 * @param block block to decode
2702 * @param[in] n subblock number
2703 * @param coded whether AC coeffs are present or not
2704 * @param codingset set of VLC to decode data
2705 * @param mquant quantizer value for this macroblock
2707 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2708 int coded, int codingset, int mquant)
2710 GetBitContext *gb = &v->s.gb;
2711 MpegEncContext *s = &v->s;
2712 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2713 int i;
2714 int16_t *dc_val;
2715 int16_t *ac_val, *ac_val2;
2716 int dcdiff;
2717 int a_avail = v->a_avail, c_avail = v->c_avail;
2718 int use_pred = s->ac_pred;
2719 int scale;
2720 int q1, q2 = 0;
2721 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2723 /* Get DC differential */
2724 if (n < 4) {
2725 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2726 } else {
2727 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2729 if (dcdiff < 0) {
2730 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2731 return -1;
2733 if (dcdiff) {
2734 if (dcdiff == 119 /* ESC index value */) {
2735 /* TODO: Optimize */
2736 if (mquant == 1) dcdiff = get_bits(gb, 10);
2737 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2738 else dcdiff = get_bits(gb, 8);
2739 } else {
2740 if (mquant == 1)
2741 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2742 else if (mquant == 2)
2743 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2745 if (get_bits1(gb))
2746 dcdiff = -dcdiff;
2749 /* Prediction */
2750 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2751 *dc_val = dcdiff;
2753 /* Store the quantized DC coeff, used for prediction */
2754 if (n < 4) {
2755 block[0] = dcdiff * s->y_dc_scale;
2756 } else {
2757 block[0] = dcdiff * s->c_dc_scale;
2760 //AC Decoding
2761 i = 1;
2763 /* check if AC is needed at all */
2764 if (!a_avail && !c_avail)
2765 use_pred = 0;
2766 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2767 ac_val2 = ac_val;
2769 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2771 if (dc_pred_dir) // left
2772 ac_val -= 16;
2773 else // top
2774 ac_val -= 16 * s->block_wrap[n];
2776 q1 = s->current_picture.f.qscale_table[mb_pos];
2777 if ( dc_pred_dir && c_avail && mb_pos)
2778 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2779 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2780 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2781 if ( dc_pred_dir && n == 1)
2782 q2 = q1;
2783 if (!dc_pred_dir && n == 2)
2784 q2 = q1;
2785 if (n == 3)
2786 q2 = q1;
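    /* q1/q2 above select the quantiser of the block the stored AC predictors came
     * from: the left macroblock when predicting from the left, the macroblock
     * above when predicting from the top, and the current one for blocks 1-3
     * (whose neighbour lies inside the same MB).  They drive the predictor
     * rescaling below. */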
2788 if (coded) {
2789 int last = 0, skip, value;
2790 const uint8_t *zz_table;
2791 int k;
2793 if (v->s.ac_pred) {
2794 if (!use_pred && v->fcm == ILACE_FRAME) {
2795 zz_table = v->zzi_8x8;
2796 } else {
2797 if (!dc_pred_dir) // top
2798 zz_table = v->zz_8x8[2];
2799 else // left
2800 zz_table = v->zz_8x8[3];
2802 } else {
2803 if (v->fcm != ILACE_FRAME)
2804 zz_table = v->zz_8x8[1];
2805 else
2806 zz_table = v->zzi_8x8;
2809 while (!last) {
2810 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2811 i += skip;
2812 if (i > 63)
2813 break;
2814 block[zz_table[i++]] = value;
2817 /* apply AC prediction if needed */
2818 if (use_pred) {
2819 /* scale predictors if needed */
2820 if (q2 && q1 != q2) {
2821 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2822 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2824 if (q1 < 1)
2825 return AVERROR_INVALIDDATA;
2826 if (dc_pred_dir) { // left
2827 for (k = 1; k < 8; k++)
2828 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2829 } else { // top
2830 for (k = 1; k < 8; k++)
2831 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2833 } else {
2834 if (dc_pred_dir) { //left
2835 for (k = 1; k < 8; k++)
2836 block[k << v->left_blk_sh] += ac_val[k];
2837 } else { //top
2838 for (k = 1; k < 8; k++)
2839 block[k << v->top_blk_sh] += ac_val[k + 8];
2843 /* save AC coeffs for further prediction */
2844 for (k = 1; k < 8; k++) {
2845 ac_val2[k ] = block[k << v->left_blk_sh];
2846 ac_val2[k + 8] = block[k << v->top_blk_sh];
2849 /* scale AC coeffs */
2850 for (k = 1; k < 64; k++)
2851 if (block[k]) {
2852 block[k] *= scale;
2853 if (!v->pquantizer)
2854 block[k] += (block[k] < 0) ? -mquant : mquant;
2857 if (use_pred) i = 63;
2858 } else { // no AC coeffs
2859 int k;
2861 memset(ac_val2, 0, 16 * 2);
2862 if (dc_pred_dir) { // left
2863 if (use_pred) {
2864 memcpy(ac_val2, ac_val, 8 * 2);
2865 if (q2 && q1 != q2) {
2866 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2867 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2868 if (q1 < 1)
2869 return AVERROR_INVALIDDATA;
2870 for (k = 1; k < 8; k++)
2871 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2874 } else { // top
2875 if (use_pred) {
2876 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2877 if (q2 && q1 != q2) {
2878 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2879 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2880 if (q1 < 1)
2881 return AVERROR_INVALIDDATA;
2882 for (k = 1; k < 8; k++)
2883 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2888 /* apply AC prediction if needed */
2889 if (use_pred) {
2890 if (dc_pred_dir) { // left
2891 for (k = 1; k < 8; k++) {
2892 block[k << v->left_blk_sh] = ac_val2[k] * scale;
2893 if (!v->pquantizer && block[k << v->left_blk_sh])
2894 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2896 } else { // top
2897 for (k = 1; k < 8; k++) {
2898 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2899 if (!v->pquantizer && block[k << v->top_blk_sh])
2900 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2903 i = 63;
2906 s->block_last_index[n] = i;
2908 return 0;
2911 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2912 * @param v VC1Context
2913 * @param block block to decode
2914 * @param[in] n subblock index
2915 * @param coded whether AC coeffs are present or not
2916 * @param mquant block quantizer
2917 * @param codingset set of VLC to decode data
2919 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
2920 int coded, int mquant, int codingset)
2922 GetBitContext *gb = &v->s.gb;
2923 MpegEncContext *s = &v->s;
2924 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2925 int i;
2926 int16_t *dc_val;
2927 int16_t *ac_val, *ac_val2;
2928 int dcdiff;
2929 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2930 int a_avail = v->a_avail, c_avail = v->c_avail;
2931 int use_pred = s->ac_pred;
2932 int scale;
2933 int q1, q2 = 0;
2935 s->dsp.clear_block(block);
2937 /* XXX: Guard against dumb values of mquant */
2938 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
2940 /* Set DC scale - y and c use the same */
2941 s->y_dc_scale = s->y_dc_scale_table[mquant];
2942 s->c_dc_scale = s->c_dc_scale_table[mquant];
2944 /* Get DC differential */
2945 if (n < 4) {
2946 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2947 } else {
2948 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2950 if (dcdiff < 0) {
2951 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2952 return -1;
2954 if (dcdiff) {
2955 if (dcdiff == 119 /* ESC index value */) {
2956 /* TODO: Optimize */
2957 if (mquant == 1) dcdiff = get_bits(gb, 10);
2958 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2959 else dcdiff = get_bits(gb, 8);
2960 } else {
2961 if (mquant == 1)
2962 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2963 else if (mquant == 2)
2964 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2966 if (get_bits1(gb))
2967 dcdiff = -dcdiff;
2970 /* Prediction */
2971 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2972 *dc_val = dcdiff;
2974 /* Store the quantized DC coeff, used for prediction */
2976 if (n < 4) {
2977 block[0] = dcdiff * s->y_dc_scale;
2978 } else {
2979 block[0] = dcdiff * s->c_dc_scale;
2982 //AC Decoding
2983 i = 1;
2985 /* check if AC is needed at all and adjust direction if needed */
2986 if (!a_avail) dc_pred_dir = 1;
2987 if (!c_avail) dc_pred_dir = 0;
2988 if (!a_avail && !c_avail) use_pred = 0;
2989 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2990 ac_val2 = ac_val;
2992 scale = mquant * 2 + v->halfpq;
2994 if (dc_pred_dir) //left
2995 ac_val -= 16;
2996 else //top
2997 ac_val -= 16 * s->block_wrap[n];
2999 q1 = s->current_picture.f.qscale_table[mb_pos];
3000 if (dc_pred_dir && c_avail && mb_pos)
3001 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3002 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3003 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3004 if ( dc_pred_dir && n == 1)
3005 q2 = q1;
3006 if (!dc_pred_dir && n == 2)
3007 q2 = q1;
3008 if (n == 3) q2 = q1;
3010 if (coded) {
3011 int last = 0, skip, value;
3012 int k;
3014 while (!last) {
3015 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3016 i += skip;
3017 if (i > 63)
3018 break;
3019 if (v->fcm == PROGRESSIVE)
3020 block[v->zz_8x8[0][i++]] = value;
3021 else {
3022 if (use_pred && (v->fcm == ILACE_FRAME)) {
3023 if (!dc_pred_dir) // top
3024 block[v->zz_8x8[2][i++]] = value;
3025 else // left
3026 block[v->zz_8x8[3][i++]] = value;
3027 } else {
3028 block[v->zzi_8x8[i++]] = value;
3033 /* apply AC prediction if needed */
3034 if (use_pred) {
3035 /* scale predictors if needed */
3036 if (q2 && q1 != q2) {
3037 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3038 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3040 if (q1 < 1)
3041 return AVERROR_INVALIDDATA;
3042 if (dc_pred_dir) { // left
3043 for (k = 1; k < 8; k++)
3044 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3045 } else { //top
3046 for (k = 1; k < 8; k++)
3047 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3049 } else {
3050 if (dc_pred_dir) { // left
3051 for (k = 1; k < 8; k++)
3052 block[k << v->left_blk_sh] += ac_val[k];
3053 } else { // top
3054 for (k = 1; k < 8; k++)
3055 block[k << v->top_blk_sh] += ac_val[k + 8];
3059 /* save AC coeffs for further prediction */
3060 for (k = 1; k < 8; k++) {
3061 ac_val2[k ] = block[k << v->left_blk_sh];
3062 ac_val2[k + 8] = block[k << v->top_blk_sh];
3065 /* scale AC coeffs */
3066 for (k = 1; k < 64; k++)
3067 if (block[k]) {
3068 block[k] *= scale;
3069 if (!v->pquantizer)
3070 block[k] += (block[k] < 0) ? -mquant : mquant;
3073 if (use_pred) i = 63;
3074 } else { // no AC coeffs
3075 int k;
3077 memset(ac_val2, 0, 16 * 2);
3078 if (dc_pred_dir) { // left
3079 if (use_pred) {
3080 memcpy(ac_val2, ac_val, 8 * 2);
3081 if (q2 && q1 != q2) {
3082 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3083 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3084 if (q1 < 1)
3085 return AVERROR_INVALIDDATA;
3086 for (k = 1; k < 8; k++)
3087 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3090 } else { // top
3091 if (use_pred) {
3092 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3093 if (q2 && q1 != q2) {
3094 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3095 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3096 if (q1 < 1)
3097 return AVERROR_INVALIDDATA;
3098 for (k = 1; k < 8; k++)
3099 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3104 /* apply AC prediction if needed */
3105 if (use_pred) {
3106 if (dc_pred_dir) { // left
3107 for (k = 1; k < 8; k++) {
3108 block[k << v->left_blk_sh] = ac_val2[k] * scale;
3109 if (!v->pquantizer && block[k << v->left_blk_sh])
3110 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3112 } else { // top
3113 for (k = 1; k < 8; k++) {
3114 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3115 if (!v->pquantizer && block[k << v->top_blk_sh])
3116 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3119 i = 63;
3122 s->block_last_index[n] = i;
3124 return 0;
3127 /** Decode P block
3129 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3130 int mquant, int ttmb, int first_block,
3131 uint8_t *dst, int linesize, int skip_block,
3132 int *ttmb_out)
3134 MpegEncContext *s = &v->s;
3135 GetBitContext *gb = &s->gb;
3136 int i, j;
3137 int subblkpat = 0;
3138 int scale, off, idx, last, skip, value;
3139 int ttblk = ttmb & 7;
3140 int pat = 0;
3142 s->dsp.clear_block(block);
3144 if (ttmb == -1) {
3145 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3147 if (ttblk == TT_4X4) {
3148 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3150 if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3151 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3152 || (!v->res_rtm_flag && !first_block))) {
3153 subblkpat = decode012(gb);
3154 if (subblkpat)
3155 subblkpat ^= 3; // swap decoded pattern bits
3156 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3157 ttblk = TT_8X4;
3158 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3159 ttblk = TT_4X8;
3161 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3163 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3164 if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3165 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3166 ttblk = TT_8X4;
3168 if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3169 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3170 ttblk = TT_4X8;
3172 switch (ttblk) {
3173 case TT_8X8:
3174 pat = 0xF;
3175 i = 0;
3176 last = 0;
3177 while (!last) {
3178 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3179 i += skip;
3180 if (i > 63)
3181 break;
3182 if (!v->fcm)
3183 idx = v->zz_8x8[0][i++];
3184 else
3185 idx = v->zzi_8x8[i++];
3186 block[idx] = value * scale;
3187 if (!v->pquantizer)
3188 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3190 if (!skip_block) {
3191 if (i == 1)
3192 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3193 else {
3194 v->vc1dsp.vc1_inv_trans_8x8(block);
3195 s->dsp.add_pixels_clamped(block, dst, linesize);
3198 break;
3199 case TT_4X4:
3200 pat = ~subblkpat & 0xF;
3201 for (j = 0; j < 4; j++) {
3202 last = subblkpat & (1 << (3 - j));
3203 i = 0;
3204 off = (j & 1) * 4 + (j & 2) * 16;
3205 while (!last) {
3206 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3207 i += skip;
3208 if (i > 15)
3209 break;
3210 if (!v->fcm)
3211 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3212 else
3213 idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3214 block[idx + off] = value * scale;
3215 if (!v->pquantizer)
3216 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3218 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3219 if (i == 1)
3220 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3221 else
3222 v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3225 break;
3226 case TT_8X4:
3227 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3228 for (j = 0; j < 2; j++) {
3229 last = subblkpat & (1 << (1 - j));
3230 i = 0;
3231 off = j * 32;
3232 while (!last) {
3233 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3234 i += skip;
3235 if (i > 31)
3236 break;
3237 if (!v->fcm)
3238 idx = v->zz_8x4[i++] + off;
3239 else
3240 idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3241 block[idx] = value * scale;
3242 if (!v->pquantizer)
3243 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3245 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3246 if (i == 1)
3247 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3248 else
3249 v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3252 break;
3253 case TT_4X8:
3254 pat = ~(subblkpat * 5) & 0xF;
3255 for (j = 0; j < 2; j++) {
3256 last = subblkpat & (1 << (1 - j));
3257 i = 0;
3258 off = j * 4;
3259 while (!last) {
3260 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3261 i += skip;
3262 if (i > 31)
3263 break;
3264 if (!v->fcm)
3265 idx = v->zz_4x8[i++] + off;
3266 else
3267 idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3268 block[idx] = value * scale;
3269 if (!v->pquantizer)
3270 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3272 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3273 if (i == 1)
3274 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3275 else
3276 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3279 break;
3281 if (ttmb_out)
3282 *ttmb_out |= ttblk << (n * 4);
3283 return pat;
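    /* The returned pat is a 4-bit mask of coded 4x4 quadrants for this block (0xF
     * for a full 8x8 transform); the callers appear to accumulate it, 4 bits per
     * block, into the per-MB CBP used by the loop filter, and the effective
     * transform type is packed into *ttmb_out the same way. */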
3286 /** @} */ // Macroblock group
3288 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3289 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3291 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3293 MpegEncContext *s = &v->s;
3294 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3295 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3296 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3297 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3298 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3299 uint8_t *dst;
3301 if (block_num > 3) {
3302 dst = s->dest[block_num - 3];
3303 } else {
3304 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3306 if (s->mb_y != s->end_mb_y || block_num < 2) {
3307 int16_t (*mv)[2];
3308 int mv_stride;
3310 if (block_num > 3) {
3311 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3312 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3313 mv = &v->luma_mv[s->mb_x - s->mb_stride];
3314 mv_stride = s->mb_stride;
3315 } else {
3316 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3317 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3318 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3319 : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3320 mv_stride = s->b8_stride;
3321 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3324 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3325 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3326 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3327 } else {
3328 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3329 if (idx == 3) {
3330 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3331 } else if (idx) {
3332 if (idx == 1)
3333 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3334 else
3335 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3340 dst -= 4 * linesize;
3341 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3342 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3343 idx = (block_cbp | (block_cbp >> 2)) & 3;
3344 if (idx == 3) {
3345 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3346 } else if (idx) {
3347 if (idx == 1)
3348 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3349 else
3350 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3355 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3357 MpegEncContext *s = &v->s;
3358 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3359 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3360 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3361 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3362 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3363 uint8_t *dst;
3365 if (block_num > 3) {
3366 dst = s->dest[block_num - 3] - 8 * linesize;
3367 } else {
3368 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3371 if (s->mb_x != s->mb_width || !(block_num & 5)) {
3372 int16_t (*mv)[2];
3374 if (block_num > 3) {
3375 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3376 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3377 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3378 } else {
3379 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3380 : (mb_cbp >> ((block_num + 1) * 4));
3381 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3382 : (mb_is_intra >> ((block_num + 1) * 4));
3383 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3385 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3386 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3387 } else {
3388 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3389 if (idx == 5) {
3390 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3391 } else if (idx) {
3392 if (idx == 1)
3393 v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3394 else
3395 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3400 dst -= 4;
3401 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3402 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3403 idx = (block_cbp | (block_cbp >> 1)) & 5;
3404 if (idx == 5) {
3405 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3406 } else if (idx) {
3407 if (idx == 1)
3408 v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3409 else
3410 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3415 static void vc1_apply_p_loop_filter(VC1Context *v)
3417 MpegEncContext *s = &v->s;
3418 int i;
3420 for (i = 0; i < 6; i++) {
3421 vc1_apply_p_v_loop_filter(v, i);
3424 /* V always precedes H, therefore we run H one MB before V;
3425 * at the end of a row, we catch up to complete the row */
3426 if (s->mb_x) {
3427 for (i = 0; i < 6; i++) {
3428 vc1_apply_p_h_loop_filter(v, i);
3430 if (s->mb_x == s->mb_width - 1) {
3431 s->mb_x++;
3432 ff_update_block_index(s);
3433 for (i = 0; i < 6; i++) {
3434 vc1_apply_p_h_loop_filter(v, i);
3440 /** Decode one P-frame MB
3442 static int vc1_decode_p_mb(VC1Context *v)
3444 MpegEncContext *s = &v->s;
3445 GetBitContext *gb = &s->gb;
3446 int i, j;
3447 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3448 int cbp; /* cbp decoding stuff */
3449 int mqdiff, mquant; /* MB quantization */
3450 int ttmb = v->ttfrm; /* MB Transform type */
3452 int mb_has_coeffs = 1; /* last_flag */
3453 int dmv_x, dmv_y; /* Differential MV components */
3454 int index, index1; /* LUT indexes */
3455 int val, sign; /* temp values */
3456 int first_block = 1;
3457 int dst_idx, off;
3458 int skipped, fourmv;
3459 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3461 mquant = v->pq; /* lossy initialization */
3463 if (v->mv_type_is_raw)
3464 fourmv = get_bits1(gb);
3465 else
3466 fourmv = v->mv_type_mb_plane[mb_pos];
3467 if (v->skip_is_raw)
3468 skipped = get_bits1(gb);
3469 else
3470 skipped = v->s.mbskip_table[mb_pos];
3472 if (!fourmv) { /* 1MV mode */
3473 if (!skipped) {
3474 GET_MVDATA(dmv_x, dmv_y);
3476 if (s->mb_intra) {
3477 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3478 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3480 s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3481 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3483 /* FIXME Set DC val for inter block ? */
3484 if (s->mb_intra && !mb_has_coeffs) {
3485 GET_MQUANT();
3486 s->ac_pred = get_bits1(gb);
3487 cbp = 0;
3488 } else if (mb_has_coeffs) {
3489 if (s->mb_intra)
3490 s->ac_pred = get_bits1(gb);
3491 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3492 GET_MQUANT();
3493 } else {
3494 mquant = v->pq;
3495 cbp = 0;
3497 s->current_picture.f.qscale_table[mb_pos] = mquant;
3499 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3500 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3501 VC1_TTMB_VLC_BITS, 2);
3502 if (!s->mb_intra) vc1_mc_1mv(v, 0);
3503 dst_idx = 0;
3504 for (i = 0; i < 6; i++) {
3505 s->dc_val[0][s->block_index[i]] = 0;
3506 dst_idx += i >> 2;
3507 val = ((cbp >> (5 - i)) & 1);
3508 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3509 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3510 if (s->mb_intra) {
3511 /* check if prediction blocks A and C are available */
3512 v->a_avail = v->c_avail = 0;
3513 if (i == 2 || i == 3 || !s->first_slice_line)
3514 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3515 if (i == 1 || i == 3 || s->mb_x)
3516 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3518 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3519 (i & 4) ? v->codingset2 : v->codingset);
3520 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3521 continue;
3522 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3523 if (v->rangeredfrm)
3524 for (j = 0; j < 64; j++)
3525 s->block[i][j] <<= 1;
3526 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3527 if (v->pq >= 9 && v->overlap) {
3528 if (v->c_avail)
3529 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3530 if (v->a_avail)
3531 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3533 block_cbp |= 0xF << (i << 2);
3534 block_intra |= 1 << i;
3535 } else if (val) {
3536 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3537 s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3538 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3539 block_cbp |= pat << (i << 2);
3540 if (!v->ttmbf && ttmb < 8)
3541 ttmb = -1;
3542 first_block = 0;
3545 } else { // skipped
3546 s->mb_intra = 0;
3547 for (i = 0; i < 6; i++) {
3548 v->mb_type[0][s->block_index[i]] = 0;
3549 s->dc_val[0][s->block_index[i]] = 0;
3551 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3552 s->current_picture.f.qscale_table[mb_pos] = 0;
3553 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3554 vc1_mc_1mv(v, 0);
3556 } else { // 4MV mode
3557 if (!skipped /* unskipped MB */) {
3558 int intra_count = 0, coded_inter = 0;
3559 int is_intra[6], is_coded[6];
3560 /* Get CBPCY */
3561 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3562 for (i = 0; i < 6; i++) {
3563 val = ((cbp >> (5 - i)) & 1);
3564 s->dc_val[0][s->block_index[i]] = 0;
3565 s->mb_intra = 0;
3566 if (i < 4) {
3567 dmv_x = dmv_y = 0;
3568 s->mb_intra = 0;
3569 mb_has_coeffs = 0;
3570 if (val) {
3571 GET_MVDATA(dmv_x, dmv_y);
3573 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3574 if (!s->mb_intra)
3575 vc1_mc_4mv_luma(v, i, 0);
3576 intra_count += s->mb_intra;
3577 is_intra[i] = s->mb_intra;
3578 is_coded[i] = mb_has_coeffs;
3580 if (i & 4) {
3581 is_intra[i] = (intra_count >= 3);
3582 is_coded[i] = val;
3584 if (i == 4)
3585 vc1_mc_4mv_chroma(v, 0);
3586 v->mb_type[0][s->block_index[i]] = is_intra[i];
3587 if (!coded_inter)
3588 coded_inter = !is_intra[i] && is_coded[i];
3590 // if there are no coded blocks then don't do anything more
3591 dst_idx = 0;
3592 if (!intra_count && !coded_inter)
3593 goto end;
3594 GET_MQUANT();
3595 s->current_picture.f.qscale_table[mb_pos] = mquant;
3596 /* test if block is intra and has pred */
3598 int intrapred = 0;
3599 for (i = 0; i < 6; i++)
3600 if (is_intra[i]) {
3601 if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3602 || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3603 intrapred = 1;
3604 break;
3607 if (intrapred)
3608 s->ac_pred = get_bits1(gb);
3609 else
3610 s->ac_pred = 0;
3612 if (!v->ttmbf && coded_inter)
3613 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3614 for (i = 0; i < 6; i++) {
3615 dst_idx += i >> 2;
3616 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3617 s->mb_intra = is_intra[i];
3618 if (is_intra[i]) {
3619 /* check if prediction blocks A and C are available */
3620 v->a_avail = v->c_avail = 0;
3621 if (i == 2 || i == 3 || !s->first_slice_line)
3622 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3623 if (i == 1 || i == 3 || s->mb_x)
3624 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3626 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3627 (i & 4) ? v->codingset2 : v->codingset);
3628 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3629 continue;
3630 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3631 if (v->rangeredfrm)
3632 for (j = 0; j < 64; j++)
3633 s->block[i][j] <<= 1;
3634 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3635 (i & 4) ? s->uvlinesize : s->linesize);
3636 if (v->pq >= 9 && v->overlap) {
3637 if (v->c_avail)
3638 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3639 if (v->a_avail)
3640 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3642 block_cbp |= 0xF << (i << 2);
3643 block_intra |= 1 << i;
3644 } else if (is_coded[i]) {
3645 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3646 first_block, s->dest[dst_idx] + off,
3647 (i & 4) ? s->uvlinesize : s->linesize,
3648 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3649 &block_tt);
3650 block_cbp |= pat << (i << 2);
3651 if (!v->ttmbf && ttmb < 8)
3652 ttmb = -1;
3653 first_block = 0;
3656 } else { // skipped MB
3657 s->mb_intra = 0;
3658 s->current_picture.f.qscale_table[mb_pos] = 0;
3659 for (i = 0; i < 6; i++) {
3660 v->mb_type[0][s->block_index[i]] = 0;
3661 s->dc_val[0][s->block_index[i]] = 0;
3663 for (i = 0; i < 4; i++) {
3664 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3665 vc1_mc_4mv_luma(v, i, 0);
3667 vc1_mc_4mv_chroma(v, 0);
3668 s->current_picture.f.qscale_table[mb_pos] = 0;
3671 end:
3672 v->cbp[s->mb_x] = block_cbp;
3673 v->ttblk[s->mb_x] = block_tt;
3674 v->is_intra[s->mb_x] = block_intra;
3676 return 0;
3679 /* Decode one macroblock in an interlaced frame p picture */
3681 static int vc1_decode_p_mb_intfr(VC1Context *v)
3683 MpegEncContext *s = &v->s;
3684 GetBitContext *gb = &s->gb;
3685 int i;
3686 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3687 int cbp = 0; /* cbp decoding stuff */
3688 int mqdiff, mquant; /* MB quantization */
3689 int ttmb = v->ttfrm; /* MB Transform type */
3691 int mb_has_coeffs = 1; /* last_flag */
3692 int dmv_x, dmv_y; /* Differential MV components */
3693 int val; /* temp value */
3694 int first_block = 1;
3695 int dst_idx, off;
3696 int skipped, fourmv = 0, twomv = 0;
3697 int block_cbp = 0, pat, block_tt = 0;
3698 int idx_mbmode = 0, mvbp;
3699 int stride_y, fieldtx;
3701 mquant = v->pq; /* Lossy initialization */
3703 if (v->skip_is_raw)
3704 skipped = get_bits1(gb);
3705 else
3706 skipped = v->s.mbskip_table[mb_pos];
3707 if (!skipped) {
3708 if (v->fourmvswitch)
3709 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3710 else
3711 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3712 switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3713 /* store the motion vector type in a flag (useful later) */
3714 case MV_PMODE_INTFR_4MV:
3715 fourmv = 1;
3716 v->blk_mv_type[s->block_index[0]] = 0;
3717 v->blk_mv_type[s->block_index[1]] = 0;
3718 v->blk_mv_type[s->block_index[2]] = 0;
3719 v->blk_mv_type[s->block_index[3]] = 0;
3720 break;
3721 case MV_PMODE_INTFR_4MV_FIELD:
3722 fourmv = 1;
3723 v->blk_mv_type[s->block_index[0]] = 1;
3724 v->blk_mv_type[s->block_index[1]] = 1;
3725 v->blk_mv_type[s->block_index[2]] = 1;
3726 v->blk_mv_type[s->block_index[3]] = 1;
3727 break;
3728 case MV_PMODE_INTFR_2MV_FIELD:
3729 twomv = 1;
3730 v->blk_mv_type[s->block_index[0]] = 1;
3731 v->blk_mv_type[s->block_index[1]] = 1;
3732 v->blk_mv_type[s->block_index[2]] = 1;
3733 v->blk_mv_type[s->block_index[3]] = 1;
3734 break;
3735 case MV_PMODE_INTFR_1MV:
3736 v->blk_mv_type[s->block_index[0]] = 0;
3737 v->blk_mv_type[s->block_index[1]] = 0;
3738 v->blk_mv_type[s->block_index[2]] = 0;
3739 v->blk_mv_type[s->block_index[3]] = 0;
3740 break;
3742 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3743 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3744 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3745 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3746 s->mb_intra = v->is_intra[s->mb_x] = 1;
3747 for (i = 0; i < 6; i++)
3748 v->mb_type[0][s->block_index[i]] = 1;
3749 fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3750 mb_has_coeffs = get_bits1(gb);
3751 if (mb_has_coeffs)
3752 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3753 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3754 GET_MQUANT();
3755 s->current_picture.f.qscale_table[mb_pos] = mquant;
3756 /* Set DC scale - y and c use the same (not sure if necessary here) */
3757 s->y_dc_scale = s->y_dc_scale_table[mquant];
3758 s->c_dc_scale = s->c_dc_scale_table[mquant];
3759 dst_idx = 0;
3760 for (i = 0; i < 6; i++) {
3761 s->dc_val[0][s->block_index[i]] = 0;
3762 dst_idx += i >> 2;
3763 val = ((cbp >> (5 - i)) & 1);
3764 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3765 v->a_avail = v->c_avail = 0;
3766 if (i == 2 || i == 3 || !s->first_slice_line)
3767 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3768 if (i == 1 || i == 3 || s->mb_x)
3769 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3771 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3772 (i & 4) ? v->codingset2 : v->codingset);
3773 if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3774 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
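            /* with field transform (fieldtx), luma blocks are written with a doubled
             * stride so that their rows interleave into the two fields; chroma keeps
             * the normal frame layout */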
3775 if (i < 4) {
3776 stride_y = s->linesize << fieldtx;
3777 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3778 } else {
3779 stride_y = s->uvlinesize;
3780 off = 0;
3782 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3783 //TODO: loop filter
3786 } else { // inter MB
3787 mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3788 if (mb_has_coeffs)
3789 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3790 if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3791 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3792 } else {
3793 if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3794 || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3795 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3798 s->mb_intra = v->is_intra[s->mb_x] = 0;
3799 for (i = 0; i < 6; i++)
3800 v->mb_type[0][s->block_index[i]] = 0;
3801 fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3802 /* for all motion vectors, read MVDATA and motion-compensate each block */
3803 dst_idx = 0;
3804 if (fourmv) {
3805 mvbp = v->fourmvbp;
3806 for (i = 0; i < 6; i++) {
3807 if (i < 4) {
3808 dmv_x = dmv_y = 0;
3809 val = ((mvbp >> (3 - i)) & 1);
3810 if (val) {
3811 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3813 vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3814 vc1_mc_4mv_luma(v, i, 0);
3815 } else if (i == 4) {
3816 vc1_mc_4mv_chroma4(v);
3819 } else if (twomv) {
3820 mvbp = v->twomvbp;
3821 dmv_x = dmv_y = 0;
3822 if (mvbp & 2) {
3823 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3825 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3826 vc1_mc_4mv_luma(v, 0, 0);
3827 vc1_mc_4mv_luma(v, 1, 0);
3828 dmv_x = dmv_y = 0;
3829 if (mvbp & 1) {
3830 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3832 vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3833 vc1_mc_4mv_luma(v, 2, 0);
3834 vc1_mc_4mv_luma(v, 3, 0);
3835 vc1_mc_4mv_chroma4(v);
3836 } else {
3837 mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3838 dmv_x = dmv_y = 0;
3839 if (mvbp) {
3840 get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3842 vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3843 vc1_mc_1mv(v, 0);
3845 if (cbp)
3846 GET_MQUANT(); // p. 227
3847 s->current_picture.f.qscale_table[mb_pos] = mquant;
3848 if (!v->ttmbf && cbp)
3849 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3850 for (i = 0; i < 6; i++) {
3851 s->dc_val[0][s->block_index[i]] = 0;
3852 dst_idx += i >> 2;
3853 val = ((cbp >> (5 - i)) & 1);
3854 if (!fieldtx)
3855 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3856 else
3857 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3858 if (val) {
3859 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3860 first_block, s->dest[dst_idx] + off,
3861 (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3862 (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3863 block_cbp |= pat << (i << 2);
3864 if (!v->ttmbf && ttmb < 8)
3865 ttmb = -1;
3866 first_block = 0;
3870 } else { // skipped
3871 s->mb_intra = v->is_intra[s->mb_x] = 0;
3872 for (i = 0; i < 6; i++) {
3873 v->mb_type[0][s->block_index[i]] = 0;
3874 s->dc_val[0][s->block_index[i]] = 0;
3876 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3877 s->current_picture.f.qscale_table[mb_pos] = 0;
3878 v->blk_mv_type[s->block_index[0]] = 0;
3879 v->blk_mv_type[s->block_index[1]] = 0;
3880 v->blk_mv_type[s->block_index[2]] = 0;
3881 v->blk_mv_type[s->block_index[3]] = 0;
3882 vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3883 vc1_mc_1mv(v, 0);
3885 if (s->mb_x == s->mb_width - 1)
3886 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3887 return 0;
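/** Decode one P-frame MB (in interlaced field P picture) */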
3890 static int vc1_decode_p_mb_intfi(VC1Context *v)
3892 MpegEncContext *s = &v->s;
3893 GetBitContext *gb = &s->gb;
3894 int i;
3895 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3896 int cbp = 0; /* cbp decoding stuff */
3897 int mqdiff, mquant; /* MB quantization */
3898 int ttmb = v->ttfrm; /* MB Transform type */
3900 int mb_has_coeffs = 1; /* last_flag */
3901 int dmv_x, dmv_y; /* Differential MV components */
3902 int val; /* temp values */
3903 int first_block = 1;
3904 int dst_idx, off;
3905 int pred_flag;
3906 int block_cbp = 0, pat, block_tt = 0;
3907 int idx_mbmode = 0;
3909 mquant = v->pq; /* Lossy initialization */
3911 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
3912 if (idx_mbmode <= 1) { // intra MB
3913 s->mb_intra = v->is_intra[s->mb_x] = 1;
3914 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
3915 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
3916 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
3917 GET_MQUANT();
3918 s->current_picture.f.qscale_table[mb_pos] = mquant;
3919 /* Set DC scale - y and c use the same (not sure if necessary here) */
3920 s->y_dc_scale = s->y_dc_scale_table[mquant];
3921 s->c_dc_scale = s->c_dc_scale_table[mquant];
3922 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3923 mb_has_coeffs = idx_mbmode & 1;
3924 if (mb_has_coeffs)
3925 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
3926 dst_idx = 0;
3927 for (i = 0; i < 6; i++) {
3928 s->dc_val[0][s->block_index[i]] = 0;
3929 v->mb_type[0][s->block_index[i]] = 1;
3930 dst_idx += i >> 2;
3931 val = ((cbp >> (5 - i)) & 1);
3932 v->a_avail = v->c_avail = 0;
3933 if (i == 2 || i == 3 || !s->first_slice_line)
3934 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3935 if (i == 1 || i == 3 || s->mb_x)
3936 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3938 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3939 (i & 4) ? v->codingset2 : v->codingset);
3940 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3941 continue;
3942 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3943 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3944 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
3945 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
3946 // TODO: loop filter
3948 } else {
3949 s->mb_intra = v->is_intra[s->mb_x] = 0;
3950 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
3951 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
3952 if (idx_mbmode <= 5) { // 1-MV
3953 dmv_x = dmv_y = pred_flag = 0;
3954 if (idx_mbmode & 1) {
3955 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3957 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3958 vc1_mc_1mv(v, 0);
3959 mb_has_coeffs = !(idx_mbmode & 2);
3960 } else { // 4-MV
3961 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3962 for (i = 0; i < 6; i++) {
3963 if (i < 4) {
3964 dmv_x = dmv_y = pred_flag = 0;
3965 val = ((v->fourmvbp >> (3 - i)) & 1);
3966 if (val) {
3967 get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3969 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3970 vc1_mc_4mv_luma(v, i, 0);
3971 } else if (i == 4)
3972 vc1_mc_4mv_chroma(v, 0);
3974 mb_has_coeffs = idx_mbmode & 1;
3976 if (mb_has_coeffs)
3977 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3978 if (cbp) {
3979 GET_MQUANT();
3981 s->current_picture.f.qscale_table[mb_pos] = mquant;
3982 if (!v->ttmbf && cbp) {
3983 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3985 dst_idx = 0;
3986 for (i = 0; i < 6; i++) {
3987 s->dc_val[0][s->block_index[i]] = 0;
3988 dst_idx += i >> 2;
3989 val = ((cbp >> (5 - i)) & 1);
3990 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
3991 if (v->cur_field_type)
3992 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
3993 if (val) {
3994 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3995 first_block, s->dest[dst_idx] + off,
3996 (i & 4) ? s->uvlinesize : s->linesize,
3997 (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3998 &block_tt);
3999 block_cbp |= pat << (i << 2);
4000 if (!v->ttmbf && ttmb < 8) ttmb = -1;
4001 first_block = 0;
4005 if (s->mb_x == s->mb_width - 1)
4006 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4007 return 0;
4010 /** Decode one B-frame MB (in Main profile)
4012 static void vc1_decode_b_mb(VC1Context *v)
4014 MpegEncContext *s = &v->s;
4015 GetBitContext *gb = &s->gb;
4016 int i, j;
4017 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4018 int cbp = 0; /* cbp decoding stuff */
4019 int mqdiff, mquant; /* MB quantization */
4020 int ttmb = v->ttfrm; /* MB Transform type */
4021 int mb_has_coeffs = 0; /* last_flag */
4022 int index, index1; /* LUT indexes */
4023 int val, sign; /* temp values */
4024 int first_block = 1;
4025 int dst_idx, off;
4026 int skipped, direct;
4027 int dmv_x[2], dmv_y[2];
4028 int bmvtype = BMV_TYPE_BACKWARD;
4030 mquant = v->pq; /* lossy initialization */
4031 s->mb_intra = 0;
4033 if (v->dmb_is_raw)
4034 direct = get_bits1(gb);
4035 else
4036 direct = v->direct_mb_plane[mb_pos];
4037 if (v->skip_is_raw)
4038 skipped = get_bits1(gb);
4039 else
4040 skipped = v->s.mbskip_table[mb_pos];
4042 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4043 for (i = 0; i < 6; i++) {
4044 v->mb_type[0][s->block_index[i]] = 0;
4045 s->dc_val[0][s->block_index[i]] = 0;
4047 s->current_picture.f.qscale_table[mb_pos] = 0;
4049 if (!direct) {
4050 if (!skipped) {
4051 GET_MVDATA(dmv_x[0], dmv_y[0]);
4052 dmv_x[1] = dmv_x[0];
4053 dmv_y[1] = dmv_y[0];
4055 if (skipped || !s->mb_intra) {
4056 bmvtype = decode012(gb);
4057 switch (bmvtype) {
4058 case 0:
4059 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4060 break;
4061 case 1:
4062 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4063 break;
4064 case 2:
4065 bmvtype = BMV_TYPE_INTERPOLATED;
4066 dmv_x[0] = dmv_y[0] = 0;
4070 for (i = 0; i < 6; i++)
4071 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4073 if (skipped) {
4074 if (direct)
4075 bmvtype = BMV_TYPE_INTERPOLATED;
4076 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4077 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4078 return;
4080 if (direct) {
4081 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4082 GET_MQUANT();
4083 s->mb_intra = 0;
4084 s->current_picture.f.qscale_table[mb_pos] = mquant;
4085 if (!v->ttmbf)
4086 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4087 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4088 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4089 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4090 } else {
4091 if (!mb_has_coeffs && !s->mb_intra) {
4092 /* no coded blocks - effectively skipped */
4093 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4094 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4095 return;
4097 if (s->mb_intra && !mb_has_coeffs) {
4098 GET_MQUANT();
4099 s->current_picture.f.qscale_table[mb_pos] = mquant;
4100 s->ac_pred = get_bits1(gb);
4101 cbp = 0;
4102 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4103 } else {
4104 if (bmvtype == BMV_TYPE_INTERPOLATED) {
4105 GET_MVDATA(dmv_x[0], dmv_y[0]);
4106 if (!mb_has_coeffs) {
4107 /* interpolated skipped block */
4108 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4109 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4110 return;
4113 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4114 if (!s->mb_intra) {
4115 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4117 if (s->mb_intra)
4118 s->ac_pred = get_bits1(gb);
4119 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4120 GET_MQUANT();
4121 s->current_picture.f.qscale_table[mb_pos] = mquant;
4122 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4123 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4126 dst_idx = 0;
4127 for (i = 0; i < 6; i++) {
4128 s->dc_val[0][s->block_index[i]] = 0;
4129 dst_idx += i >> 2;
4130 val = ((cbp >> (5 - i)) & 1);
4131 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4132 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4133 if (s->mb_intra) {
4134 /* check if prediction blocks A and C are available */
4135 v->a_avail = v->c_avail = 0;
4136 if (i == 2 || i == 3 || !s->first_slice_line)
4137 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4138 if (i == 1 || i == 3 || s->mb_x)
4139 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4141 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4142 (i & 4) ? v->codingset2 : v->codingset);
4143 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4144 continue;
4145 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4146 if (v->rangeredfrm)
4147 for (j = 0; j < 64; j++)
4148 s->block[i][j] <<= 1;
4149 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4150 } else if (val) {
4151 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4152 first_block, s->dest[dst_idx] + off,
4153 (i & 4) ? s->uvlinesize : s->linesize,
4154 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4155 if (!v->ttmbf && ttmb < 8)
4156 ttmb = -1;
4157 first_block = 0;
4162 /** Decode one B-frame MB (in interlaced field B picture)
4164 static void vc1_decode_b_mb_intfi(VC1Context *v)
4166 MpegEncContext *s = &v->s;
4167 GetBitContext *gb = &s->gb;
4168 int i, j;
4169 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4170 int cbp = 0; /* cbp decoding stuff */
4171 int mqdiff, mquant; /* MB quantization */
4172 int ttmb = v->ttfrm; /* MB Transform type */
4173 int mb_has_coeffs = 0; /* last_flag */
4174 int val; /* temp value */
4175 int first_block = 1;
4176 int dst_idx, off;
4177 int fwd;
4178 int dmv_x[2], dmv_y[2], pred_flag[2];
4179 int bmvtype = BMV_TYPE_BACKWARD;
4180 int idx_mbmode, interpmvp;
4182 mquant = v->pq; /* Lossy initialization */
4183 s->mb_intra = 0;
4185 idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4186 if (idx_mbmode <= 1) { // intra MB
4187 s->mb_intra = v->is_intra[s->mb_x] = 1;
4188 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4189 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4190 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4191 GET_MQUANT();
4192 s->current_picture.f.qscale_table[mb_pos] = mquant;
4193 /* Set DC scale - y and c use the same (not sure if necessary here) */
4194 s->y_dc_scale = s->y_dc_scale_table[mquant];
4195 s->c_dc_scale = s->c_dc_scale_table[mquant];
4196 v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4197 mb_has_coeffs = idx_mbmode & 1;
4198 if (mb_has_coeffs)
4199 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4200 dst_idx = 0;
4201 for (i = 0; i < 6; i++) {
4202 s->dc_val[0][s->block_index[i]] = 0;
4203 dst_idx += i >> 2;
4204 val = ((cbp >> (5 - i)) & 1);
4205 v->mb_type[0][s->block_index[i]] = s->mb_intra;
4206 v->a_avail = v->c_avail = 0;
4207 if (i == 2 || i == 3 || !s->first_slice_line)
4208 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4209 if (i == 1 || i == 3 || s->mb_x)
4210 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4212 vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4213 (i & 4) ? v->codingset2 : v->codingset);
4214 if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4215 continue;
4216 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4217 if (v->rangeredfrm)
4218 for (j = 0; j < 64; j++)
4219 s->block[i][j] <<= 1;
4220 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4221 off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
4222 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4223 // TODO: loop filter
4225 } else {
4226 s->mb_intra = v->is_intra[s->mb_x] = 0;
4227 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4228 for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4229 if (v->fmb_is_raw)
4230 fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4231 else
4232 fwd = v->forward_mb_plane[mb_pos];
4233 if (idx_mbmode <= 5) { // 1-MV
4234 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4235 pred_flag[0] = pred_flag[1] = 0;
4236 if (fwd)
4237 bmvtype = BMV_TYPE_FORWARD;
4238 else {
4239 bmvtype = decode012(gb);
4240 switch (bmvtype) {
4241 case 0:
4242 bmvtype = BMV_TYPE_BACKWARD;
4243 break;
4244 case 1:
4245 bmvtype = BMV_TYPE_DIRECT;
4246 break;
4247 case 2:
4248 bmvtype = BMV_TYPE_INTERPOLATED;
4249 interpmvp = get_bits1(gb);
4252 v->bmvtype = bmvtype;
4253 if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4254 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4256 if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4257 get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4259 if (bmvtype == BMV_TYPE_DIRECT) {
4260 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4261 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4263 vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4264 vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4265 mb_has_coeffs = !(idx_mbmode & 2);
4266 } else { // 4-MV
4267 if (fwd)
4268 bmvtype = BMV_TYPE_FORWARD;
4269 v->bmvtype = bmvtype;
4270 v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4271 for (i = 0; i < 6; i++) {
4272 if (i < 4) {
4273 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4274 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4275 val = ((v->fourmvbp >> (3 - i)) & 1);
4276 if (val) {
4277 get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4278 &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4279 &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4281 vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4282 vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4283 } else if (i == 4)
4284 vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4286 mb_has_coeffs = idx_mbmode & 1;
4288 if (mb_has_coeffs)
4289 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4290 if (cbp) {
4291 GET_MQUANT();
4293 s->current_picture.f.qscale_table[mb_pos] = mquant;
4294 if (!v->ttmbf && cbp) {
4295 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4297 dst_idx = 0;
4298 for (i = 0; i < 6; i++) {
4299 s->dc_val[0][s->block_index[i]] = 0;
4300 dst_idx += i >> 2;
4301 val = ((cbp >> (5 - i)) & 1);
4302 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4303 if (v->cur_field_type)
4304 off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
4305 if (val) {
4306 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4307 first_block, s->dest[dst_idx] + off,
4308 (i & 4) ? s->uvlinesize : s->linesize,
4309 (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4310 if (!v->ttmbf && ttmb < 8)
4311 ttmb = -1;
4312 first_block = 0;
4318 /** Decode blocks of I-frame
4320 static void vc1_decode_i_blocks(VC1Context *v)
4322 int k, j;
4323 MpegEncContext *s = &v->s;
4324 int cbp, val;
4325 uint8_t *coded_val;
4326 int mb_pos;
4328 /* select coding mode used for VLC table selection */
4329 switch (v->y_ac_table_index) {
4330 case 0:
4331 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4332 break;
4333 case 1:
4334 v->codingset = CS_HIGH_MOT_INTRA;
4335 break;
4336 case 2:
4337 v->codingset = CS_MID_RATE_INTRA;
4338 break;
4341 switch (v->c_ac_table_index) {
4342 case 0:
4343 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4344 break;
4345 case 1:
4346 v->codingset2 = CS_HIGH_MOT_INTER;
4347 break;
4348 case 2:
4349 v->codingset2 = CS_MID_RATE_INTER;
4350 break;
4353 /* Set DC scale - y and c use the same */
4354 s->y_dc_scale = s->y_dc_scale_table[v->pq];
4355 s->c_dc_scale = s->c_dc_scale_table[v->pq];
4357 //do frame decode
4358 s->mb_x = s->mb_y = 0;
4359 s->mb_intra = 1;
4360 s->first_slice_line = 1;
4361 for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4362 s->mb_x = 0;
4363 ff_init_block_index(s);
4364 for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4365 uint8_t *dst[6];
4366 ff_update_block_index(s);
4367 dst[0] = s->dest[0];
4368 dst[1] = dst[0] + 8;
4369 dst[2] = s->dest[0] + s->linesize * 8;
4370 dst[3] = dst[2] + 8;
4371 dst[4] = s->dest[1];
4372 dst[5] = s->dest[2];
4373 s->dsp.clear_blocks(s->block[0]);
4374 mb_pos = s->mb_x + s->mb_y * s->mb_width;
4375 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4376 s->current_picture.f.qscale_table[mb_pos] = v->pq;
4377 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4378 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4380 // do actual MB decoding and displaying
4381 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4382 v->s.ac_pred = get_bits1(&v->s.gb);
4384 for (k = 0; k < 6; k++) {
4385 val = ((cbp >> (5 - k)) & 1);
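            /* luma coded-block flags are coded differentially: XOR with the
             * predicted flag and store the result for the next prediction */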
4387 if (k < 4) {
4388 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4389 val = val ^ pred;
4390 *coded_val = val;
4392 cbp |= val << (5 - k);
4394 vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4396 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4397 continue;
4398 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4399 if (v->pq >= 9 && v->overlap) {
4400 if (v->rangeredfrm)
4401 for (j = 0; j < 64; j++)
4402 s->block[k][j] <<= 1;
4403 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4404 } else {
4405 if (v->rangeredfrm)
4406 for (j = 0; j < 64; j++)
4407 s->block[k][j] = (s->block[k][j] - 64) << 1;
4408 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4412 if (v->pq >= 9 && v->overlap) {
4413 if (s->mb_x) {
4414 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4415 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4416 if (!(s->flags & CODEC_FLAG_GRAY)) {
4417 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4418 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4421 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4422 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4423 if (!s->first_slice_line) {
4424 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4425 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4426 if (!(s->flags & CODEC_FLAG_GRAY)) {
4427 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4428 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4431 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4432 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4434 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4436 if (get_bits_count(&s->gb) > v->bits) {
4437 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4438 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4439 get_bits_count(&s->gb), v->bits);
4440 return;
4443 if (!v->s.loop_filter)
4444 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4445 else if (s->mb_y)
4446 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4448 s->first_slice_line = 0;
4450 if (v->s.loop_filter)
4451 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4453 /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4454 * profile, these only differ when decoding MSS2 rectangles. */
4455 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4458 /** Decode blocks of I-frame for advanced profile
4460 static void vc1_decode_i_blocks_adv(VC1Context *v)
4462 int k;
4463 MpegEncContext *s = &v->s;
4464 int cbp, val;
4465 uint8_t *coded_val;
4466 int mb_pos;
4467 int mquant = v->pq;
4468 int mqdiff;
4469 GetBitContext *gb = &s->gb;
4471 /* select coding mode used for VLC table selection */
4472 switch (v->y_ac_table_index) {
4473 case 0:
4474 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4475 break;
4476 case 1:
4477 v->codingset = CS_HIGH_MOT_INTRA;
4478 break;
4479 case 2:
4480 v->codingset = CS_MID_RATE_INTRA;
4481 break;
4484 switch (v->c_ac_table_index) {
4485 case 0:
4486 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4487 break;
4488 case 1:
4489 v->codingset2 = CS_HIGH_MOT_INTER;
4490 break;
4491 case 2:
4492 v->codingset2 = CS_MID_RATE_INTER;
4493 break;
4496 // do frame decode
4497 s->mb_x = s->mb_y = 0;
4498 s->mb_intra = 1;
4499 s->first_slice_line = 1;
4500 s->mb_y = s->start_mb_y;
4501 if (s->start_mb_y) {
4502 s->mb_x = 0;
4503 ff_init_block_index(s);
4504 memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4505 (1 + s->b8_stride) * sizeof(*s->coded_block));
4507 for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4508 s->mb_x = 0;
4509 ff_init_block_index(s);
4510 for (;s->mb_x < s->mb_width; s->mb_x++) {
4511 int16_t (*block)[64] = v->block[v->cur_blk_idx];
4512 ff_update_block_index(s);
4513 s->dsp.clear_blocks(block[0]);
4514 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4515 s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4516 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4517 s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4519 // do actual MB decoding and displaying
4520 if (v->fieldtx_is_raw)
4521 v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4522 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4523 if ( v->acpred_is_raw)
4524 v->s.ac_pred = get_bits1(&v->s.gb);
4525 else
4526 v->s.ac_pred = v->acpred_plane[mb_pos];
4528 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4529 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4531 GET_MQUANT();
4533 s->current_picture.f.qscale_table[mb_pos] = mquant;
4534 /* Set DC scale - y and c use the same */
4535 s->y_dc_scale = s->y_dc_scale_table[mquant];
4536 s->c_dc_scale = s->c_dc_scale_table[mquant];
4538 for (k = 0; k < 6; k++) {
4539 val = ((cbp >> (5 - k)) & 1);
4541 if (k < 4) {
4542 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4543 val = val ^ pred;
4544 *coded_val = val;
4546 cbp |= val << (5 - k);
4548 v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4549 v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4551 vc1_decode_i_block_adv(v, block[k], k, val,
4552 (k < 4) ? v->codingset : v->codingset2, mquant);
4554 if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4555 continue;
4556 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
4559 vc1_smooth_overlap_filter_iblk(v);
4560 vc1_put_signed_blocks_clamped(v);
4561 if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4563 if (get_bits_count(&s->gb) > v->bits) {
4564 // TODO: may need modification to handle slice coding
4565 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4566 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4567 get_bits_count(&s->gb), v->bits);
4568 return;
4571 if (!v->s.loop_filter)
4572 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4573 else if (s->mb_y)
4574 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4575 s->first_slice_line = 0;
4578 /* raw bottom MB row */
4579 s->mb_x = 0;
4580 ff_init_block_index(s);
4581 for (;s->mb_x < s->mb_width; s->mb_x++) {
4582 ff_update_block_index(s);
4583 vc1_put_signed_blocks_clamped(v);
4584 if (v->s.loop_filter)
4585 vc1_loop_filter_iblk_delayed(v, v->pq);
4587 if (v->s.loop_filter)
4588 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4589 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4590 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
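/** Decode blocks of P-frame */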
4593 static void vc1_decode_p_blocks(VC1Context *v)
4595 MpegEncContext *s = &v->s;
4596 int apply_loop_filter;
4598 /* select coding mode used for VLC table selection */
4599 switch (v->c_ac_table_index) {
4600 case 0:
4601 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4602 break;
4603 case 1:
4604 v->codingset = CS_HIGH_MOT_INTRA;
4605 break;
4606 case 2:
4607 v->codingset = CS_MID_RATE_INTRA;
4608 break;
4611 switch (v->c_ac_table_index) {
4612 case 0:
4613 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4614 break;
4615 case 1:
4616 v->codingset2 = CS_HIGH_MOT_INTER;
4617 break;
4618 case 2:
4619 v->codingset2 = CS_MID_RATE_INTER;
4620 break;
4623 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4624 s->first_slice_line = 1;
4625 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4626 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4627 s->mb_x = 0;
4628 ff_init_block_index(s);
4629 for (; s->mb_x < s->mb_width; s->mb_x++) {
4630 ff_update_block_index(s);
4632 if (v->fcm == ILACE_FIELD)
4633 vc1_decode_p_mb_intfi(v);
4634 else if (v->fcm == ILACE_FRAME)
4635 vc1_decode_p_mb_intfr(v);
4636 else vc1_decode_p_mb(v);
4637 if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
4638 vc1_apply_p_loop_filter(v);
4639 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4640 // TODO: may need modification to handle slice coding
4641 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4642 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4643 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4644 return;
4647 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4648 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4649 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4650 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4651 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4652 s->first_slice_line = 0;
4654 if (apply_loop_filter) {
4655 s->mb_x = 0;
4656 ff_init_block_index(s);
4657 for (; s->mb_x < s->mb_width; s->mb_x++) {
4658 ff_update_block_index(s);
4659 vc1_apply_p_loop_filter(v);
4662 if (s->end_mb_y >= s->start_mb_y)
4663 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4664 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4665 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
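/** Decode blocks of B-frame */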
4668 static void vc1_decode_b_blocks(VC1Context *v)
4670 MpegEncContext *s = &v->s;
4672 /* select coding mode used for VLC table selection */
4673 switch (v->c_ac_table_index) {
4674 case 0:
4675 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4676 break;
4677 case 1:
4678 v->codingset = CS_HIGH_MOT_INTRA;
4679 break;
4680 case 2:
4681 v->codingset = CS_MID_RATE_INTRA;
4682 break;
4685 switch (v->c_ac_table_index) {
4686 case 0:
4687 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4688 break;
4689 case 1:
4690 v->codingset2 = CS_HIGH_MOT_INTER;
4691 break;
4692 case 2:
4693 v->codingset2 = CS_MID_RATE_INTER;
4694 break;
4697 s->first_slice_line = 1;
4698 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4699 s->mb_x = 0;
4700 ff_init_block_index(s);
4701 for (; s->mb_x < s->mb_width; s->mb_x++) {
4702 ff_update_block_index(s);
4704 if (v->fcm == ILACE_FIELD)
4705 vc1_decode_b_mb_intfi(v);
4706 else
4707 vc1_decode_b_mb(v);
4708 if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4709 // TODO: may need modification to handle slice coding
4710 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4711 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4712 get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4713 return;
4715 if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4717 if (!v->s.loop_filter)
4718 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4719 else if (s->mb_y)
4720 ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4721 s->first_slice_line = 0;
4723 if (v->s.loop_filter)
4724 ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4725 ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4726 (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
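/** Handle a skipped P-frame by copying the previous picture into the output */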
4729 static void vc1_decode_skip_blocks(VC1Context *v)
4731 MpegEncContext *s = &v->s;
4733 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4734 s->first_slice_line = 1;
4735 for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4736 s->mb_x = 0;
4737 ff_init_block_index(s);
4738 ff_update_block_index(s);
4739 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4740 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4741 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4742 ff_draw_horiz_band(s, s->mb_y * 16, 16);
4743 s->first_slice_line = 0;
4745 s->pict_type = AV_PICTURE_TYPE_P;
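/** Dispatch macroblock decoding according to picture type; X8-coded frames are
 * handed to the IntraX8 decoder */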
4748 void ff_vc1_decode_blocks(VC1Context *v)
4751 v->s.esc3_level_length = 0;
4752 if (v->x8_type) {
4753 ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
4754 } else {
4755 v->cur_blk_idx = 0;
4756 v->left_blk_idx = -1;
4757 v->topleft_blk_idx = 1;
4758 v->top_blk_idx = 2;
4759 switch (v->s.pict_type) {
4760 case AV_PICTURE_TYPE_I:
4761 if (v->profile == PROFILE_ADVANCED)
4762 vc1_decode_i_blocks_adv(v);
4763 else
4764 vc1_decode_i_blocks(v);
4765 break;
4766 case AV_PICTURE_TYPE_P:
4767 if (v->p_frame_skipped)
4768 vc1_decode_skip_blocks(v);
4769 else
4770 vc1_decode_p_blocks(v);
4771 break;
4772 case AV_PICTURE_TYPE_B:
4773 if (v->bi_type) {
4774 if (v->profile == PROFILE_ADVANCED)
4775 vc1_decode_i_blocks_adv(v);
4776 else
4777 vc1_decode_i_blocks(v);
4778 } else
4779 vc1_decode_b_blocks(v);
4780 break;
4785 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4787 typedef struct {
4789 * Transform coefficients for both sprites in 16.16 fixed point format,
4790 * in the order they appear in the bitstream:
4791 * x scale
4792 * rotation 1 (unused)
4793 * x offset
4794 * rotation 2 (unused)
4795 * y scale
4796 * y offset
4797 * alpha
4799 int coefs[2][7];
4801 int effect_type, effect_flag;
4802 int effect_pcount1, effect_pcount2; ///< number of effect parameters stored in effect_params1/effect_params2
4803 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
4804 } SpriteData;
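/* read a 30-bit value and convert it to signed 16.16 fixed point
 * (remove the 1 << 29 bias, then double) */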
4806 static inline int get_fp_val(GetBitContext* gb)
4808 return (get_bits_long(gb, 30) - (1 << 29)) << 1;
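/* parse one sprite transform: a 2-bit mode selects how many scale/rotation
 * coefficients are coded explicitly; uncoded ones default to the identity transform */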
4811 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4813 c[1] = c[3] = 0;
4815 switch (get_bits(gb, 2)) {
4816 case 0:
4817 c[0] = 1 << 16;
4818 c[2] = get_fp_val(gb);
4819 c[4] = 1 << 16;
4820 break;
4821 case 1:
4822 c[0] = c[4] = get_fp_val(gb);
4823 c[2] = get_fp_val(gb);
4824 break;
4825 case 2:
4826 c[0] = get_fp_val(gb);
4827 c[2] = get_fp_val(gb);
4828 c[4] = get_fp_val(gb);
4829 break;
4830 case 3:
4831 c[0] = get_fp_val(gb);
4832 c[1] = get_fp_val(gb);
4833 c[2] = get_fp_val(gb);
4834 c[3] = get_fp_val(gb);
4835 c[4] = get_fp_val(gb);
4836 break;
4838 c[5] = get_fp_val(gb);
4839 if (get_bits1(gb))
4840 c[6] = get_fp_val(gb);
4841 else
4842 c[6] = 1 << 16;
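/* parse the transforms for one or two sprites plus the optional effect parameters,
 * with sanity checks on the number of bits consumed */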
4845 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4847 AVCodecContext *avctx = v->s.avctx;
4848 int sprite, i;
4850 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4851 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
4852 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4853 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4854 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4855 for (i = 0; i < 7; i++)
4856 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4857 sd->coefs[sprite][i] / (1<<16),
4858 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4859 av_log(avctx, AV_LOG_DEBUG, "\n");
4862 skip_bits(gb, 2);
4863 if ((sd->effect_type = get_bits_long(gb, 30))) {
4864 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4865 case 7:
4866 vc1_sprite_parse_transform(gb, sd->effect_params1);
4867 break;
4868 case 14:
4869 vc1_sprite_parse_transform(gb, sd->effect_params1);
4870 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4871 break;
4872 default:
4873 for (i = 0; i < sd->effect_pcount1; i++)
4874 sd->effect_params1[i] = get_fp_val(gb);
4876 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
4877 // effect 13 is simple alpha blending and matches the opacity above
4878 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
4879 for (i = 0; i < sd->effect_pcount1; i++)
4880 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4881 sd->effect_params1[i] / (1 << 16),
4882 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
4883 av_log(avctx, AV_LOG_DEBUG, "\n");
4886 sd->effect_pcount2 = get_bits(gb, 16);
4887 if (sd->effect_pcount2 > 10) {
4888 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
4889 return;
4890 } else if (sd->effect_pcount2) {
4891 i = -1;
4892 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
4893 while (++i < sd->effect_pcount2) {
4894 sd->effect_params2[i] = get_fp_val(gb);
4895 av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4896 sd->effect_params2[i] / (1 << 16),
4897 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
4899 av_log(avctx, AV_LOG_DEBUG, "\n");
4902 if ((sd->effect_flag = get_bits1(gb)))
4903 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
4905 if (get_bits_count(gb) >= gb->size_in_bits +
4906 (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
4907 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
4908 if (get_bits_count(gb) < gb->size_in_bits - 8)
4909 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
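/* render the output frame from one or two sprites: rows are resampled horizontally
 * (cached in sr_rows), interpolated vertically and alpha-blended when two sprites are used */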
4912 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
4914 int i, plane, row, sprite;
4915 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
4916 uint8_t* src_h[2][2];
4917 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
4918 int ysub[2];
4919 MpegEncContext *s = &v->s;
4921 for (i = 0; i < 2; i++) {
4922 xoff[i] = av_clip(sd->coefs[i][2], 0, (v->sprite_width - 1) << 16);
4923 xadv[i] = sd->coefs[i][0];
4924 if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
4925 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
4927 yoff[i] = av_clip(sd->coefs[i][5], 0, (v->sprite_height - 1) << 16);
4928 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
4930 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
4932 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
4933 int width = v->output_width>>!!plane;
4935 for (row = 0; row < v->output_height>>!!plane; row++) {
4936 uint8_t *dst = v->sprite_output_frame.data[plane] +
4937 v->sprite_output_frame.linesize[plane] * row;
4939 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4940 uint8_t *iplane = s->current_picture.f.data[plane];
4941 int iline = s->current_picture.f.linesize[plane];
4942 int ycoord = yoff[sprite] + yadv[sprite] * row;
4943 int yline = ycoord >> 16;
4944 int next_line;
4945 ysub[sprite] = ycoord & 0xFFFF;
4946 if (sprite) {
4947 iplane = s->last_picture.f.data[plane];
4948 iline = s->last_picture.f.linesize[plane];
4950 next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
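                /* fast path: integer x offset and unity x scale, read the row straight
                 * from the source plane; otherwise resample it into the sr_rows cache */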
4951 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
4952 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
4953 if (ysub[sprite])
4954 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
4955 } else {
4956 if (sr_cache[sprite][0] != yline) {
4957 if (sr_cache[sprite][1] == yline) {
4958 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
4959 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
4960 } else {
4961 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
4962 sr_cache[sprite][0] = yline;
4965 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
4966 v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
4967 iplane + next_line, xoff[sprite],
4968 xadv[sprite], width);
4969 sr_cache[sprite][1] = yline + 1;
4971 src_h[sprite][0] = v->sr_rows[sprite][0];
4972 src_h[sprite][1] = v->sr_rows[sprite][1];
4976 if (!v->two_sprites) {
4977 if (ysub[0]) {
4978 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
4979 } else {
4980 memcpy(dst, src_h[0][0], width);
4982 } else {
4983 if (ysub[0] && ysub[1]) {
4984 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
4985 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
4986 } else if (ysub[0]) {
4987 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
4988 src_h[1][0], alpha, width);
4989 } else if (ysub[1]) {
4990 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
4991 src_h[0][0], (1<<16)-1-alpha, width);
4992 } else {
4993 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
4998 if (!plane) {
4999 for (i = 0; i < 2; i++) {
5000 xoff[i] >>= 1;
5001 yoff[i] >>= 1;
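/** Parse sprite data and render the sprite output frame (WMV3IMAGE/VC1IMAGE) */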
5009 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5011 MpegEncContext *s = &v->s;
5012 AVCodecContext *avctx = s->avctx;
5013 SpriteData sd;
5015 vc1_parse_sprites(v, gb, &sd);
5017 if (!s->current_picture.f.data[0]) {
5018 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5019 return -1;
5022 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5023 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5024 v->two_sprites = 0;
5027 if (v->sprite_output_frame.data[0])
5028 avctx->release_buffer(avctx, &v->sprite_output_frame);
5030 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
5031 v->sprite_output_frame.reference = 0;
5032 if (ff_get_buffer(avctx, &v->sprite_output_frame) < 0) {
5033 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5034 return -1;
5037 vc1_draw_sprites(v, &sd);
5039 return 0;
5042 static void vc1_sprite_flush(AVCodecContext *avctx)
5044 VC1Context *v = avctx->priv_data;
5045 MpegEncContext *s = &v->s;
5046 AVFrame *f = &s->current_picture.f;
5047 int plane, i;
5049 /* Windows Media Image codecs have a convergence interval of two keyframes.
5050 Since we can't enforce it, clear to black the missing sprite. This is
5051 wrong but it looks better than doing nothing. */
5053 if (f->data[0])
5054 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5055 for (i = 0; i < v->sprite_height>>!!plane; i++)
5056 memset(f->data[plane] + i * f->linesize[plane],
5057 plane ? 128 : 0, f->linesize[plane]);
5060 #endif
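/** Allocate the per-sequence decoder tables: MB bitplanes, block buffers,
 * CBP/transform-type/intra rows and MV type arrays */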
5062 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5064 MpegEncContext *s = &v->s;
5065 int i;
5067 /* Allocate mb bitplanes */
5068 v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5069 v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5070 v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5071 v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5072 v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5073 v->over_flags_plane = av_malloc (s->mb_stride * s->mb_height);
5075 v->n_allocated_blks = s->mb_width + 2;
5076 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5077 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5078 v->cbp = v->cbp_base + s->mb_stride;
5079 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5080 v->ttblk = v->ttblk_base + s->mb_stride;
5081 v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5082 v->is_intra = v->is_intra_base + s->mb_stride;
5083 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5084 v->luma_mv = v->luma_mv_base + s->mb_stride;
5086 /* allocate block type info in such a way that it can be used with s->block_index[] */
5087 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5088 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5089 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5090 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5092 /* allocate memory to store block level MV info */
5093 v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5094 v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5095 v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5096 v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5097 v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5098 v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5099 v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5100 v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5101 v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5102 v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5103 v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5105 /* Init coded blocks info */
5106 if (v->profile == PROFILE_ADVANCED) {
5107 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5108 // return -1;
5109 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5110 // return -1;
5113 ff_intrax8_common_init(&v->x8, s);
5115 if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5116 for (i = 0; i < 4; i++)
5117 if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5120 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5121 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5122 !v->mb_type_base)
5123 return -1;
5125 return 0;
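/** Build the transposed zigzag scan tables used for advanced profile and res_fasttx streams */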
5128 av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
5130 int i;
5131 for (i = 0; i < 64; i++) {
5132 #define transpose(x) (((x) >> 3) | (((x) & 7) << 3))
5133 v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5134 v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5135 v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5136 v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5137 v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5139 v->left_blk_sh = 0;
5140 v->top_blk_sh = 3;
5143 /** Initialize a VC1/WMV3 decoder
5144 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5145 * @todo TODO: Decypher remaining bits in extra_data
5147 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5149 VC1Context *v = avctx->priv_data;
5150 MpegEncContext *s = &v->s;
5151 GetBitContext gb;
5153 /* save the container output size for WMImage */
5154 v->output_width = avctx->width;
5155 v->output_height = avctx->height;
5157 if (!avctx->extradata_size || !avctx->extradata)
5158 return -1;
5159 if (!(avctx->flags & CODEC_FLAG_GRAY))
5160 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5161 else
5162 avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5163 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5164 v->s.avctx = avctx;
5165 avctx->flags |= CODEC_FLAG_EMU_EDGE;
5166 v->s.flags |= CODEC_FLAG_EMU_EDGE;
5168 if (ff_vc1_init_common(v) < 0)
5169 return -1;
5170 ff_h264chroma_init(&v->h264chroma, 8);
5171 ff_vc1dsp_init(&v->vc1dsp);
5173 if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5174 int count = 0;
5176 // WMV3 stores a sequence header in the extradata;
5177 // an advanced sequence header may precede the first frame.
5178 // The last byte of the extradata is a version number; it is 1 for the
5179 // samples we can decode
5181 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5183 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
5184 return -1;
5186 count = avctx->extradata_size*8 - get_bits_count(&gb);
5187 if (count > 0) {
5188 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5189 count, get_bits(&gb, count));
5190 } else if (count < 0) {
5191 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5193 } else { // VC1/WVC1/WVP2
5194 const uint8_t *start = avctx->extradata;
5195 uint8_t *end = avctx->extradata + avctx->extradata_size;
5196 const uint8_t *next;
5197 int size, buf2_size;
5198 uint8_t *buf2 = NULL;
5199 int seq_initialized = 0, ep_initialized = 0;
5201 if (avctx->extradata_size < 16) {
5202 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5203 return -1;
5206 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5207 start = find_next_marker(start, end); // in WVC1 extradata the first byte is its size, but it can be 0 in mkv
5208 next = start;
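/* Walk the extradata one IDU at a time: each unit starts with a 4-byte
 * start code (0x000001xx), and its payload is unescaped into buf2 before
 * the sequence or entry-point header is parsed from it. */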
5209 for (; next < end; start = next) {
5210 next = find_next_marker(start + 4, end);
5211 size = next - start - 4;
5212 if (size <= 0)
5213 continue;
5214 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5215 init_get_bits(&gb, buf2, buf2_size * 8);
5216 switch (AV_RB32(start)) {
5217 case VC1_CODE_SEQHDR:
5218 if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5219 av_free(buf2);
5220 return -1;
5222 seq_initialized = 1;
5223 break;
5224 case VC1_CODE_ENTRYPOINT:
5225 if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5226 av_free(buf2);
5227 return -1;
5229 ep_initialized = 1;
5230 break;
5233 av_free(buf2);
5234 if (!seq_initialized || !ep_initialized) {
5235 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5236 return -1;
5238 v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5241 avctx->profile = v->profile;
5242 if (v->profile == PROFILE_ADVANCED)
5243 avctx->level = v->level;
5245 avctx->has_b_frames = !!avctx->max_b_frames;
5247 s->mb_width = (avctx->coded_width + 15) >> 4;
5248 s->mb_height = (avctx->coded_height + 15) >> 4;
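/* The coded dimensions are rounded up to whole 16x16 macroblocks,
 * e.g. coded_width 1440 -> 90 MBs per row, 1441 -> 91. */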
5250 if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5251 ff_vc1_init_transposed_scantables(v);
5252 } else {
5253 memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5254 v->left_blk_sh = 3;
5255 v->top_blk_sh = 0;
5258 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5259 v->sprite_width = avctx->coded_width;
5260 v->sprite_height = avctx->coded_height;
5262 avctx->coded_width = avctx->width = v->output_width;
5263 avctx->coded_height = avctx->height = v->output_height;
5265 // prevent 16.16 overflows
5266 if (v->sprite_width > 1 << 14 ||
5267 v->sprite_height > 1 << 14 ||
5268 v->output_width > 1 << 14 ||
5269 v->output_height > 1 << 14) return -1;
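/* A dimension of at most 1 << 14 scaled by the 16.16 fixed-point factor of
 * 1 << 16 is at most 2^30, which presumably is what keeps the sprite
 * position arithmetic safely inside a signed 32-bit int. */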
5271 return 0;
5274 /** Close a VC1/WMV3 decoder
5275 * @warning Initial try at using MpegEncContext stuff
5277 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5279 VC1Context *v = avctx->priv_data;
5280 int i;
5282 if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5283 && v->sprite_output_frame.data[0])
5284 avctx->release_buffer(avctx, &v->sprite_output_frame);
5285 for (i = 0; i < 4; i++)
5286 av_freep(&v->sr_rows[i >> 1][i & 1]);
5287 av_freep(&v->hrd_rate);
5288 av_freep(&v->hrd_buffer);
5289 ff_MPV_common_end(&v->s);
5290 av_freep(&v->mv_type_mb_plane);
5291 av_freep(&v->direct_mb_plane);
5292 av_freep(&v->forward_mb_plane);
5293 av_freep(&v->fieldtx_plane);
5294 av_freep(&v->acpred_plane);
5295 av_freep(&v->over_flags_plane);
5296 av_freep(&v->mb_type_base);
5297 av_freep(&v->blk_mv_type_base);
5298 av_freep(&v->mv_f_base);
5299 av_freep(&v->mv_f_last_base);
5300 av_freep(&v->mv_f_next_base);
5301 av_freep(&v->block);
5302 av_freep(&v->cbp_base);
5303 av_freep(&v->ttblk_base);
5304 av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5305 av_freep(&v->luma_mv_base);
5306 ff_intrax8_common_end(&v->x8);
5307 return 0;
5311 /** Decode a VC1/WMV3 frame
5312 * @todo TODO: Handle VC-1 IDUs (Transport level?)
5314 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5315 int *got_frame, AVPacket *avpkt)
5317 const uint8_t *buf = avpkt->data;
5318 int buf_size = avpkt->size, n_slices = 0, i;
5319 VC1Context *v = avctx->priv_data;
5320 MpegEncContext *s = &v->s;
5321 AVFrame *pict = data;
5322 uint8_t *buf2 = NULL;
5323 const uint8_t *buf_start = buf;
5324 int mb_height, n_slices1 = -1; // -1: no field marker seen yet
5325 struct {
5326 uint8_t *buf;
5327 GetBitContext gb;
5328 int mby_start;
5329 } *slices = NULL, *tmp;
5331 /* no supplementary picture */
5332 if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5333 /* special case for last picture */
5334 if (s->low_delay == 0 && s->next_picture_ptr) {
5335 *pict = s->next_picture_ptr->f;
5336 s->next_picture_ptr = NULL;
5338 *got_frame = 1;
5341 return 0;
5344 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5345 if (v->profile < PROFILE_ADVANCED)
5346 avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5347 else
5348 avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5351 // for the advanced profile we may need to parse and unescape the data
5352 if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5353 int buf_size2 = 0;
5354 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5356 if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5357 const uint8_t *start, *end, *next;
5358 int size;
5360 next = buf;
5361 for (start = buf, end = buf + buf_size; next < end; start = next) {
5362 next = find_next_marker(start + 4, end);
5363 size = next - start - 4;
5364 if (size <= 0) continue;
5365 switch (AV_RB32(start)) {
5366 case VC1_CODE_FRAME:
5367 if (avctx->hwaccel ||
5368 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5369 buf_start = start;
5370 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5371 break;
5372 case VC1_CODE_FIELD: {
5373 int buf_size3;
5374 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5375 if (!tmp)
5376 goto err;
5377 slices = tmp;
5378 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5379 if (!slices[n_slices].buf)
5380 goto err;
5381 buf_size3 = vc1_unescape_buffer(start + 4, size,
5382 slices[n_slices].buf);
5383 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5384 buf_size3 << 3);
5385 /* assume the second field starts at the exact vertical middle of the
5386 frame; this may not hold for every stream */
5387 slices[n_slices].mby_start = s->mb_height >> 1;
5388 n_slices1 = n_slices - 1; // index of the last slice of the first field
5389 n_slices++;
5390 break;
5392 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5393 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5394 init_get_bits(&s->gb, buf2, buf_size2 * 8);
5395 ff_vc1_decode_entry_point(avctx, v, &s->gb);
5396 break;
5397 case VC1_CODE_SLICE: {
5398 int buf_size3;
5399 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5400 if (!tmp)
5401 goto err;
5402 slices = tmp;
5403 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5404 if (!slices[n_slices].buf)
5405 goto err;
5406 buf_size3 = vc1_unescape_buffer(start + 4, size,
5407 slices[n_slices].buf);
5408 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5409 buf_size3 << 3);
5410 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5411 n_slices++;
5412 break;
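/* The 9 bits read above are the slice's macroblock row address (SLICE_ADDR
 * in the VC-1 slice layer), i.e. the MB row at which this slice starts. */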
5416 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5417 const uint8_t *divider;
5418 int buf_size3;
5420 divider = find_next_marker(buf, buf + buf_size);
5421 if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5422 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5423 goto err;
5424 } else { // found field marker, unescape second field
5425 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5426 if (!tmp)
5427 goto err;
5428 slices = tmp;
5429 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5430 if (!slices[n_slices].buf)
5431 goto err;
5432 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5433 init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5434 buf_size3 << 3);
5435 slices[n_slices].mby_start = s->mb_height >> 1;
5436 n_slices1 = n_slices - 1;
5437 n_slices++;
5439 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5440 } else {
5441 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5443 init_get_bits(&s->gb, buf2, buf_size2*8);
5444 } else
5445 init_get_bits(&s->gb, buf, buf_size*8);
5447 if (v->res_sprite) {
5448 v->new_sprite = !get_bits1(&s->gb);
5449 v->two_sprites = get_bits1(&s->gb);
5450 /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5451 we're using the sprite compositor. These are intentionally kept separate
5452 so you can get the raw sprites by using the wmv3 decoder for WMVP or
5453 the vc1 one for WVP2 */
5454 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5455 if (v->new_sprite) {
5456 // switch AVCodecContext parameters to those of the sprites
5457 avctx->width = avctx->coded_width = v->sprite_width;
5458 avctx->height = avctx->coded_height = v->sprite_height;
5459 } else {
5460 goto image;
5465 if (s->context_initialized &&
5466 (s->width != avctx->coded_width ||
5467 s->height != avctx->coded_height)) {
5468 ff_vc1_decode_end(avctx);
5471 if (!s->context_initialized) {
5472 if (ff_msmpeg4_decode_init(avctx) < 0 || ff_vc1_decode_init_alloc_tables(v) < 0)
5473 goto err;
5475 s->low_delay = !avctx->has_b_frames || v->res_sprite;
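/* Without B-frames (or for sprite streams) pictures can be output in coding
 * order, with no reordering delay. */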
5477 if (v->profile == PROFILE_ADVANCED) {
5478 s->h_edge_pos = avctx->coded_width;
5479 s->v_edge_pos = avctx->coded_height;
5483 /* We need to set current_picture_ptr before reading the header,
5484 * otherwise we cannot store anything in there. */
5485 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5486 int i = ff_find_unused_picture(s, 0);
5487 if (i < 0)
5488 goto err;
5489 s->current_picture_ptr = &s->picture[i];
5492 // do parse frame header
5493 v->pic_header_flag = 0;
5494 if (v->profile < PROFILE_ADVANCED) {
5495 if (ff_vc1_parse_frame_header(v, &s->gb) == -1) {
5496 goto err;
5498 } else {
5499 if (ff_vc1_parse_frame_header_adv(v, &s->gb) == -1) {
5500 goto err;
5504 if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5505 && s->pict_type != AV_PICTURE_TYPE_I) {
5506 av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5507 goto err;
5510 // process pulldown flags
5511 s->current_picture_ptr->f.repeat_pict = 0;
5512 // Pulldown flags are only valid when 'broadcast' has been set,
5513 // in which case ticks_per_frame will be 2.
5514 if (v->rff) {
5515 // repeat field
5516 s->current_picture_ptr->f.repeat_pict = 1;
5517 } else if (v->rptfrm) {
5518 // repeat frames
5519 s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
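/* With broadcast set, ticks_per_frame is 2, so repeat_pict counts extra
 * field durations: RFF adds one field (repeat_pict = 1) and RPTFRM adds
 * whole frames (repeat_pict = 2 * rptfrm). */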
5522 // for skipping the frame
5523 s->current_picture.f.pict_type = s->pict_type;
5524 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5526 /* skip B-frames if we don't have reference frames */
5527 if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5528 goto err;
5530 if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5531 (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5532 avctx->skip_frame >= AVDISCARD_ALL) {
5533 goto end;
5536 if (s->next_p_frame_damaged) {
5537 if (s->pict_type == AV_PICTURE_TYPE_B)
5538 goto end;
5539 else
5540 s->next_p_frame_damaged = 0;
5543 if (ff_MPV_frame_start(s, avctx) < 0) {
5544 goto err;
5547 s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
5548 s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
5550 if ((CONFIG_VC1_VDPAU_DECODER) &&
5551 s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
5552 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5553 else if (avctx->hwaccel) {
5554 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
5555 goto err;
5556 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5557 goto err;
5558 if (avctx->hwaccel->end_frame(avctx) < 0)
5559 goto err;
5560 } else {
5561 ff_er_frame_start(s);
5563 v->bits = buf_size * 8;
5564 v->end_mb_x = s->mb_width;
5565 if (v->field_mode) {
5566 uint8_t *tmp[2];
5567 s->current_picture.f.linesize[0] <<= 1;
5568 s->current_picture.f.linesize[1] <<= 1;
5569 s->current_picture.f.linesize[2] <<= 1;
5570 s->linesize <<= 1;
5571 s->uvlinesize <<= 1;
5572 tmp[0] = v->mv_f_last[0];
5573 tmp[1] = v->mv_f_last[1];
5574 v->mv_f_last[0] = v->mv_f_next[0];
5575 v->mv_f_last[1] = v->mv_f_next[1];
5576 v->mv_f_next[0] = v->mv_f[0];
5577 v->mv_f_next[1] = v->mv_f[1];
5578 v->mv_f[0] = tmp[0];
5579 v->mv_f[1] = tmp[1];
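/* For field pictures the line sizes were doubled above so that each field
 * can be addressed as its own picture, and the per-field MV planes are
 * rotated: last <- next, next <- current, current reuses the old last
 * buffers. */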
5581 mb_height = s->mb_height >> v->field_mode;
5582 for (i = 0; i <= n_slices; i++) {
5583 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5584 if (v->field_mode <= 0) {
5585 av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
5586 "picture boundary (%d >= %d)\n", i,
5587 slices[i - 1].mby_start, mb_height);
5588 continue;
5590 v->second_field = 1;
5591 v->blocks_off = s->mb_width * s->mb_height << 1;
5592 v->mb_off = s->mb_stride * s->mb_height >> 1;
5593 } else {
5594 v->second_field = 0;
5595 v->blocks_off = 0;
5596 v->mb_off = 0;
5598 if (i) {
5599 v->pic_header_flag = 0;
5600 if (v->field_mode && i == n_slices1 + 2) {
5601 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5602 av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
5603 continue;
5605 } else if (get_bits1(&s->gb)) {
5606 v->pic_header_flag = 1;
5607 if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5608 av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
5609 continue;
5613 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5614 if (!v->field_mode || v->second_field)
5615 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5616 else
5617 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5618 ff_vc1_decode_blocks(v);
5619 if (i != n_slices)
5620 s->gb = slices[i].gb;
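/* The loop makes n_slices + 1 passes: pass i decodes MB rows
 * [start_mb_y, end_mb_y) and, except on the last pass, switches the bit
 * reader to slice i's own GetBitContext so the next pass reads from that
 * slice's unescaped buffer. */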
5622 if (v->field_mode) {
5623 v->second_field = 0;
5624 if (s->pict_type == AV_PICTURE_TYPE_B) {
5625 memcpy(v->mv_f_base, v->mv_f_next_base,
5626 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5628 s->current_picture.f.linesize[0] >>= 1;
5629 s->current_picture.f.linesize[1] >>= 1;
5630 s->current_picture.f.linesize[2] >>= 1;
5631 s->linesize >>= 1;
5632 s->uvlinesize >>= 1;
5634 av_dlog(s->avctx, "Consumed %i/%i bits\n",
5635 get_bits_count(&s->gb), s->gb.size_in_bits);
5636 // if (get_bits_count(&s->gb) > buf_size * 8)
5637 // return -1;
5638 ff_er_frame_end(s);
5641 ff_MPV_frame_end(s);
5643 if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5644 image:
5645 avctx->width = avctx->coded_width = v->output_width;
5646 avctx->height = avctx->coded_height = v->output_height;
5647 if (avctx->skip_frame >= AVDISCARD_NONREF)
5648 goto end;
5649 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5650 if (vc1_decode_sprites(v, &s->gb))
5651 goto err;
5652 #endif
5653 *pict = v->sprite_output_frame;
5654 *got_frame = 1;
5655 } else {
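/* Output selection: B-frames and low-delay streams return the picture just
 * decoded; otherwise the previous reference is returned, giving the usual
 * one-frame reordering delay. */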
5656 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5657 *pict = s->current_picture_ptr->f;
5658 } else if (s->last_picture_ptr != NULL) {
5659 *pict = s->last_picture_ptr->f;
5661 if (s->last_picture_ptr || s->low_delay) {
5662 *got_frame = 1;
5663 ff_print_debug_info(s, pict);
5667 end:
5668 av_free(buf2);
5669 for (i = 0; i < n_slices; i++)
5670 av_free(slices[i].buf);
5671 av_free(slices);
5672 return buf_size;
5674 err:
5675 av_free(buf2);
5676 for (i = 0; i < n_slices; i++)
5677 av_free(slices[i].buf);
5678 av_free(slices);
5679 return -1;
5683 static const AVProfile profiles[] = {
5684 { FF_PROFILE_VC1_SIMPLE, "Simple" },
5685 { FF_PROFILE_VC1_MAIN, "Main" },
5686 { FF_PROFILE_VC1_COMPLEX, "Complex" },
5687 { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5688 { FF_PROFILE_UNKNOWN },
5691 AVCodec ff_vc1_decoder = {
5692 .name = "vc1",
5693 .type = AVMEDIA_TYPE_VIDEO,
5694 .id = AV_CODEC_ID_VC1,
5695 .priv_data_size = sizeof(VC1Context),
5696 .init = vc1_decode_init,
5697 .close = ff_vc1_decode_end,
5698 .decode = vc1_decode_frame,
5699 .flush = ff_mpeg_flush,
5700 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5701 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5702 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5703 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5706 #if CONFIG_WMV3_DECODER
5707 AVCodec ff_wmv3_decoder = {
5708 .name = "wmv3",
5709 .type = AVMEDIA_TYPE_VIDEO,
5710 .id = AV_CODEC_ID_WMV3,
5711 .priv_data_size = sizeof(VC1Context),
5712 .init = vc1_decode_init,
5713 .close = ff_vc1_decode_end,
5714 .decode = vc1_decode_frame,
5715 .flush = ff_mpeg_flush,
5716 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5717 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5718 .pix_fmts = ff_hwaccel_pixfmt_list_420,
5719 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5721 #endif
5723 #if CONFIG_WMV3_VDPAU_DECODER
5724 AVCodec ff_wmv3_vdpau_decoder = {
5725 .name = "wmv3_vdpau",
5726 .type = AVMEDIA_TYPE_VIDEO,
5727 .id = AV_CODEC_ID_WMV3,
5728 .priv_data_size = sizeof(VC1Context),
5729 .init = vc1_decode_init,
5730 .close = ff_vc1_decode_end,
5731 .decode = vc1_decode_frame,
5732 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5733 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5734 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
5735 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5737 #endif
5739 #if CONFIG_VC1_VDPAU_DECODER
5740 AVCodec ff_vc1_vdpau_decoder = {
5741 .name = "vc1_vdpau",
5742 .type = AVMEDIA_TYPE_VIDEO,
5743 .id = AV_CODEC_ID_VC1,
5744 .priv_data_size = sizeof(VC1Context),
5745 .init = vc1_decode_init,
5746 .close = ff_vc1_decode_end,
5747 .decode = vc1_decode_frame,
5748 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5749 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5750 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
5751 .profiles = NULL_IF_CONFIG_SMALL(profiles)
5753 #endif
5755 #if CONFIG_WMV3IMAGE_DECODER
5756 AVCodec ff_wmv3image_decoder = {
5757 .name = "wmv3image",
5758 .type = AVMEDIA_TYPE_VIDEO,
5759 .id = AV_CODEC_ID_WMV3IMAGE,
5760 .priv_data_size = sizeof(VC1Context),
5761 .init = vc1_decode_init,
5762 .close = ff_vc1_decode_end,
5763 .decode = vc1_decode_frame,
5764 .capabilities = CODEC_CAP_DR1,
5765 .flush = vc1_sprite_flush,
5766 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5767 .pix_fmts = ff_pixfmt_list_420
5769 #endif
5771 #if CONFIG_VC1IMAGE_DECODER
5772 AVCodec ff_vc1image_decoder = {
5773 .name = "vc1image",
5774 .type = AVMEDIA_TYPE_VIDEO,
5775 .id = AV_CODEC_ID_VC1IMAGE,
5776 .priv_data_size = sizeof(VC1Context),
5777 .init = vc1_decode_init,
5778 .close = ff_vc1_decode_end,
5779 .decode = vc1_decode_frame,
5780 .capabilities = CODEC_CAP_DR1,
5781 .flush = vc1_sprite_flush,
5782 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5783 .pix_fmts = ff_pixfmt_list_420
5785 #endif