1 /*
2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006-2007 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 /**
24 * @file libavcodec/vc1.c
25 * VC-1 and WMV3 decoder
28 #include "internal.h"
29 #include "dsputil.h"
30 #include "avcodec.h"
31 #include "mpegvideo.h"
32 #include "vc1.h"
33 #include "vc1data.h"
34 #include "vc1acdata.h"
35 #include "msmpeg4data.h"
36 #include "unary.h"
37 #include "simple_idct.h"
38 #include "mathops.h"
39 #include "vdpau_internal.h"
41 #undef NDEBUG
42 #include <assert.h>
44 #define MB_INTRA_VLC_BITS 9
45 #define DC_VLC_BITS 9
46 #define AC_VLC_BITS 9
47 static const uint16_t table_mb_intra[64][2];
50 /**
51 * Init VC-1 specific tables and VC1Context members
52 * @param v The VC1Context to initialize
53 * @return Status
55 static int vc1_init_common(VC1Context *v)
57 static int done = 0;
58 int i = 0;
60 v->hrd_rate = v->hrd_buffer = NULL;
62 /* VLC tables */
63 if(!done)
65 done = 1;
66 init_vlc(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
67 ff_vc1_bfraction_bits, 1, 1,
68 ff_vc1_bfraction_codes, 1, 1, 1);
69 init_vlc(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
70 ff_vc1_norm2_bits, 1, 1,
71 ff_vc1_norm2_codes, 1, 1, 1);
72 init_vlc(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
73 ff_vc1_norm6_bits, 1, 1,
74 ff_vc1_norm6_codes, 2, 2, 1);
75 init_vlc(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
76 ff_vc1_imode_bits, 1, 1,
77 ff_vc1_imode_codes, 1, 1, 1);
78 for (i=0; i<3; i++)
80 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
81 ff_vc1_ttmb_bits[i], 1, 1,
82 ff_vc1_ttmb_codes[i], 2, 2, 1);
83 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
84 ff_vc1_ttblk_bits[i], 1, 1,
85 ff_vc1_ttblk_codes[i], 1, 1, 1);
86 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
87 ff_vc1_subblkpat_bits[i], 1, 1,
88 ff_vc1_subblkpat_codes[i], 1, 1, 1);
90 for(i=0; i<4; i++)
92 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
93 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
94 ff_vc1_4mv_block_pattern_codes[i], 1, 1, 1);
95 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
96 ff_vc1_cbpcy_p_bits[i], 1, 1,
97 ff_vc1_cbpcy_p_codes[i], 2, 2, 1);
98 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
99 ff_vc1_mv_diff_bits[i], 1, 1,
100 ff_vc1_mv_diff_codes[i], 2, 2, 1);
102 for(i=0; i<8; i++)
103 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
104 &vc1_ac_tables[i][0][1], 8, 4,
105 &vc1_ac_tables[i][0][0], 8, 4, 1);
106 init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
107 &ff_msmp4_mb_i_table[0][1], 4, 2,
108 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
111 /* Other defaults */
112 v->pq = -1;
113 v->mvrange = 0; /* 7.1.1.18, p80 */
115 return 0;
118 /***********************************************************************/
120 * @defgroup vc1bitplane VC-1 Bitplane decoding
121 * @see 8.7, p56
122 * @{
126 * Imode types
127 * @{
129 enum Imode {
130 IMODE_RAW,
131 IMODE_NORM2,
132 IMODE_DIFF2,
133 IMODE_NORM6,
134 IMODE_DIFF6,
135 IMODE_ROWSKIP,
136 IMODE_COLSKIP
138 /** @} */ //imode defines
140 /** Decode rows by checking if they are skipped
141 * @param plane Buffer to store decoded bits
142 * @param[in] width Width of this buffer
143 * @param[in] height Height of this buffer
 144  * @param[in] stride Stride of this buffer
146 static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
147 int x, y;
149 for (y=0; y<height; y++){
150 if (!get_bits1(gb)) //rowskip
151 memset(plane, 0, width);
152 else
153 for (x=0; x<width; x++)
154 plane[x] = get_bits1(gb);
155 plane += stride;
159 /** Decode columns by checking if they are skipped
160 * @param plane Buffer to store decoded bits
161 * @param[in] width Width of this buffer
162 * @param[in] height Height of this buffer
 163  * @param[in] stride Stride of this buffer
164 * @todo FIXME: Optimize
166 static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
167 int x, y;
169 for (x=0; x<width; x++){
170 if (!get_bits1(gb)) //colskip
171 for (y=0; y<height; y++)
172 plane[y*stride] = 0;
173 else
174 for (y=0; y<height; y++)
175 plane[y*stride] = get_bits1(gb);
176 plane ++;
180 /** Decode a bitplane's bits
 181  * @param data bitplane where to store the decoded bits
182 * @param[out] raw_flag pointer to the flag indicating that this bitplane is not coded explicitly
183 * @param v VC-1 context for bit reading and logging
184 * @return Status
185 * @todo FIXME: Optimize
187 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
189 GetBitContext *gb = &v->s.gb;
191 int imode, x, y, code, offset;
192 uint8_t invert, *planep = data;
193 int width, height, stride;
195 width = v->s.mb_width;
196 height = v->s.mb_height;
197 stride = v->s.mb_stride;
198 invert = get_bits1(gb);
199 imode = get_vlc2(gb, ff_vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
201 *raw_flag = 0;
202 switch (imode)
204 case IMODE_RAW:
205 //Data is actually read in the MB layer (same for all tests == "raw")
206 *raw_flag = 1; //invert ignored
207 return invert;
208 case IMODE_DIFF2:
209 case IMODE_NORM2:
210 if ((height * width) & 1)
212 *planep++ = get_bits1(gb);
213 offset = 1;
215 else offset = 0;
216 // decode bitplane as one long line
217 for (y = offset; y < height * width; y += 2) {
218 code = get_vlc2(gb, ff_vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
219 *planep++ = code & 1;
220 offset++;
221 if(offset == width) {
222 offset = 0;
223 planep += stride - width;
225 *planep++ = code >> 1;
226 offset++;
227 if(offset == width) {
228 offset = 0;
229 planep += stride - width;
232 break;
233 case IMODE_DIFF6:
234 case IMODE_NORM6:
235 if(!(height % 3) && (width % 3)) { // use 2x3 decoding
236 for(y = 0; y < height; y+= 3) {
237 for(x = width & 1; x < width; x += 2) {
238 code = get_vlc2(gb, ff_vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
239 if(code < 0){
240 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
241 return -1;
243 planep[x + 0] = (code >> 0) & 1;
244 planep[x + 1] = (code >> 1) & 1;
245 planep[x + 0 + stride] = (code >> 2) & 1;
246 planep[x + 1 + stride] = (code >> 3) & 1;
247 planep[x + 0 + stride * 2] = (code >> 4) & 1;
248 planep[x + 1 + stride * 2] = (code >> 5) & 1;
250 planep += stride * 3;
252 if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
253 } else { // 3x2
254 planep += (height & 1) * stride;
255 for(y = height & 1; y < height; y += 2) {
256 for(x = width % 3; x < width; x += 3) {
257 code = get_vlc2(gb, ff_vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
258 if(code < 0){
259 av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
260 return -1;
262 planep[x + 0] = (code >> 0) & 1;
263 planep[x + 1] = (code >> 1) & 1;
264 planep[x + 2] = (code >> 2) & 1;
265 planep[x + 0 + stride] = (code >> 3) & 1;
266 planep[x + 1 + stride] = (code >> 4) & 1;
267 planep[x + 2 + stride] = (code >> 5) & 1;
269 planep += stride * 2;
271 x = width % 3;
272 if(x) decode_colskip(data , x, height , stride, &v->s.gb);
273 if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
275 break;
276 case IMODE_ROWSKIP:
277 decode_rowskip(data, width, height, stride, &v->s.gb);
278 break;
279 case IMODE_COLSKIP:
280 decode_colskip(data, width, height, stride, &v->s.gb);
281 break;
282 default: break;
285 /* Applying diff operator */
286 if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
288 planep = data;
289 planep[0] ^= invert;
290 for (x=1; x<width; x++)
291 planep[x] ^= planep[x-1];
292 for (y=1; y<height; y++)
294 planep += stride;
295 planep[0] ^= planep[-stride];
296 for (x=1; x<width; x++)
298 if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
299 else planep[x] ^= planep[x-1];
303 else if (invert)
305 planep = data;
306 for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
308 return (imode<<1) + invert;
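/*
 * Illustrative sketch, not part of the original file: bitplane_decoding()
 * folds the chosen coding mode and the INVERT flag into one status value,
 * which the callers below unpack as (status >> 1) for the Imode and
 * (status & 1) for the invert bit when logging.  Helper name is hypothetical.
 */
static inline void vc1_example_unpack_bitplane_status(int status, int *imode, int *invert)
{
    *imode  = status >> 1;   /* one of the Imode enum values, e.g. IMODE_NORM6 */
    *invert = status &  1;   /* bitplane was coded inverted */
}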
311 /** @} */ //Bitplane group
313 #define FILTSIGN(a) ((a) >= 0 ? 1 : -1)
315 * VC-1 in-loop deblocking filter for one line
 316  * @param src pointer to the pixels at the edge to filter
317 * @param stride block stride
318 * @param pq block quantizer
319 * @return whether other 3 pairs should be filtered or not
320 * @see 8.6
322 static av_always_inline int vc1_filter_line(uint8_t* src, int stride, int pq){
323 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
325 int a0 = (2*(src[-2*stride] - src[ 1*stride]) - 5*(src[-1*stride] - src[ 0*stride]) + 4) >> 3;
326 int a0_sign = a0 >> 31; /* Store sign */
327 a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
328 if(a0 < pq){
329 int a1 = FFABS((2*(src[-4*stride] - src[-1*stride]) - 5*(src[-3*stride] - src[-2*stride]) + 4) >> 3);
330 int a2 = FFABS((2*(src[ 0*stride] - src[ 3*stride]) - 5*(src[ 1*stride] - src[ 2*stride]) + 4) >> 3);
331 if(a1 < a0 || a2 < a0){
332 int clip = src[-1*stride] - src[ 0*stride];
333 int clip_sign = clip >> 31;
334 clip = ((clip ^ clip_sign) - clip_sign)>>1;
335 if(clip){
336 int a3 = FFMIN(a1, a2);
337 int d = 5 * (a3 - a0);
338 int d_sign = (d >> 31);
339 d = ((d ^ d_sign) - d_sign) >> 3;
340 d_sign ^= a0_sign;
342 if( d_sign ^ clip_sign )
343 d = 0;
344 else{
345 d = FFMIN(d, clip);
346 d = (d ^ d_sign) - d_sign; /* Restore sign */
347 src[-1*stride] = cm[src[-1*stride] - d];
348 src[ 0*stride] = cm[src[ 0*stride] + d];
350 return 1;
354 return 0;
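/*
 * Illustrative sketch, not part of the original file: the branchless absolute
 * value used in vc1_filter_line().  For a 32-bit int a, s = a >> 31 is 0 or -1
 * (all ones, assuming an arithmetic shift), and (a ^ s) - s equals FFABS(a).
 */
static inline int vc1_example_branchless_abs(int a)
{
    int s = a >> 31;       /* sign mask: 0 for a >= 0, -1 for a < 0 */
    return (a ^ s) - s;    /* same result as a < 0 ? -a : a */
}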
358 * VC-1 in-loop deblocking filter
 360  * @param src pointer to the block edge to filter
360 * @param step distance between horizontally adjacent elements
361 * @param stride distance between vertically adjacent elements
362 * @param len edge length to filter (4 or 8 pixels)
363 * @param pq block quantizer
364 * @see 8.6
366 static void vc1_loop_filter(uint8_t* src, int step, int stride, int len, int pq)
368 int i;
369 int filt3;
371 for(i = 0; i < len; i += 4){
372 filt3 = vc1_filter_line(src + 2*step, stride, pq);
373 if(filt3){
374 vc1_filter_line(src + 0*step, stride, pq);
375 vc1_filter_line(src + 1*step, stride, pq);
376 vc1_filter_line(src + 3*step, stride, pq);
378 src += step * 4;
382 static void vc1_loop_filter_iblk(MpegEncContext *s, int pq)
384 int i, j;
385 if(!s->first_slice_line)
386 vc1_loop_filter(s->dest[0], 1, s->linesize, 16, pq);
387 vc1_loop_filter(s->dest[0] + 8*s->linesize, 1, s->linesize, 16, pq);
388 for(i = !s->mb_x*8; i < 16; i += 8)
389 vc1_loop_filter(s->dest[0] + i, s->linesize, 1, 16, pq);
390 for(j = 0; j < 2; j++){
391 if(!s->first_slice_line)
392 vc1_loop_filter(s->dest[j+1], 1, s->uvlinesize, 8, pq);
393 if(s->mb_x)
394 vc1_loop_filter(s->dest[j+1], s->uvlinesize, 1, 8, pq);
398 /***********************************************************************/
399 /** VOP Dquant decoding
400 * @param v VC-1 Context
402 static int vop_dquant_decoding(VC1Context *v)
404 GetBitContext *gb = &v->s.gb;
405 int pqdiff;
407 //variable size
408 if (v->dquant == 2)
410 pqdiff = get_bits(gb, 3);
411 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
412 else v->altpq = v->pq + pqdiff + 1;
414 else
416 v->dquantfrm = get_bits1(gb);
417 if ( v->dquantfrm )
419 v->dqprofile = get_bits(gb, 2);
420 switch (v->dqprofile)
422 case DQPROFILE_SINGLE_EDGE:
423 case DQPROFILE_DOUBLE_EDGES:
424 v->dqsbedge = get_bits(gb, 2);
425 break;
426 case DQPROFILE_ALL_MBS:
427 v->dqbilevel = get_bits1(gb);
428 if(!v->dqbilevel)
429 v->halfpq = 0;
430 default: break; //Forbidden ?
432 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
434 pqdiff = get_bits(gb, 3);
435 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
436 else v->altpq = v->pq + pqdiff + 1;
440 return 0;
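/*
 * Illustrative sketch, not part of the original file: how ALTPQUANT is built
 * above.  PQDIFF is 3 bits; the escape value 7 means an explicit 5-bit ABSPQ
 * follows, otherwise the alternative quantizer is PQUANT + PQDIFF + 1.
 */
static inline int vc1_example_altpq(int pq, int pqdiff, int abspq)
{
    return (pqdiff == 7) ? abspq : pq + pqdiff + 1;
}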
443 /** Put block onto picture
445 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
447 uint8_t *Y;
448 int ys, us, vs;
449 DSPContext *dsp = &v->s.dsp;
451 if(v->rangeredfrm) {
452 int i, j, k;
453 for(k = 0; k < 6; k++)
454 for(j = 0; j < 8; j++)
455 for(i = 0; i < 8; i++)
456 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
459 ys = v->s.current_picture.linesize[0];
460 us = v->s.current_picture.linesize[1];
461 vs = v->s.current_picture.linesize[2];
462 Y = v->s.dest[0];
464 dsp->put_pixels_clamped(block[0], Y, ys);
465 dsp->put_pixels_clamped(block[1], Y + 8, ys);
466 Y += ys * 8;
467 dsp->put_pixels_clamped(block[2], Y, ys);
468 dsp->put_pixels_clamped(block[3], Y + 8, ys);
470 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
471 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
472 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
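/*
 * Illustrative sketch, not part of the original file: the two range reduction
 * mappings used in this file.  Samples are kept at half range around 128;
 * output expansion doubles the distance from 128 (vc1_put_block() above),
 * while reference scaling in the MC functions below halves it.
 */
static inline int vc1_example_range_expand(int x) { return ((x - 128) << 1) + 128; }
static inline int vc1_example_range_shrink(int x) { return ((x - 128) >> 1) + 128; }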
476 /** Do motion compensation over 1 macroblock
 477  * Mostly adapted from hpel_motion() and qpel_motion() in mpegvideo.c
479 static void vc1_mc_1mv(VC1Context *v, int dir)
481 MpegEncContext *s = &v->s;
482 DSPContext *dsp = &v->s.dsp;
483 uint8_t *srcY, *srcU, *srcV;
484 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
486 if(!v->s.last_picture.data[0])return;
488 mx = s->mv[dir][0][0];
489 my = s->mv[dir][0][1];
491 // store motion vectors for further use in B frames
492 if(s->pict_type == FF_P_TYPE) {
493 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
494 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
496 uvmx = (mx + ((mx & 3) == 3)) >> 1;
497 uvmy = (my + ((my & 3) == 3)) >> 1;
498 if(v->fastuvmc) {
499 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
500 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
502 if(!dir) {
503 srcY = s->last_picture.data[0];
504 srcU = s->last_picture.data[1];
505 srcV = s->last_picture.data[2];
506 } else {
507 srcY = s->next_picture.data[0];
508 srcU = s->next_picture.data[1];
509 srcV = s->next_picture.data[2];
512 src_x = s->mb_x * 16 + (mx >> 2);
513 src_y = s->mb_y * 16 + (my >> 2);
514 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
515 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
517 if(v->profile != PROFILE_ADVANCED){
518 src_x = av_clip( src_x, -16, s->mb_width * 16);
519 src_y = av_clip( src_y, -16, s->mb_height * 16);
520 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
521 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
522 }else{
523 src_x = av_clip( src_x, -17, s->avctx->coded_width);
524 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
525 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
526 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
529 srcY += src_y * s->linesize + src_x;
530 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
531 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
533 /* for grayscale we should not try to read from unknown area */
534 if(s->flags & CODEC_FLAG_GRAY) {
535 srcU = s->edge_emu_buffer + 18 * s->linesize;
536 srcV = s->edge_emu_buffer + 18 * s->linesize;
539 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
540 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
541 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
542 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
544 srcY -= s->mspel * (1 + s->linesize);
545 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
546 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
547 srcY = s->edge_emu_buffer;
548 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
549 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
550 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
551 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
552 srcU = uvbuf;
553 srcV = uvbuf + 16;
554 /* if we deal with range reduction we need to scale source blocks */
555 if(v->rangeredfrm) {
556 int i, j;
557 uint8_t *src, *src2;
559 src = srcY;
560 for(j = 0; j < 17 + s->mspel*2; j++) {
561 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
562 src += s->linesize;
564 src = srcU; src2 = srcV;
565 for(j = 0; j < 9; j++) {
566 for(i = 0; i < 9; i++) {
567 src[i] = ((src[i] - 128) >> 1) + 128;
568 src2[i] = ((src2[i] - 128) >> 1) + 128;
570 src += s->uvlinesize;
571 src2 += s->uvlinesize;
574 /* if we deal with intensity compensation we need to scale source blocks */
575 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
576 int i, j;
577 uint8_t *src, *src2;
579 src = srcY;
580 for(j = 0; j < 17 + s->mspel*2; j++) {
581 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
582 src += s->linesize;
584 src = srcU; src2 = srcV;
585 for(j = 0; j < 9; j++) {
586 for(i = 0; i < 9; i++) {
587 src[i] = v->lutuv[src[i]];
588 src2[i] = v->lutuv[src2[i]];
590 src += s->uvlinesize;
591 src2 += s->uvlinesize;
594 srcY += s->mspel * (1 + s->linesize);
597 if(s->mspel) {
598 dxy = ((my & 3) << 2) | (mx & 3);
599 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
600 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
601 srcY += s->linesize * 8;
602 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
603 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
604 } else { // hpel mc - always used for luma
605 dxy = (my & 2) | ((mx & 2) >> 1);
607 if(!v->rnd)
608 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
609 else
610 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
613 if(s->flags & CODEC_FLAG_GRAY) return;
614 /* Chroma MC always uses qpel bilinear */
615 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
616 uvmx = (uvmx&3)<<1;
617 uvmy = (uvmy&3)<<1;
618 if(!v->rnd){
619 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
620 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
621 }else{
622 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
623 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
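/*
 * Illustrative sketch, not part of the original file: chroma MV derivation as
 * done in vc1_mc_1mv() above.  The luma MV is halved, adding 1 first when the
 * quarter-pel remainder is 3, and FASTUVMC then rounds the result towards
 * zero to an even (half-pel) position.
 */
static void vc1_example_chroma_mv(int mx, int my, int fastuvmc, int *uvmx, int *uvmy)
{
    *uvmx = (mx + ((mx & 3) == 3)) >> 1;
    *uvmy = (my + ((my & 3) == 3)) >> 1;
    if (fastuvmc) {
        *uvmx += (*uvmx < 0) ? (*uvmx & 1) : -(*uvmx & 1);  /* force even */
        *uvmy += (*uvmy < 0) ? (*uvmy & 1) : -(*uvmy & 1);
    }
}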
627 /** Do motion compensation for 4-MV macroblock - luminance block
629 static void vc1_mc_4mv_luma(VC1Context *v, int n)
631 MpegEncContext *s = &v->s;
632 DSPContext *dsp = &v->s.dsp;
633 uint8_t *srcY;
634 int dxy, mx, my, src_x, src_y;
635 int off;
637 if(!v->s.last_picture.data[0])return;
638 mx = s->mv[0][n][0];
639 my = s->mv[0][n][1];
640 srcY = s->last_picture.data[0];
642 off = s->linesize * 4 * (n&2) + (n&1) * 8;
644 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
645 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
647 if(v->profile != PROFILE_ADVANCED){
648 src_x = av_clip( src_x, -16, s->mb_width * 16);
649 src_y = av_clip( src_y, -16, s->mb_height * 16);
650 }else{
651 src_x = av_clip( src_x, -17, s->avctx->coded_width);
652 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
655 srcY += src_y * s->linesize + src_x;
657 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
658 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
659 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
660 srcY -= s->mspel * (1 + s->linesize);
661 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
662 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
663 srcY = s->edge_emu_buffer;
664 /* if we deal with range reduction we need to scale source blocks */
665 if(v->rangeredfrm) {
666 int i, j;
667 uint8_t *src;
669 src = srcY;
670 for(j = 0; j < 9 + s->mspel*2; j++) {
671 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
672 src += s->linesize;
675 /* if we deal with intensity compensation we need to scale source blocks */
676 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
677 int i, j;
678 uint8_t *src;
680 src = srcY;
681 for(j = 0; j < 9 + s->mspel*2; j++) {
682 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
683 src += s->linesize;
686 srcY += s->mspel * (1 + s->linesize);
689 if(s->mspel) {
690 dxy = ((my & 3) << 2) | (mx & 3);
691 dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
692 } else { // hpel mc - always used for luma
693 dxy = (my & 2) | ((mx & 2) >> 1);
694 if(!v->rnd)
695 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
696 else
697 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
701 static inline int median4(int a, int b, int c, int d)
703 if(a < b) {
704 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
705 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
706 } else {
707 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
708 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
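/*
 * Illustrative check, not part of the original file: median4() averages the
 * two middle values of its arguments, e.g. median4(1, 7, 3, 5) == 4.
 * A naive sort-based reference for comparison:
 */
static int vc1_example_median4_ref(int a, int b, int c, int d)
{
    int v[4] = { a, b, c, d }, i, j, t;
    for (i = 0; i < 3; i++)
        for (j = 0; j < 3 - i; j++)
            if (v[j] > v[j + 1]) { t = v[j]; v[j] = v[j + 1]; v[j + 1] = t; }
    return (v[1] + v[2]) / 2;   /* mean of the two middle values */
}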
713 /** Do motion compensation for 4-MV macroblock - both chroma blocks
715 static void vc1_mc_4mv_chroma(VC1Context *v)
717 MpegEncContext *s = &v->s;
718 DSPContext *dsp = &v->s.dsp;
719 uint8_t *srcU, *srcV;
720 int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
721 int i, idx, tx = 0, ty = 0;
722 int mvx[4], mvy[4], intra[4];
723 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
725 if(!v->s.last_picture.data[0])return;
726 if(s->flags & CODEC_FLAG_GRAY) return;
728 for(i = 0; i < 4; i++) {
729 mvx[i] = s->mv[0][i][0];
730 mvy[i] = s->mv[0][i][1];
731 intra[i] = v->mb_type[0][s->block_index[i]];
734 /* calculate chroma MV vector from four luma MVs */
735 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
736 if(!idx) { // all blocks are inter
737 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
738 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
739 } else if(count[idx] == 1) { // 3 inter blocks
740 switch(idx) {
741 case 0x1:
742 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
743 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
744 break;
745 case 0x2:
746 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
747 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
748 break;
749 case 0x4:
750 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
751 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
752 break;
753 case 0x8:
754 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
755 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
756 break;
758 } else if(count[idx] == 2) {
759 int t1 = 0, t2 = 0;
760 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
761 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
762 tx = (mvx[t1] + mvx[t2]) / 2;
763 ty = (mvy[t1] + mvy[t2]) / 2;
764 } else {
765 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
766 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
767 return; //no need to do MC for inter blocks
770 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
771 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
772 uvmx = (tx + ((tx&3) == 3)) >> 1;
773 uvmy = (ty + ((ty&3) == 3)) >> 1;
774 if(v->fastuvmc) {
775 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
776 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
779 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
780 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
782 if(v->profile != PROFILE_ADVANCED){
783 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
784 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
785 }else{
786 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
787 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
790 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
791 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
792 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
793 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
794 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
795 ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
796 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
797 ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
798 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
799 srcU = s->edge_emu_buffer;
800 srcV = s->edge_emu_buffer + 16;
802 /* if we deal with range reduction we need to scale source blocks */
803 if(v->rangeredfrm) {
804 int i, j;
805 uint8_t *src, *src2;
807 src = srcU; src2 = srcV;
808 for(j = 0; j < 9; j++) {
809 for(i = 0; i < 9; i++) {
810 src[i] = ((src[i] - 128) >> 1) + 128;
811 src2[i] = ((src2[i] - 128) >> 1) + 128;
813 src += s->uvlinesize;
814 src2 += s->uvlinesize;
817 /* if we deal with intensity compensation we need to scale source blocks */
818 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
819 int i, j;
820 uint8_t *src, *src2;
822 src = srcU; src2 = srcV;
823 for(j = 0; j < 9; j++) {
824 for(i = 0; i < 9; i++) {
825 src[i] = v->lutuv[src[i]];
826 src2[i] = v->lutuv[src2[i]];
828 src += s->uvlinesize;
829 src2 += s->uvlinesize;
834 /* Chroma MC always uses qpel bilinear */
835 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
836 uvmx = (uvmx&3)<<1;
837 uvmy = (uvmy&3)<<1;
838 if(!v->rnd){
839 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
840 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
841 }else{
842 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
843 dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
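/*
 * Illustrative sketch, not part of the original file: in vc1_mc_4mv_chroma()
 * the intra[] flags form a 4-bit mask and count[mask] is a popcount, so the
 * chroma MV comes from however many luma blocks remain inter-coded:
 *   0 intra -> median4() of all four MVs
 *   1 intra -> mid_pred() of the three remaining MVs
 *   2 intra -> average of the two remaining MVs
 *   3 or 4  -> no chroma MC at all
 */
static inline int vc1_example_popcount4(int mask)
{
    return (mask & 1) + ((mask >> 1) & 1) + ((mask >> 2) & 1) + ((mask >> 3) & 1);
}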
847 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
850 * Decode Simple/Main Profiles sequence header
851 * @see Figure 7-8, p16-17
852 * @param avctx Codec context
853 * @param gb GetBit context initialized from Codec context extra_data
854 * @return Status
856 static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
858 VC1Context *v = avctx->priv_data;
860 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
861 v->profile = get_bits(gb, 2);
862 if (v->profile == PROFILE_COMPLEX)
864 av_log(avctx, AV_LOG_ERROR, "WMV3 Complex Profile is not fully supported\n");
867 if (v->profile == PROFILE_ADVANCED)
869 v->zz_8x4 = ff_vc1_adv_progressive_8x4_zz;
870 v->zz_4x8 = ff_vc1_adv_progressive_4x8_zz;
871 return decode_sequence_header_adv(v, gb);
873 else
875 v->zz_8x4 = wmv2_scantableA;
876 v->zz_4x8 = wmv2_scantableB;
877 v->res_sm = get_bits(gb, 2); //reserved
878 if (v->res_sm)
880 av_log(avctx, AV_LOG_ERROR,
881 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
882 return -1;
886 // (fps-2)/4 (->30)
887 v->frmrtq_postproc = get_bits(gb, 3); //common
888 // (bitrate-32kbps)/64kbps
889 v->bitrtq_postproc = get_bits(gb, 5); //common
890 v->s.loop_filter = get_bits1(gb); //common
891 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
893 av_log(avctx, AV_LOG_ERROR,
894 "LOOPFILTER shell not be enabled in simple profile\n");
896 if(v->s.avctx->skip_loop_filter >= AVDISCARD_ALL)
897 v->s.loop_filter = 0;
899 v->res_x8 = get_bits1(gb); //reserved
900 v->multires = get_bits1(gb);
901 v->res_fasttx = get_bits1(gb);
902 if (!v->res_fasttx)
904 v->s.dsp.vc1_inv_trans_8x8 = ff_simple_idct;
905 v->s.dsp.vc1_inv_trans_8x4 = ff_simple_idct84_add;
906 v->s.dsp.vc1_inv_trans_4x8 = ff_simple_idct48_add;
907 v->s.dsp.vc1_inv_trans_4x4 = ff_simple_idct44_add;
910 v->fastuvmc = get_bits1(gb); //common
911 if (!v->profile && !v->fastuvmc)
913 av_log(avctx, AV_LOG_ERROR,
914 "FASTUVMC unavailable in Simple Profile\n");
915 return -1;
917 v->extended_mv = get_bits1(gb); //common
918 if (!v->profile && v->extended_mv)
920 av_log(avctx, AV_LOG_ERROR,
921 "Extended MVs unavailable in Simple Profile\n");
922 return -1;
924 v->dquant = get_bits(gb, 2); //common
925 v->vstransform = get_bits1(gb); //common
927 v->res_transtab = get_bits1(gb);
928 if (v->res_transtab)
930 av_log(avctx, AV_LOG_ERROR,
931 "1 for reserved RES_TRANSTAB is forbidden\n");
932 return -1;
935 v->overlap = get_bits1(gb); //common
937 v->s.resync_marker = get_bits1(gb);
938 v->rangered = get_bits1(gb);
939 if (v->rangered && v->profile == PROFILE_SIMPLE)
941 av_log(avctx, AV_LOG_INFO,
942 "RANGERED should be set to 0 in simple profile\n");
945 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
946 v->quantizer_mode = get_bits(gb, 2); //common
948 v->finterpflag = get_bits1(gb); //common
949 v->res_rtm_flag = get_bits1(gb); //reserved
950 if (!v->res_rtm_flag)
952 // av_log(avctx, AV_LOG_ERROR,
953 // "0 for reserved RES_RTM_FLAG is forbidden\n");
954 av_log(avctx, AV_LOG_ERROR,
955 "Old WMV3 version detected, only I-frames will be decoded\n");
956 //return -1;
958 //TODO: figure out what they mean (always 0x402F)
959 if(!v->res_fasttx) skip_bits(gb, 16);
960 av_log(avctx, AV_LOG_DEBUG,
961 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
962 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
963 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
964 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
965 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
966 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
967 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
968 v->dquant, v->quantizer_mode, avctx->max_b_frames
970 return 0;
973 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
975 v->res_rtm_flag = 1;
976 v->level = get_bits(gb, 3);
977 if(v->level >= 5)
979 av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
981 v->chromaformat = get_bits(gb, 2);
982 if (v->chromaformat != 1)
984 av_log(v->s.avctx, AV_LOG_ERROR,
985 "Only 4:2:0 chroma format supported\n");
986 return -1;
989 // (fps-2)/4 (->30)
990 v->frmrtq_postproc = get_bits(gb, 3); //common
991 // (bitrate-32kbps)/64kbps
992 v->bitrtq_postproc = get_bits(gb, 5); //common
993 v->postprocflag = get_bits1(gb); //common
995 v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
996 v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
997 v->s.avctx->width = v->s.avctx->coded_width;
998 v->s.avctx->height = v->s.avctx->coded_height;
999 v->broadcast = get_bits1(gb);
1000 v->interlace = get_bits1(gb);
1001 v->tfcntrflag = get_bits1(gb);
1002 v->finterpflag = get_bits1(gb);
1003 skip_bits1(gb); // reserved
1005 v->s.h_edge_pos = v->s.avctx->coded_width;
1006 v->s.v_edge_pos = v->s.avctx->coded_height;
1008 av_log(v->s.avctx, AV_LOG_DEBUG,
1009 "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1010 "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
1011 "TFCTRflag=%i, FINTERPflag=%i\n",
1012 v->level, v->frmrtq_postproc, v->bitrtq_postproc,
1013 v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
1014 v->tfcntrflag, v->finterpflag
1017 v->psf = get_bits1(gb);
1018 if(v->psf) { //PsF, 6.1.13
1019 av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
1020 return -1;
1022 v->s.max_b_frames = v->s.avctx->max_b_frames = 7;
1023 if(get_bits1(gb)) { //Display Info - decoding is not affected by it
1024 int w, h, ar = 0;
1025 av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n");
1026 v->s.avctx->coded_width = w = get_bits(gb, 14) + 1;
1027 v->s.avctx->coded_height = h = get_bits(gb, 14) + 1;
1028 av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
1029 if(get_bits1(gb))
1030 ar = get_bits(gb, 4);
1031 if(ar && ar < 14){
1032 v->s.avctx->sample_aspect_ratio = ff_vc1_pixel_aspect[ar];
1033 }else if(ar == 15){
1034 w = get_bits(gb, 8);
1035 h = get_bits(gb, 8);
1036 v->s.avctx->sample_aspect_ratio = (AVRational){w, h};
1038 av_log(v->s.avctx, AV_LOG_DEBUG, "Aspect: %i:%i\n", v->s.avctx->sample_aspect_ratio.num, v->s.avctx->sample_aspect_ratio.den);
1040 if(get_bits1(gb)){ //framerate stuff
1041 if(get_bits1(gb)) {
1042 v->s.avctx->time_base.num = 32;
1043 v->s.avctx->time_base.den = get_bits(gb, 16) + 1;
1044 } else {
1045 int nr, dr;
1046 nr = get_bits(gb, 8);
1047 dr = get_bits(gb, 4);
1048 if(nr && nr < 8 && dr && dr < 3){
1049 v->s.avctx->time_base.num = ff_vc1_fps_dr[dr - 1];
1050 v->s.avctx->time_base.den = ff_vc1_fps_nr[nr - 1] * 1000;
1055 if(get_bits1(gb)){
1056 v->color_prim = get_bits(gb, 8);
1057 v->transfer_char = get_bits(gb, 8);
1058 v->matrix_coef = get_bits(gb, 8);
1062 v->hrd_param_flag = get_bits1(gb);
1063 if(v->hrd_param_flag) {
1064 int i;
1065 v->hrd_num_leaky_buckets = get_bits(gb, 5);
1066 skip_bits(gb, 4); //bitrate exponent
1067 skip_bits(gb, 4); //buffer size exponent
1068 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1069 skip_bits(gb, 16); //hrd_rate[n]
1070 skip_bits(gb, 16); //hrd_buffer[n]
1073 return 0;
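/*
 * Illustrative sketch, not part of the original file: Advanced Profile stores
 * (dimension / 2) - 1 in 12 bits, so the coded size is recovered as below;
 * for example a stored value of 959 yields 1920.
 */
static inline int vc1_example_coded_dimension(int stored_12bit)
{
    return (stored_12bit + 1) << 1;
}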
1076 static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
1078 VC1Context *v = avctx->priv_data;
1079 int i;
1081 av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
1082 v->broken_link = get_bits1(gb);
1083 v->closed_entry = get_bits1(gb);
1084 v->panscanflag = get_bits1(gb);
1085 v->refdist_flag = get_bits1(gb);
1086 v->s.loop_filter = get_bits1(gb);
1087 v->fastuvmc = get_bits1(gb);
1088 v->extended_mv = get_bits1(gb);
1089 v->dquant = get_bits(gb, 2);
1090 v->vstransform = get_bits1(gb);
1091 v->overlap = get_bits1(gb);
1092 v->quantizer_mode = get_bits(gb, 2);
1094 if(v->hrd_param_flag){
1095 for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
1096 skip_bits(gb, 8); //hrd_full[n]
1100 if(get_bits1(gb)){
1101 avctx->coded_width = (get_bits(gb, 12)+1)<<1;
1102 avctx->coded_height = (get_bits(gb, 12)+1)<<1;
1104 if(v->extended_mv)
1105 v->extended_dmv = get_bits1(gb);
1106 if((v->range_mapy_flag = get_bits1(gb))) {
1107 av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
1108 v->range_mapy = get_bits(gb, 3);
1110 if((v->range_mapuv_flag = get_bits1(gb))) {
1111 av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
1112 v->range_mapuv = get_bits(gb, 3);
1115 av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n"
1116 "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
1117 "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
1118 "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
1119 v->broken_link, v->closed_entry, v->panscanflag, v->refdist_flag, v->s.loop_filter,
1120 v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);
1122 return 0;
1125 static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
1127 int pqindex, lowquant, status;
1129 if(v->finterpflag) v->interpfrm = get_bits1(gb);
1130 skip_bits(gb, 2); //framecnt unused
1131 v->rangeredfrm = 0;
1132 if (v->rangered) v->rangeredfrm = get_bits1(gb);
1133 v->s.pict_type = get_bits1(gb);
1134 if (v->s.avctx->max_b_frames) {
1135 if (!v->s.pict_type) {
1136 if (get_bits1(gb)) v->s.pict_type = FF_I_TYPE;
1137 else v->s.pict_type = FF_B_TYPE;
1138 } else v->s.pict_type = FF_P_TYPE;
1139 } else v->s.pict_type = v->s.pict_type ? FF_P_TYPE : FF_I_TYPE;
1141 v->bi_type = 0;
1142 if(v->s.pict_type == FF_B_TYPE) {
1143 v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1144 v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
1145 if(v->bfraction == 0) {
1146 v->s.pict_type = FF_BI_TYPE;
1149 if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
1150 skip_bits(gb, 7); // skip buffer fullness
1152 /* calculate RND */
1153 if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
1154 v->rnd = 1;
1155 if(v->s.pict_type == FF_P_TYPE)
1156 v->rnd ^= 1;
1158 /* Quantizer stuff */
1159 pqindex = get_bits(gb, 5);
1160 if(!pqindex) return -1;
1161 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1162 v->pq = ff_vc1_pquant_table[0][pqindex];
1163 else
1164 v->pq = ff_vc1_pquant_table[1][pqindex];
1166 v->pquantizer = 1;
1167 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1168 v->pquantizer = pqindex < 9;
1169 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1170 v->pquantizer = 0;
1171 v->pqindex = pqindex;
1172 if (pqindex < 9) v->halfpq = get_bits1(gb);
1173 else v->halfpq = 0;
1174 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1175 v->pquantizer = get_bits1(gb);
1176 v->dquantfrm = 0;
1177 if (v->extended_mv == 1) v->mvrange = get_unary(gb, 0, 3);
1178 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1179 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1180 v->range_x = 1 << (v->k_x - 1);
1181 v->range_y = 1 << (v->k_y - 1);
1182 if (v->multires && v->s.pict_type != FF_B_TYPE) v->respic = get_bits(gb, 2);
1184 if(v->res_x8 && (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)){
1185 v->x8_type = get_bits1(gb);
1186 }else v->x8_type = 0;
1187 //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
1188 // (v->s.pict_type == FF_P_TYPE) ? 'P' : ((v->s.pict_type == FF_I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
1190 if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_P_TYPE) v->use_ic = 0;
1192 switch(v->s.pict_type) {
1193 case FF_P_TYPE:
1194 if (v->pq < 5) v->tt_index = 0;
1195 else if(v->pq < 13) v->tt_index = 1;
1196 else v->tt_index = 2;
1198 lowquant = (v->pq > 12) ? 0 : 1;
1199 v->mv_mode = ff_vc1_mv_pmode_table[lowquant][get_unary(gb, 1, 4)];
1200 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1202 int scale, shift, i;
1203 v->mv_mode2 = ff_vc1_mv_pmode_table2[lowquant][get_unary(gb, 1, 3)];
1204 v->lumscale = get_bits(gb, 6);
1205 v->lumshift = get_bits(gb, 6);
1206 v->use_ic = 1;
1207 /* fill lookup tables for intensity compensation */
1208 if(!v->lumscale) {
1209 scale = -64;
1210 shift = (255 - v->lumshift * 2) << 6;
1211 if(v->lumshift > 31)
1212 shift += 128 << 6;
1213 } else {
1214 scale = v->lumscale + 32;
1215 if(v->lumshift > 31)
1216 shift = (v->lumshift - 64) << 6;
1217 else
1218 shift = v->lumshift << 6;
1220 for(i = 0; i < 256; i++) {
1221 v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
1222 v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1225 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1226 v->s.quarter_sample = 0;
1227 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1228 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1229 v->s.quarter_sample = 0;
1230 else
1231 v->s.quarter_sample = 1;
1232 } else
1233 v->s.quarter_sample = 1;
1234 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1236 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1237 v->mv_mode2 == MV_PMODE_MIXED_MV)
1238 || v->mv_mode == MV_PMODE_MIXED_MV)
1240 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1241 if (status < 0) return -1;
1242 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1243 "Imode: %i, Invert: %i\n", status>>1, status&1);
1244 } else {
1245 v->mv_type_is_raw = 0;
1246 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1248 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1249 if (status < 0) return -1;
1250 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1251 "Imode: %i, Invert: %i\n", status>>1, status&1);
1253 /* Hopefully this is correct for P frames */
1254 v->s.mv_table_index = get_bits(gb, 2); //but using ff_vc1_ tables
1255 v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1257 if (v->dquant)
1259 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1260 vop_dquant_decoding(v);
1263 v->ttfrm = 0; //FIXME Is that so ?
1264 if (v->vstransform)
1266 v->ttmbf = get_bits1(gb);
1267 if (v->ttmbf)
1269 v->ttfrm = ff_vc1_ttfrm_to_tt[get_bits(gb, 2)];
1271 } else {
1272 v->ttmbf = 1;
1273 v->ttfrm = TT_8X8;
1275 break;
1276 case FF_B_TYPE:
1277 if (v->pq < 5) v->tt_index = 0;
1278 else if(v->pq < 13) v->tt_index = 1;
1279 else v->tt_index = 2;
1281 lowquant = (v->pq > 12) ? 0 : 1;
1282 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1283 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1284 v->s.mspel = v->s.quarter_sample;
1286 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1287 if (status < 0) return -1;
1288 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1289 "Imode: %i, Invert: %i\n", status>>1, status&1);
1290 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1291 if (status < 0) return -1;
1292 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1293 "Imode: %i, Invert: %i\n", status>>1, status&1);
1295 v->s.mv_table_index = get_bits(gb, 2);
1296 v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1298 if (v->dquant)
1300 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1301 vop_dquant_decoding(v);
1304 v->ttfrm = 0;
1305 if (v->vstransform)
1307 v->ttmbf = get_bits1(gb);
1308 if (v->ttmbf)
1310 v->ttfrm = ff_vc1_ttfrm_to_tt[get_bits(gb, 2)];
1312 } else {
1313 v->ttmbf = 1;
1314 v->ttfrm = TT_8X8;
1316 break;
1319 if(!v->x8_type)
1321 /* AC Syntax */
1322 v->c_ac_table_index = decode012(gb);
1323 if (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
1325 v->y_ac_table_index = decode012(gb);
1327 /* DC Syntax */
1328 v->s.dc_table_index = get_bits1(gb);
1331 if(v->s.pict_type == FF_BI_TYPE) {
1332 v->s.pict_type = FF_B_TYPE;
1333 v->bi_type = 1;
1335 return 0;
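/*
 * Illustrative sketch, not part of the original file: the intensity
 * compensation tables built above apply an affine remap in 1/64 units,
 *   luty[i]  = clip_uint8((scale * i         + shift  + 32) >> 6)
 *   lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6)
 * with scale/shift derived from the 6-bit LUMSCALE/LUMSHIFT fields as below
 * (LUMSCALE == 0 selects the negative-scale special case).
 */
static void vc1_example_ic_scale_shift(int lumscale, int lumshift, int *scale, int *shift)
{
    if (!lumscale) {
        *scale = -64;
        *shift = (255 - lumshift * 2) << 6;
        if (lumshift > 31)
            *shift += 128 << 6;
    } else {
        *scale = lumscale + 32;
        *shift = (lumshift > 31 ? lumshift - 64 : lumshift) << 6;
    }
}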
1338 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
1340 int pqindex, lowquant;
1341 int status;
1343 v->p_frame_skipped = 0;
1345 if(v->interlace){
1346 v->fcm = decode012(gb);
1347 if(v->fcm) return -1; // interlaced frames/fields are not implemented
1349 switch(get_unary(gb, 0, 4)) {
1350 case 0:
1351 v->s.pict_type = FF_P_TYPE;
1352 break;
1353 case 1:
1354 v->s.pict_type = FF_B_TYPE;
1355 break;
1356 case 2:
1357 v->s.pict_type = FF_I_TYPE;
1358 break;
1359 case 3:
1360 v->s.pict_type = FF_BI_TYPE;
1361 break;
1362 case 4:
1363 v->s.pict_type = FF_P_TYPE; // skipped pic
1364 v->p_frame_skipped = 1;
1365 return 0;
1367 if(v->tfcntrflag)
1368 skip_bits(gb, 8);
1369 if(v->broadcast) {
1370 if(!v->interlace || v->psf) {
1371 v->rptfrm = get_bits(gb, 2);
1372 } else {
1373 v->tff = get_bits1(gb);
1374 v->rptfrm = get_bits1(gb);
1377 if(v->panscanflag) {
1378 //...
1380 v->rnd = get_bits1(gb);
1381 if(v->interlace)
1382 v->uvsamp = get_bits1(gb);
1383 if(v->finterpflag) v->interpfrm = get_bits1(gb);
1384 if(v->s.pict_type == FF_B_TYPE) {
1385 v->bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
1386 v->bfraction = ff_vc1_bfraction_lut[v->bfraction_lut_index];
1387 if(v->bfraction == 0) {
1388 v->s.pict_type = FF_BI_TYPE; /* XXX: should not happen here */
1391 pqindex = get_bits(gb, 5);
1392 if(!pqindex) return -1;
1393 v->pqindex = pqindex;
1394 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1395 v->pq = ff_vc1_pquant_table[0][pqindex];
1396 else
1397 v->pq = ff_vc1_pquant_table[1][pqindex];
1399 v->pquantizer = 1;
1400 if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
1401 v->pquantizer = pqindex < 9;
1402 if (v->quantizer_mode == QUANT_NON_UNIFORM)
1403 v->pquantizer = 0;
1404 v->pqindex = pqindex;
1405 if (pqindex < 9) v->halfpq = get_bits1(gb);
1406 else v->halfpq = 0;
1407 if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
1408 v->pquantizer = get_bits1(gb);
1409 if(v->postprocflag)
1410 v->postproc = get_bits(gb, 2);
1412 if(v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_P_TYPE) v->use_ic = 0;
1414 switch(v->s.pict_type) {
1415 case FF_I_TYPE:
1416 case FF_BI_TYPE:
1417 status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
1418 if (status < 0) return -1;
1419 av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
1420 "Imode: %i, Invert: %i\n", status>>1, status&1);
1421 v->condover = CONDOVER_NONE;
1422 if(v->overlap && v->pq <= 8) {
1423 v->condover = decode012(gb);
1424 if(v->condover == CONDOVER_SELECT) {
1425 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
1426 if (status < 0) return -1;
1427 av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
1428 "Imode: %i, Invert: %i\n", status>>1, status&1);
1431 break;
1432 case FF_P_TYPE:
1433 if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3);
1434 else v->mvrange = 0;
1435 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1436 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1437 v->range_x = 1 << (v->k_x - 1);
1438 v->range_y = 1 << (v->k_y - 1);
1440 if (v->pq < 5) v->tt_index = 0;
1441 else if(v->pq < 13) v->tt_index = 1;
1442 else v->tt_index = 2;
1444 lowquant = (v->pq > 12) ? 0 : 1;
1445 v->mv_mode = ff_vc1_mv_pmode_table[lowquant][get_unary(gb, 1, 4)];
1446 if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
1448 int scale, shift, i;
1449 v->mv_mode2 = ff_vc1_mv_pmode_table2[lowquant][get_unary(gb, 1, 3)];
1450 v->lumscale = get_bits(gb, 6);
1451 v->lumshift = get_bits(gb, 6);
1452 /* fill lookup tables for intensity compensation */
1453 if(!v->lumscale) {
1454 scale = -64;
1455 shift = (255 - v->lumshift * 2) << 6;
1456 if(v->lumshift > 31)
1457 shift += 128 << 6;
1458 } else {
1459 scale = v->lumscale + 32;
1460 if(v->lumshift > 31)
1461 shift = (v->lumshift - 64) << 6;
1462 else
1463 shift = v->lumshift << 6;
1465 for(i = 0; i < 256; i++) {
1466 v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
1467 v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
1469 v->use_ic = 1;
1471 if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
1472 v->s.quarter_sample = 0;
1473 else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1474 if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
1475 v->s.quarter_sample = 0;
1476 else
1477 v->s.quarter_sample = 1;
1478 } else
1479 v->s.quarter_sample = 1;
1480 v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
1482 if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
1483 v->mv_mode2 == MV_PMODE_MIXED_MV)
1484 || v->mv_mode == MV_PMODE_MIXED_MV)
1486 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
1487 if (status < 0) return -1;
1488 av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
1489 "Imode: %i, Invert: %i\n", status>>1, status&1);
1490 } else {
1491 v->mv_type_is_raw = 0;
1492 memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
1494 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1495 if (status < 0) return -1;
1496 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1497 "Imode: %i, Invert: %i\n", status>>1, status&1);
1499 /* Hopefully this is correct for P frames */
1500 v->s.mv_table_index = get_bits(gb, 2); //but using ff_vc1_ tables
1501 v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1502 if (v->dquant)
1504 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1505 vop_dquant_decoding(v);
1508 v->ttfrm = 0; //FIXME Is that so ?
1509 if (v->vstransform)
1511 v->ttmbf = get_bits1(gb);
1512 if (v->ttmbf)
1514 v->ttfrm = ff_vc1_ttfrm_to_tt[get_bits(gb, 2)];
1516 } else {
1517 v->ttmbf = 1;
1518 v->ttfrm = TT_8X8;
1520 break;
1521 case FF_B_TYPE:
1522 if (v->extended_mv) v->mvrange = get_unary(gb, 0, 3);
1523 else v->mvrange = 0;
1524 v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
1525 v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
1526 v->range_x = 1 << (v->k_x - 1);
1527 v->range_y = 1 << (v->k_y - 1);
1529 if (v->pq < 5) v->tt_index = 0;
1530 else if(v->pq < 13) v->tt_index = 1;
1531 else v->tt_index = 2;
1533 lowquant = (v->pq > 12) ? 0 : 1;
1534 v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
1535 v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
1536 v->s.mspel = v->s.quarter_sample;
1538 status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
1539 if (status < 0) return -1;
1540 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
1541 "Imode: %i, Invert: %i\n", status>>1, status&1);
1542 status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
1543 if (status < 0) return -1;
1544 av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
1545 "Imode: %i, Invert: %i\n", status>>1, status&1);
1547 v->s.mv_table_index = get_bits(gb, 2);
1548 v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[get_bits(gb, 2)];
1550 if (v->dquant)
1552 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1553 vop_dquant_decoding(v);
1556 v->ttfrm = 0;
1557 if (v->vstransform)
1559 v->ttmbf = get_bits1(gb);
1560 if (v->ttmbf)
1562 v->ttfrm = ff_vc1_ttfrm_to_tt[get_bits(gb, 2)];
1564 } else {
1565 v->ttmbf = 1;
1566 v->ttfrm = TT_8X8;
1568 break;
1571 /* AC Syntax */
1572 v->c_ac_table_index = decode012(gb);
1573 if (v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE)
1575 v->y_ac_table_index = decode012(gb);
1577 /* DC Syntax */
1578 v->s.dc_table_index = get_bits1(gb);
1579 if ((v->s.pict_type == FF_I_TYPE || v->s.pict_type == FF_BI_TYPE) && v->dquant) {
1580 av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
1581 vop_dquant_decoding(v);
1584 v->bi_type = 0;
1585 if(v->s.pict_type == FF_BI_TYPE) {
1586 v->s.pict_type = FF_B_TYPE;
1587 v->bi_type = 1;
1589 return 0;
1592 /***********************************************************************/
1594 * @defgroup vc1block VC-1 Block-level functions
1595 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1596 * @{
1600 * @def GET_MQUANT
1601 * @brief Get macroblock-level quantizer scale
1603 #define GET_MQUANT() \
1604 if (v->dquantfrm) \
1606 int edges = 0; \
1607 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1609 if (v->dqbilevel) \
1611 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1613 else \
1615 mqdiff = get_bits(gb, 3); \
1616 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1617 else mquant = get_bits(gb, 5); \
1620 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1621 edges = 1 << v->dqsbedge; \
1622 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1623 edges = (3 << v->dqsbedge) % 15; \
1624 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1625 edges = 15; \
1626 if((edges&1) && !s->mb_x) \
1627 mquant = v->altpq; \
1628 if((edges&2) && s->first_slice_line) \
1629 mquant = v->altpq; \
1630 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1631 mquant = v->altpq; \
1632 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1633 mquant = v->altpq; \
1637 * @def GET_MVDATA(_dmv_x, _dmv_y)
1638 * @brief Get MV differentials
1639 * @see MVDATA decoding from 8.3.5.2, p(1)20
1640 * @param _dmv_x Horizontal differential for decoded MV
1641 * @param _dmv_y Vertical differential for decoded MV
1643 #define GET_MVDATA(_dmv_x, _dmv_y) \
1644 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table,\
1645 VC1_MV_DIFF_VLC_BITS, 2); \
1646 if (index > 36) \
1648 mb_has_coeffs = 1; \
1649 index -= 37; \
1651 else mb_has_coeffs = 0; \
1652 s->mb_intra = 0; \
1653 if (!index) { _dmv_x = _dmv_y = 0; } \
1654 else if (index == 35) \
1656 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1657 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1659 else if (index == 36) \
1661 _dmv_x = 0; \
1662 _dmv_y = 0; \
1663 s->mb_intra = 1; \
1665 else \
1667 index1 = index%6; \
1668 if (!s->quarter_sample && index1 == 5) val = 1; \
1669 else val = 0; \
1670 if(size_table[index1] - val > 0) \
1671 val = get_bits(gb, size_table[index1] - val); \
1672 else val = 0; \
1673 sign = 0 - (val&1); \
1674 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1676 index1 = index/6; \
1677 if (!s->quarter_sample && index1 == 5) val = 1; \
1678 else val = 0; \
1679 if(size_table[index1] - val > 0) \
1680 val = get_bits(gb, size_table[index1] - val); \
1681 else val = 0; \
1682 sign = 0 - (val&1); \
1683 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1686 /** Predict and set motion vector
1688 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
1690 int xy, wrap, off = 0;
1691 int16_t *A, *B, *C;
1692 int px, py;
1693 int sum;
1695 /* scale MV difference to be quad-pel */
1696 dmv_x <<= 1 - s->quarter_sample;
1697 dmv_y <<= 1 - s->quarter_sample;
1699 wrap = s->b8_stride;
1700 xy = s->block_index[n];
1702 if(s->mb_intra){
1703 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1704 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1705 s->current_picture.motion_val[1][xy][0] = 0;
1706 s->current_picture.motion_val[1][xy][1] = 0;
1707 if(mv1) { /* duplicate motion data for 1-MV block */
1708 s->current_picture.motion_val[0][xy + 1][0] = 0;
1709 s->current_picture.motion_val[0][xy + 1][1] = 0;
1710 s->current_picture.motion_val[0][xy + wrap][0] = 0;
1711 s->current_picture.motion_val[0][xy + wrap][1] = 0;
1712 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1713 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1714 s->current_picture.motion_val[1][xy + 1][0] = 0;
1715 s->current_picture.motion_val[1][xy + 1][1] = 0;
1716 s->current_picture.motion_val[1][xy + wrap][0] = 0;
1717 s->current_picture.motion_val[1][xy + wrap][1] = 0;
1718 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1719 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
1721 return;
1724 C = s->current_picture.motion_val[0][xy - 1];
1725 A = s->current_picture.motion_val[0][xy - wrap];
1726 if(mv1)
1727 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1728 else {
1729 //in 4-MV mode different blocks have different B predictor position
1730 switch(n){
1731 case 0:
1732 off = (s->mb_x > 0) ? -1 : 1;
1733 break;
1734 case 1:
1735 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1736 break;
1737 case 2:
1738 off = 1;
1739 break;
1740 case 3:
1741 off = -1;
1744 B = s->current_picture.motion_val[0][xy - wrap + off];
1746 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
1747 if(s->mb_width == 1) {
1748 px = A[0];
1749 py = A[1];
1750 } else {
1751 px = mid_pred(A[0], B[0], C[0]);
1752 py = mid_pred(A[1], B[1], C[1]);
1754 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
1755 px = C[0];
1756 py = C[1];
1757 } else {
1758 px = py = 0;
1760 /* Pullback MV as specified in 8.3.5.3.4 */
1762 int qx, qy, X, Y;
1763 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
1764 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
1765 X = (s->mb_width << 6) - 4;
1766 Y = (s->mb_height << 6) - 4;
1767 if(mv1) {
1768 if(qx + px < -60) px = -60 - qx;
1769 if(qy + py < -60) py = -60 - qy;
1770 } else {
1771 if(qx + px < -28) px = -28 - qx;
1772 if(qy + py < -28) py = -28 - qy;
1774 if(qx + px > X) px = X - qx;
1775 if(qy + py > Y) py = Y - qy;
1777 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1778 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
1779 if(is_intra[xy - wrap])
1780 sum = FFABS(px) + FFABS(py);
1781 else
1782 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
1783 if(sum > 32) {
1784 if(get_bits1(&s->gb)) {
1785 px = A[0];
1786 py = A[1];
1787 } else {
1788 px = C[0];
1789 py = C[1];
1791 } else {
1792 if(is_intra[xy - 1])
1793 sum = FFABS(px) + FFABS(py);
1794 else
1795 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
1796 if(sum > 32) {
1797 if(get_bits1(&s->gb)) {
1798 px = A[0];
1799 py = A[1];
1800 } else {
1801 px = C[0];
1802 py = C[1];
1807 /* store MV using signed modulus of MV range defined in 4.11 */
1808 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1809 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1810 if(mv1) { /* duplicate motion data for 1-MV block */
1811 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
1812 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
1813 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
1814 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
1815 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
1816 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
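/*
 * Illustrative sketch, not part of the original file: the "signed modulus"
 * used when storing the predicted MV above wraps the value into
 * [-range, range - 1], where range (range_x/range_y) is a power of two.
 */
static inline int vc1_example_mv_wrap(int v, int range)
{
    return ((v + range) & ((range << 1) - 1)) - range;
}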
1820 /** Motion compensation for direct or interpolated blocks in B-frames
1822 static void vc1_interp_mc(VC1Context *v)
1824 MpegEncContext *s = &v->s;
1825 DSPContext *dsp = &v->s.dsp;
1826 uint8_t *srcY, *srcU, *srcV;
1827 int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1829 if(!v->s.next_picture.data[0])return;
1831 mx = s->mv[1][0][0];
1832 my = s->mv[1][0][1];
1833 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1834 uvmy = (my + ((my & 3) == 3)) >> 1;
1835 if(v->fastuvmc) {
1836 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
1837 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
1839 srcY = s->next_picture.data[0];
1840 srcU = s->next_picture.data[1];
1841 srcV = s->next_picture.data[2];
1843 src_x = s->mb_x * 16 + (mx >> 2);
1844 src_y = s->mb_y * 16 + (my >> 2);
1845 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1846 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1848 if(v->profile != PROFILE_ADVANCED){
1849 src_x = av_clip( src_x, -16, s->mb_width * 16);
1850 src_y = av_clip( src_y, -16, s->mb_height * 16);
1851 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1852 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1853 }else{
1854 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1855 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1856 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1857 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1860 srcY += src_y * s->linesize + src_x;
1861 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1862 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1864 /* for grayscale we should not try to read from unknown area */
1865 if(s->flags & CODEC_FLAG_GRAY) {
1866 srcU = s->edge_emu_buffer + 18 * s->linesize;
1867 srcV = s->edge_emu_buffer + 18 * s->linesize;
1870 if(v->rangeredfrm
1871 || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
1872 || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
1873 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
1875 srcY -= s->mspel * (1 + s->linesize);
1876 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
1877 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
1878 srcY = s->edge_emu_buffer;
1879 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
1880 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1881 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
1882 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1883 srcU = uvbuf;
1884 srcV = uvbuf + 16;
1885 /* if we deal with range reduction we need to scale source blocks */
1886 if(v->rangeredfrm) {
1887 int i, j;
1888 uint8_t *src, *src2;
1890 src = srcY;
1891 for(j = 0; j < 17 + s->mspel*2; j++) {
1892 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
1893 src += s->linesize;
1895 src = srcU; src2 = srcV;
1896 for(j = 0; j < 9; j++) {
1897 for(i = 0; i < 9; i++) {
1898 src[i] = ((src[i] - 128) >> 1) + 128;
1899 src2[i] = ((src2[i] - 128) >> 1) + 128;
1901 src += s->uvlinesize;
1902 src2 += s->uvlinesize;
1905 srcY += s->mspel * (1 + s->linesize);
1908 mx >>= 1;
1909 my >>= 1;
1910 dxy = ((my & 1) << 1) | (mx & 1);
1912 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
1914 if(s->flags & CODEC_FLAG_GRAY) return;
1915 /* Chroma MC always uses qpel bilinear */
1916 uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
1917 uvmx = (uvmx&3)<<1;
1918 uvmy = (uvmy&3)<<1;
1919 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1920 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
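/** Scale one co-located MV component by the B-frame fraction (direct prediction).
 *  With inv set, the complementary (backward) fraction is used; when quarter-pel
 *  is disabled, the result is forced to half-pel units.
 */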
1923 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
1925 int n = bfrac;
1927 #if B_FRACTION_DEN==256
1928 if(inv)
1929 n -= 256;
1930 if(!qs)
1931 return 2 * ((value * n + 255) >> 9);
1932 return (value * n + 128) >> 8;
1933 #else
1934 if(inv)
1935 n -= B_FRACTION_DEN;
1936 if(!qs)
1937 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
1938 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
1939 #endif
1942 /** Reconstruct motion vector for B-frame and do motion compensation
1944 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
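    /* When intensity compensation is in use, mv_mode is temporarily switched to
       MV_PMODE_INTENSITY_COMP around the forward MC call (presumably so that
       vc1_mc_1mv applies the IC look-up tables) and restored afterwards. */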
1946 if(v->use_ic) {
1947 v->mv_mode2 = v->mv_mode;
1948 v->mv_mode = MV_PMODE_INTENSITY_COMP;
1950 if(direct) {
1951 vc1_mc_1mv(v, 0);
1952 vc1_interp_mc(v);
1953 if(v->use_ic) v->mv_mode = v->mv_mode2;
1954 return;
1956 if(mode == BMV_TYPE_INTERPOLATED) {
1957 vc1_mc_1mv(v, 0);
1958 vc1_interp_mc(v);
1959 if(v->use_ic) v->mv_mode = v->mv_mode2;
1960 return;
1963 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
1964 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
1965 if(v->use_ic) v->mv_mode = v->mv_mode2;
1968 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
1970 MpegEncContext *s = &v->s;
1971 int xy, wrap, off = 0;
1972 int16_t *A, *B, *C;
1973 int px, py;
1974 int sum;
1975 int r_x, r_y;
1976 const uint8_t *is_intra = v->mb_type[0];
1978 r_x = v->range_x;
1979 r_y = v->range_y;
1980 /* scale MV difference to be quad-pel */
1981 dmv_x[0] <<= 1 - s->quarter_sample;
1982 dmv_y[0] <<= 1 - s->quarter_sample;
1983 dmv_x[1] <<= 1 - s->quarter_sample;
1984 dmv_y[1] <<= 1 - s->quarter_sample;
1986 wrap = s->b8_stride;
1987 xy = s->block_index[0];
1989 if(s->mb_intra) {
1990 s->current_picture.motion_val[0][xy][0] =
1991 s->current_picture.motion_val[0][xy][1] =
1992 s->current_picture.motion_val[1][xy][0] =
1993 s->current_picture.motion_val[1][xy][1] = 0;
1994 return;
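    /* direct prediction: derive the forward and backward MV candidates by scaling
       the co-located MV of the next (anchor) picture with the B fraction */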
1996 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
1997 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
1998 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
1999 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2001 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2002 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2003 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2004 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2005 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2006 if(direct) {
2007 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2008 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2009 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2010 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2011 return;
2014 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2015 C = s->current_picture.motion_val[0][xy - 2];
2016 A = s->current_picture.motion_val[0][xy - wrap*2];
2017 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2018 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
2020 if(!s->mb_x) C[0] = C[1] = 0;
2021 if(!s->first_slice_line) { // predictor A is not out of bounds
2022 if(s->mb_width == 1) {
2023 px = A[0];
2024 py = A[1];
2025 } else {
2026 px = mid_pred(A[0], B[0], C[0]);
2027 py = mid_pred(A[1], B[1], C[1]);
2029 } else if(s->mb_x) { // predictor C is not out of bounds
2030 px = C[0];
2031 py = C[1];
2032 } else {
2033 px = py = 0;
2035 /* Pullback MV as specified in 8.3.5.3.4 */
2037 int qx, qy, X, Y;
2038 if(v->profile < PROFILE_ADVANCED) {
2039 qx = (s->mb_x << 5);
2040 qy = (s->mb_y << 5);
2041 X = (s->mb_width << 5) - 4;
2042 Y = (s->mb_height << 5) - 4;
2043 if(qx + px < -28) px = -28 - qx;
2044 if(qy + py < -28) py = -28 - qy;
2045 if(qx + px > X) px = X - qx;
2046 if(qy + py > Y) py = Y - qy;
2047 } else {
2048 qx = (s->mb_x << 6);
2049 qy = (s->mb_y << 6);
2050 X = (s->mb_width << 6) - 4;
2051 Y = (s->mb_height << 6) - 4;
2052 if(qx + px < -60) px = -60 - qx;
2053 if(qy + py < -60) py = -60 - qy;
2054 if(qx + px > X) px = X - qx;
2055 if(qy + py > Y) py = Y - qy;
2058 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
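    /* note: the hybrid prediction below is disabled by the constant 0 in the
       condition and is apparently kept only for reference */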
2059 if(0 && !s->first_slice_line && s->mb_x) {
2060 if(is_intra[xy - wrap])
2061 sum = FFABS(px) + FFABS(py);
2062 else
2063 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2064 if(sum > 32) {
2065 if(get_bits1(&s->gb)) {
2066 px = A[0];
2067 py = A[1];
2068 } else {
2069 px = C[0];
2070 py = C[1];
2072 } else {
2073 if(is_intra[xy - 2])
2074 sum = FFABS(px) + FFABS(py);
2075 else
2076 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2077 if(sum > 32) {
2078 if(get_bits1(&s->gb)) {
2079 px = A[0];
2080 py = A[1];
2081 } else {
2082 px = C[0];
2083 py = C[1];
2088 /* store MV using signed modulus of MV range defined in 4.11 */
2089 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2090 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2092 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2093 C = s->current_picture.motion_val[1][xy - 2];
2094 A = s->current_picture.motion_val[1][xy - wrap*2];
2095 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2096 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
2098 if(!s->mb_x) C[0] = C[1] = 0;
2099 if(!s->first_slice_line) { // predictor A is not out of bounds
2100 if(s->mb_width == 1) {
2101 px = A[0];
2102 py = A[1];
2103 } else {
2104 px = mid_pred(A[0], B[0], C[0]);
2105 py = mid_pred(A[1], B[1], C[1]);
2107 } else if(s->mb_x) { // predictor C is not out of bounds
2108 px = C[0];
2109 py = C[1];
2110 } else {
2111 px = py = 0;
2113 /* Pullback MV as specified in 8.3.5.3.4 */
2115 int qx, qy, X, Y;
2116 if(v->profile < PROFILE_ADVANCED) {
2117 qx = (s->mb_x << 5);
2118 qy = (s->mb_y << 5);
2119 X = (s->mb_width << 5) - 4;
2120 Y = (s->mb_height << 5) - 4;
2121 if(qx + px < -28) px = -28 - qx;
2122 if(qy + py < -28) py = -28 - qy;
2123 if(qx + px > X) px = X - qx;
2124 if(qy + py > Y) py = Y - qy;
2125 } else {
2126 qx = (s->mb_x << 6);
2127 qy = (s->mb_y << 6);
2128 X = (s->mb_width << 6) - 4;
2129 Y = (s->mb_height << 6) - 4;
2130 if(qx + px < -60) px = -60 - qx;
2131 if(qy + py < -60) py = -60 - qy;
2132 if(qx + px > X) px = X - qx;
2133 if(qy + py > Y) py = Y - qy;
2136 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
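    /* hybrid prediction disabled by the 0 && here as well, as in the forward case above */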
2137 if(0 && !s->first_slice_line && s->mb_x) {
2138 if(is_intra[xy - wrap])
2139 sum = FFABS(px) + FFABS(py);
2140 else
2141 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2142 if(sum > 32) {
2143 if(get_bits1(&s->gb)) {
2144 px = A[0];
2145 py = A[1];
2146 } else {
2147 px = C[0];
2148 py = C[1];
2150 } else {
2151 if(is_intra[xy - 2])
2152 sum = FFABS(px) + FFABS(py);
2153 else
2154 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2155 if(sum > 32) {
2156 if(get_bits1(&s->gb)) {
2157 px = A[0];
2158 py = A[1];
2159 } else {
2160 px = C[0];
2161 py = C[1];
2166 /* store MV using signed modulus of MV range defined in 4.11 */
2168 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2169 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2171 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2172 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2173 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2174 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2177 /** Get predicted DC value for I-frames only
2178  * prediction dir: left=1, top=0
2179 * @param s MpegEncContext
2180 * @param overlap flag indicating that overlap filtering is used
2181 * @param pq integer part of picture quantizer
2182 * @param[in] n block index in the current MB
2183 * @param dc_val_ptr Pointer to DC predictor
2184 * @param dir_ptr Prediction direction for use in AC prediction
2186 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2187 int16_t **dc_val_ptr, int *dir_ptr)
2189 int a, b, c, wrap, pred, scale;
2190 int16_t *dc_val;
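    /* dcpred[i] is approximately 1024/i - the default DC predictor value used for
       neighbours that lie outside the picture or above the first slice line */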
2191 static const uint16_t dcpred[32] = {
2192 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2193 114, 102, 93, 85, 79, 73, 68, 64,
2194 60, 57, 54, 51, 49, 47, 45, 43,
2195 41, 39, 38, 37, 35, 34, 33
2198 /* find prediction - wmv3_dc_scale always used here in fact */
2199 if (n < 4) scale = s->y_dc_scale;
2200 else scale = s->c_dc_scale;
2202 wrap = s->block_wrap[n];
2203 dc_val= s->dc_val[0] + s->block_index[n];
2205 /* B A
2206 * C X
2208 c = dc_val[ - 1];
2209 b = dc_val[ - 1 - wrap];
2210 a = dc_val[ - wrap];
2212 if (pq < 9 || !overlap)
2214 /* Set outer values */
2215 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2216 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2218 else
2220 /* Set outer values */
2221 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2222 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
2225 if (abs(a - b) <= abs(b - c)) {
2226 pred = c;
2227 *dir_ptr = 1;//left
2228 } else {
2229 pred = a;
2230 *dir_ptr = 0;//top
2233 /* update predictor */
2234 *dc_val_ptr = &dc_val[0];
2235 return pred;
2239 /** Get predicted DC value
2240  * prediction dir: left=1, top=0
2241 * @param s MpegEncContext
2242 * @param overlap flag indicating that overlap filtering is used
2243 * @param pq integer part of picture quantizer
2244 * @param[in] n block index in the current MB
2245 * @param a_avail flag indicating top block availability
2246 * @param c_avail flag indicating left block availability
2247 * @param dc_val_ptr Pointer to DC predictor
2248 * @param dir_ptr Prediction direction for use in AC prediction
2250 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2251 int a_avail, int c_avail,
2252 int16_t **dc_val_ptr, int *dir_ptr)
2254 int a, b, c, wrap, pred, scale;
2255 int16_t *dc_val;
2256 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2257 int q1, q2 = 0;
2259 /* find prediction - wmv3_dc_scale always used here in fact */
2260 if (n < 4) scale = s->y_dc_scale;
2261 else scale = s->c_dc_scale;
2263 wrap = s->block_wrap[n];
2264 dc_val= s->dc_val[0] + s->block_index[n];
2266 /* B A
2267 * C X
2269 c = dc_val[ - 1];
2270 b = dc_val[ - 1 - wrap];
2271 a = dc_val[ - wrap];
2272 /* scale predictors if needed */
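    /* ff_vc1_dqscale[] appears to hold reciprocals in 18-bit fixed point, so each
       expression below rescales a neighbouring DC value coded with quantizer q2 to
       the current quantizer q1 (x * dc_scale[q2] / dc_scale[q1], rounded) */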
2273 q1 = s->current_picture.qscale_table[mb_pos];
2274 if(c_avail && (n!= 1 && n!=3)) {
2275 q2 = s->current_picture.qscale_table[mb_pos - 1];
2276 if(q2 && q2 != q1)
2277 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2279 if(a_avail && (n!= 2 && n!=3)) {
2280 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2281 if(q2 && q2 != q1)
2282 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2284 if(a_avail && c_avail && (n!=3)) {
2285 int off = mb_pos;
2286 if(n != 1) off--;
2287 if(n != 2) off -= s->mb_stride;
2288 q2 = s->current_picture.qscale_table[off];
2289 if(q2 && q2 != q1)
2290 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2293 if(a_avail && c_avail) {
2294 if(abs(a - b) <= abs(b - c)) {
2295 pred = c;
2296 *dir_ptr = 1;//left
2297 } else {
2298 pred = a;
2299 *dir_ptr = 0;//top
2301 } else if(a_avail) {
2302 pred = a;
2303 *dir_ptr = 0;//top
2304 } else if(c_avail) {
2305 pred = c;
2306 *dir_ptr = 1;//left
2307 } else {
2308 pred = 0;
2309 *dir_ptr = 1;//left
2312 /* update predictor */
2313 *dc_val_ptr = &dc_val[0];
2314 return pred;
2317 /** @} */ // Block group
2320 * @defgroup vc1_std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2321  * @see 7.1.4, p91 and 8.1.1.7, p104
2322 * @{
2325 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2327 int xy, wrap, pred, a, b, c;
2329 xy = s->block_index[n];
2330 wrap = s->b8_stride;
2332 /* B C
2333 * A X
2335 a = s->coded_block[xy - 1 ];
2336 b = s->coded_block[xy - 1 - wrap];
2337 c = s->coded_block[xy - wrap];
2339 if (b == c) {
2340 pred = a;
2341 } else {
2342 pred = c;
2345 /* store value */
2346 *coded_block_ptr = &s->coded_block[xy];
2348 return pred;
2352 * Decode one AC coefficient
2353 * @param v The VC1 context
2354 * @param last Last coefficient
2355  * @param skip How many zero coefficients to skip
2356 * @param value Decoded AC coefficient value
2357 * @param codingset set of VLC to decode data
2358 * @see 8.1.3.4
2360 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2362 GetBitContext *gb = &v->s.gb;
2363 int index, escape, run = 0, level = 0, lst = 0;
2365 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2366 if (index != vc1_ac_sizes[codingset] - 1) {
2367 run = vc1_index_decode_table[codingset][index][0];
2368 level = vc1_index_decode_table[codingset][index][1];
2369 lst = index >= vc1_last_decode_table[codingset];
2370 if(get_bits1(gb))
2371 level = -level;
2372 } else {
2373 escape = decode210(gb);
2374 if (escape != 2) {
2375 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2376 run = vc1_index_decode_table[codingset][index][0];
2377 level = vc1_index_decode_table[codingset][index][1];
2378 lst = index >= vc1_last_decode_table[codingset];
2379 if(escape == 0) {
2380 if(lst)
2381 level += vc1_last_delta_level_table[codingset][run];
2382 else
2383 level += vc1_delta_level_table[codingset][run];
2384 } else {
2385 if(lst)
2386 run += vc1_last_delta_run_table[codingset][level] + 1;
2387 else
2388 run += vc1_delta_run_table[codingset][level] + 1;
2390 if(get_bits1(gb))
2391 level = -level;
2392 } else {
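            /* escape mode 3: run and level are transmitted as fixed-length fields;
               their sizes are read once here and reused for the rest of the picture */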
2393 int sign;
2394 lst = get_bits1(gb);
2395 if(v->s.esc3_level_length == 0) {
2396 if(v->pq < 8 || v->dquantfrm) { // table 59
2397 v->s.esc3_level_length = get_bits(gb, 3);
2398 if(!v->s.esc3_level_length)
2399 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2400 } else { //table 60
2401 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2403 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2405 run = get_bits(gb, v->s.esc3_run_length);
2406 sign = get_bits1(gb);
2407 level = get_bits(gb, v->s.esc3_level_length);
2408 if(sign)
2409 level = -level;
2413 *last = lst;
2414 *skip = run;
2415 *value = level;
2418 /** Decode intra block in intra frames - should be faster than decode_intra_block
2419 * @param v VC1Context
2420 * @param block block to decode
2421 * @param[in] n subblock index
2422 * @param coded are AC coeffs present or not
2423 * @param codingset set of VLC to decode data
2425 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2427 GetBitContext *gb = &v->s.gb;
2428 MpegEncContext *s = &v->s;
2429 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2430 int run_diff, i;
2431 int16_t *dc_val;
2432 int16_t *ac_val, *ac_val2;
2433 int dcdiff;
2435 /* Get DC differential */
2436 if (n < 4) {
2437 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2438 } else {
2439 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2441 if (dcdiff < 0){
2442 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2443 return -1;
2445 if (dcdiff)
2447 if (dcdiff == 119 /* ESC index value */)
2449 /* TODO: Optimize */
2450 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2451 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2452 else dcdiff = get_bits(gb, 8);
2454 else
2456 if (v->pq == 1)
2457 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2458 else if (v->pq == 2)
2459 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
2461 if (get_bits1(gb))
2462 dcdiff = -dcdiff;
2465 /* Prediction */
2466 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2467 *dc_val = dcdiff;
2469 /* Store the quantized DC coeff, used for prediction */
2470 if (n < 4) {
2471 block[0] = dcdiff * s->y_dc_scale;
2472 } else {
2473 block[0] = dcdiff * s->c_dc_scale;
2475 /* Skip ? */
2476 run_diff = 0;
2477 i = 0;
2478 if (!coded) {
2479 goto not_coded;
2482 //AC Decoding
2483 i = 1;
2486 int last = 0, skip, value;
2487 const int8_t *zz_table;
2488 int scale;
2489 int k;
2491 scale = v->pq * 2 + v->halfpq;
2493 if(v->s.ac_pred) {
2494 if(!dc_pred_dir)
2495 zz_table = wmv1_scantable[2];
2496 else
2497 zz_table = wmv1_scantable[3];
2498 } else
2499 zz_table = wmv1_scantable[1];
2501 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2502 ac_val2 = ac_val;
2503 if(dc_pred_dir) //left
2504 ac_val -= 16;
2505 else //top
2506 ac_val -= 16 * s->block_wrap[n];
2508 while (!last) {
2509 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2510 i += skip;
2511 if(i > 63)
2512 break;
2513 block[zz_table[i++]] = value;
2516 /* apply AC prediction if needed */
2517 if(s->ac_pred) {
2518 if(dc_pred_dir) { //left
2519 for(k = 1; k < 8; k++)
2520 block[k << 3] += ac_val[k];
2521 } else { //top
2522 for(k = 1; k < 8; k++)
2523 block[k] += ac_val[k + 8];
2526 /* save AC coeffs for further prediction */
2527 for(k = 1; k < 8; k++) {
2528 ac_val2[k] = block[k << 3];
2529 ac_val2[k + 8] = block[k];
2532 /* scale AC coeffs */
2533 for(k = 1; k < 64; k++)
2534 if(block[k]) {
2535 block[k] *= scale;
2536 if(!v->pquantizer)
2537 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2540 if(s->ac_pred) i = 63;
2543 not_coded:
2544 if(!coded) {
2545 int k, scale;
2546 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2547 ac_val2 = ac_val;
2549 scale = v->pq * 2 + v->halfpq;
2550 memset(ac_val2, 0, 16 * 2);
2551 if(dc_pred_dir) {//left
2552 ac_val -= 16;
2553 if(s->ac_pred)
2554 memcpy(ac_val2, ac_val, 8 * 2);
2555 } else {//top
2556 ac_val -= 16 * s->block_wrap[n];
2557 if(s->ac_pred)
2558 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2561 /* apply AC prediction if needed */
2562 if(s->ac_pred) {
2563 if(dc_pred_dir) { //left
2564 for(k = 1; k < 8; k++) {
2565 block[k << 3] = ac_val[k] * scale;
2566 if(!v->pquantizer && block[k << 3])
2567 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2569 } else { //top
2570 for(k = 1; k < 8; k++) {
2571 block[k] = ac_val[k + 8] * scale;
2572 if(!v->pquantizer && block[k])
2573 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2576 i = 63;
2579 s->block_last_index[n] = i;
2581 return 0;
2584 /** Decode intra block in intra frames (advanced profile variant) - should be faster than decode_intra_block
2585 * @param v VC1Context
2586 * @param block block to decode
2587 * @param[in] n subblock number
2588 * @param coded are AC coeffs present or not
2589 * @param codingset set of VLC to decode data
2590 * @param mquant quantizer value for this macroblock
2592 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2594 GetBitContext *gb = &v->s.gb;
2595 MpegEncContext *s = &v->s;
2596 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2597 int run_diff, i;
2598 int16_t *dc_val;
2599 int16_t *ac_val, *ac_val2;
2600 int dcdiff;
2601 int a_avail = v->a_avail, c_avail = v->c_avail;
2602 int use_pred = s->ac_pred;
2603 int scale;
2604 int q1, q2 = 0;
2605 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2607 /* Get DC differential */
2608 if (n < 4) {
2609 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2610 } else {
2611 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2613 if (dcdiff < 0){
2614 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2615 return -1;
2617 if (dcdiff)
2619 if (dcdiff == 119 /* ESC index value */)
2621 /* TODO: Optimize */
2622 if (mquant == 1) dcdiff = get_bits(gb, 10);
2623 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2624 else dcdiff = get_bits(gb, 8);
2626 else
2628 if (mquant == 1)
2629 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2630 else if (mquant == 2)
2631 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
2633 if (get_bits1(gb))
2634 dcdiff = -dcdiff;
2637 /* Prediction */
2638 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2639 *dc_val = dcdiff;
2641 /* Store the quantized DC coeff, used for prediction */
2642 if (n < 4) {
2643 block[0] = dcdiff * s->y_dc_scale;
2644 } else {
2645 block[0] = dcdiff * s->c_dc_scale;
2647 /* Skip ? */
2648 run_diff = 0;
2649 i = 0;
2651 //AC Decoding
2652 i = 1;
2654 /* check if AC is needed at all */
2655 if(!a_avail && !c_avail) use_pred = 0;
2656 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2657 ac_val2 = ac_val;
2659 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2661 if(dc_pred_dir) //left
2662 ac_val -= 16;
2663 else //top
2664 ac_val -= 16 * s->block_wrap[n];
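    /* q2 is the quantizer of the block the AC/DC prediction comes from; when that
       block lies inside the current macroblock it simply equals q1 */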
2666 q1 = s->current_picture.qscale_table[mb_pos];
2667 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
2668 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2669 if(dc_pred_dir && n==1) q2 = q1;
2670 if(!dc_pred_dir && n==2) q2 = q1;
2671 if(n==3) q2 = q1;
2673 if(coded) {
2674 int last = 0, skip, value;
2675 const int8_t *zz_table;
2676 int k;
2678 if(v->s.ac_pred) {
2679 if(!dc_pred_dir)
2680 zz_table = wmv1_scantable[2];
2681 else
2682 zz_table = wmv1_scantable[3];
2683 } else
2684 zz_table = wmv1_scantable[1];
2686 while (!last) {
2687 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2688 i += skip;
2689 if(i > 63)
2690 break;
2691 block[zz_table[i++]] = value;
2694 /* apply AC prediction if needed */
2695 if(use_pred) {
2696 /* scale predictors if needed*/
2697 if(q2 && q1!=q2) {
2698 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2699 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2701 if(dc_pred_dir) { //left
2702 for(k = 1; k < 8; k++)
2703 block[k << 3] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2704 } else { //top
2705 for(k = 1; k < 8; k++)
2706 block[k] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2708 } else {
2709 if(dc_pred_dir) { //left
2710 for(k = 1; k < 8; k++)
2711 block[k << 3] += ac_val[k];
2712 } else { //top
2713 for(k = 1; k < 8; k++)
2714 block[k] += ac_val[k + 8];
2718 /* save AC coeffs for further prediction */
2719 for(k = 1; k < 8; k++) {
2720 ac_val2[k] = block[k << 3];
2721 ac_val2[k + 8] = block[k];
2724 /* scale AC coeffs */
2725 for(k = 1; k < 64; k++)
2726 if(block[k]) {
2727 block[k] *= scale;
2728 if(!v->pquantizer)
2729 block[k] += (block[k] < 0) ? -mquant : mquant;
2732 if(use_pred) i = 63;
2733 } else { // no AC coeffs
2734 int k;
2736 memset(ac_val2, 0, 16 * 2);
2737 if(dc_pred_dir) {//left
2738 if(use_pred) {
2739 memcpy(ac_val2, ac_val, 8 * 2);
2740 if(q2 && q1!=q2) {
2741 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2742 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2743 for(k = 1; k < 8; k++)
2744 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2747 } else {//top
2748 if(use_pred) {
2749 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2750 if(q2 && q1!=q2) {
2751 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2752 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2753 for(k = 1; k < 8; k++)
2754 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2759 /* apply AC prediction if needed */
2760 if(use_pred) {
2761 if(dc_pred_dir) { //left
2762 for(k = 1; k < 8; k++) {
2763 block[k << 3] = ac_val2[k] * scale;
2764 if(!v->pquantizer && block[k << 3])
2765 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2767 } else { //top
2768 for(k = 1; k < 8; k++) {
2769 block[k] = ac_val2[k + 8] * scale;
2770 if(!v->pquantizer && block[k])
2771 block[k] += (block[k] < 0) ? -mquant : mquant;
2774 i = 63;
2777 s->block_last_index[n] = i;
2779 return 0;
2782 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2783 * @param v VC1Context
2784 * @param block block to decode
2785 * @param[in] n subblock index
2786 * @param coded are AC coeffs present or not
2787 * @param mquant block quantizer
2788 * @param codingset set of VLC to decode data
2790 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
2792 GetBitContext *gb = &v->s.gb;
2793 MpegEncContext *s = &v->s;
2794 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2795 int run_diff, i;
2796 int16_t *dc_val;
2797 int16_t *ac_val, *ac_val2;
2798 int dcdiff;
2799 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2800 int a_avail = v->a_avail, c_avail = v->c_avail;
2801 int use_pred = s->ac_pred;
2802 int scale;
2803 int q1, q2 = 0;
2805 /* XXX: Guard against dumb values of mquant */
2806 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
2808 /* Set DC scale - y and c use the same */
2809 s->y_dc_scale = s->y_dc_scale_table[mquant];
2810 s->c_dc_scale = s->c_dc_scale_table[mquant];
2812 /* Get DC differential */
2813 if (n < 4) {
2814 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2815 } else {
2816 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2818 if (dcdiff < 0){
2819 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2820 return -1;
2822 if (dcdiff)
2824 if (dcdiff == 119 /* ESC index value */)
2826 /* TODO: Optimize */
2827 if (mquant == 1) dcdiff = get_bits(gb, 10);
2828 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2829 else dcdiff = get_bits(gb, 8);
2831 else
2833 if (mquant == 1)
2834 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2835 else if (mquant == 2)
2836 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
2838 if (get_bits1(gb))
2839 dcdiff = -dcdiff;
2842 /* Prediction */
2843 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2844 *dc_val = dcdiff;
2846 /* Store the quantized DC coeff, used for prediction */
2848 if (n < 4) {
2849 block[0] = dcdiff * s->y_dc_scale;
2850 } else {
2851 block[0] = dcdiff * s->c_dc_scale;
2853 /* Skip ? */
2854 run_diff = 0;
2855 i = 0;
2857 //AC Decoding
2858 i = 1;
2860 /* check if AC is needed at all and adjust direction if needed */
2861 if(!a_avail) dc_pred_dir = 1;
2862 if(!c_avail) dc_pred_dir = 0;
2863 if(!a_avail && !c_avail) use_pred = 0;
2864 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2865 ac_val2 = ac_val;
2867 scale = mquant * 2 + v->halfpq;
2869 if(dc_pred_dir) //left
2870 ac_val -= 16;
2871 else //top
2872 ac_val -= 16 * s->block_wrap[n];
2874 q1 = s->current_picture.qscale_table[mb_pos];
2875 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
2876 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2877 if(dc_pred_dir && n==1) q2 = q1;
2878 if(!dc_pred_dir && n==2) q2 = q1;
2879 if(n==3) q2 = q1;
2881 if(coded) {
2882 int last = 0, skip, value;
2883 const int8_t *zz_table;
2884 int k;
2886 zz_table = wmv1_scantable[0];
2888 while (!last) {
2889 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2890 i += skip;
2891 if(i > 63)
2892 break;
2893 block[zz_table[i++]] = value;
2896 /* apply AC prediction if needed */
2897 if(use_pred) {
2898 /* scale predictors if needed*/
2899 if(q2 && q1!=q2) {
2900 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2901 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2903 if(dc_pred_dir) { //left
2904 for(k = 1; k < 8; k++)
2905 block[k << 3] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2906 } else { //top
2907 for(k = 1; k < 8; k++)
2908 block[k] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2910 } else {
2911 if(dc_pred_dir) { //left
2912 for(k = 1; k < 8; k++)
2913 block[k << 3] += ac_val[k];
2914 } else { //top
2915 for(k = 1; k < 8; k++)
2916 block[k] += ac_val[k + 8];
2920 /* save AC coeffs for further prediction */
2921 for(k = 1; k < 8; k++) {
2922 ac_val2[k] = block[k << 3];
2923 ac_val2[k + 8] = block[k];
2926 /* scale AC coeffs */
2927 for(k = 1; k < 64; k++)
2928 if(block[k]) {
2929 block[k] *= scale;
2930 if(!v->pquantizer)
2931 block[k] += (block[k] < 0) ? -mquant : mquant;
2934 if(use_pred) i = 63;
2935 } else { // no AC coeffs
2936 int k;
2938 memset(ac_val2, 0, 16 * 2);
2939 if(dc_pred_dir) {//left
2940 if(use_pred) {
2941 memcpy(ac_val2, ac_val, 8 * 2);
2942 if(q2 && q1!=q2) {
2943 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2944 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2945 for(k = 1; k < 8; k++)
2946 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2949 } else {//top
2950 if(use_pred) {
2951 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2952 if(q2 && q1!=q2) {
2953 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2954 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2955 for(k = 1; k < 8; k++)
2956 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2961 /* apply AC prediction if needed */
2962 if(use_pred) {
2963 if(dc_pred_dir) { //left
2964 for(k = 1; k < 8; k++) {
2965 block[k << 3] = ac_val2[k] * scale;
2966 if(!v->pquantizer && block[k << 3])
2967 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2969 } else { //top
2970 for(k = 1; k < 8; k++) {
2971 block[k] = ac_val2[k + 8] * scale;
2972 if(!v->pquantizer && block[k])
2973 block[k] += (block[k] < 0) ? -mquant : mquant;
2976 i = 63;
2979 s->block_last_index[n] = i;
2981 return 0;
2984 /** Decode P block
2986 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block,
2987 uint8_t *dst, int linesize, int skip_block, int apply_filter, int cbp_top, int cbp_left)
2989 MpegEncContext *s = &v->s;
2990 GetBitContext *gb = &s->gb;
2991 int i, j;
2992 int subblkpat = 0;
2993 int scale, off, idx, last, skip, value;
2994 int ttblk = ttmb & 7;
2995 int pat = 0;
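    /* pat collects which 4x4 subblocks received coefficients; the caller ORs it
       into v->cbp[] and later uses it for in-loop filtering decisions */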
2997 if(ttmb == -1) {
2998 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3000 if(ttblk == TT_4X4) {
3001 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3003 if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
3004 subblkpat = decode012(gb);
3005 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
3006 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
3007 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
3009 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3011 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3012 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3013 subblkpat = 2 - (ttblk == TT_8X4_TOP);
3014 ttblk = TT_8X4;
3016 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3017 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3018 ttblk = TT_4X8;
3020 switch(ttblk) {
3021 case TT_8X8:
3022 pat = 0xF;
3023 i = 0;
3024 last = 0;
3025 while (!last) {
3026 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3027 i += skip;
3028 if(i > 63)
3029 break;
3030 idx = wmv1_scantable[0][i++];
3031 block[idx] = value * scale;
3032 if(!v->pquantizer)
3033 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3035 if(!skip_block){
3036 s->dsp.vc1_inv_trans_8x8(block);
3037 s->dsp.add_pixels_clamped(block, dst, linesize);
3038 if(apply_filter && cbp_top & 0xC)
3039 vc1_loop_filter(dst, 1, linesize, 8, mquant);
3040 if(apply_filter && cbp_left & 0xA)
3041 vc1_loop_filter(dst, linesize, 1, 8, mquant);
3043 break;
3044 case TT_4X4:
3045 pat = ~subblkpat & 0xF;
3046 for(j = 0; j < 4; j++) {
3047 last = subblkpat & (1 << (3 - j));
3048 i = 0;
3049 off = (j & 1) * 4 + (j & 2) * 16;
3050 while (!last) {
3051 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3052 i += skip;
3053 if(i > 15)
3054 break;
3055 idx = ff_vc1_simple_progressive_4x4_zz[i++];
3056 block[idx + off] = value * scale;
3057 if(!v->pquantizer)
3058 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3060 if(!(subblkpat & (1 << (3 - j))) && !skip_block){
3061 s->dsp.vc1_inv_trans_4x4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off);
3062 if(apply_filter && (j&2 ? pat & (1<<(j-2)) : (cbp_top & (1 << (j + 2)))))
3063 vc1_loop_filter(dst + (j&1)*4 + (j&2)*2*linesize, 1, linesize, 4, mquant);
3064 if(apply_filter && (j&1 ? pat & (1<<(j-1)) : (cbp_left & (1 << (j + 1)))))
3065 vc1_loop_filter(dst + (j&1)*4 + (j&2)*2*linesize, linesize, 1, 4, mquant);
3068 break;
3069 case TT_8X4:
3070 pat = ~((subblkpat & 2)*6 + (subblkpat & 1)*3) & 0xF;
3071 for(j = 0; j < 2; j++) {
3072 last = subblkpat & (1 << (1 - j));
3073 i = 0;
3074 off = j * 32;
3075 while (!last) {
3076 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3077 i += skip;
3078 if(i > 31)
3079 break;
3080 idx = v->zz_8x4[i++]+off;
3081 block[idx] = value * scale;
3082 if(!v->pquantizer)
3083 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3085 if(!(subblkpat & (1 << (1 - j))) && !skip_block){
3086 s->dsp.vc1_inv_trans_8x4(dst + j*4*linesize, linesize, block + off);
3087 if(apply_filter && (j ? pat & 0x3 : (cbp_top & 0xC)))
3088 vc1_loop_filter(dst + j*4*linesize, 1, linesize, 8, mquant);
3089 if(apply_filter && cbp_left & (2 << j))
3090 vc1_loop_filter(dst + j*4*linesize, linesize, 1, 4, mquant);
3093 break;
3094 case TT_4X8:
3095 pat = ~(subblkpat*5) & 0xF;
3096 for(j = 0; j < 2; j++) {
3097 last = subblkpat & (1 << (1 - j));
3098 i = 0;
3099 off = j * 4;
3100 while (!last) {
3101 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3102 i += skip;
3103 if(i > 31)
3104 break;
3105 idx = v->zz_4x8[i++]+off;
3106 block[idx] = value * scale;
3107 if(!v->pquantizer)
3108 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3110 if(!(subblkpat & (1 << (1 - j))) && !skip_block){
3111 s->dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3112 if(apply_filter && cbp_top & (2 << j))
3113 vc1_loop_filter(dst + j*4, 1, linesize, 4, mquant);
3114 if(apply_filter && (j ? pat & 0x5 : (cbp_left & 0xA)))
3115 vc1_loop_filter(dst + j*4, linesize, 1, 8, mquant);
3118 break;
3120 return pat;
3123 /** @} */ // Macroblock group
3125 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3126 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3128 /** Decode one P-frame MB (in Simple/Main profile)
3130 static int vc1_decode_p_mb(VC1Context *v)
3132 MpegEncContext *s = &v->s;
3133 GetBitContext *gb = &s->gb;
3134 int i, j;
3135 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3136 int cbp; /* cbp decoding stuff */
3137 int mqdiff, mquant; /* MB quantization */
3138 int ttmb = v->ttfrm; /* MB Transform type */
3140 int mb_has_coeffs = 1; /* last_flag */
3141 int dmv_x, dmv_y; /* Differential MV components */
3142 int index, index1; /* LUT indexes */
3143 int val, sign; /* temp values */
3144 int first_block = 1;
3145 int dst_idx, off;
3146 int skipped, fourmv;
3147 int block_cbp = 0, pat;
3148 int apply_loop_filter;
3150 mquant = v->pq; /* Lossy initialization */
3152 if (v->mv_type_is_raw)
3153 fourmv = get_bits1(gb);
3154 else
3155 fourmv = v->mv_type_mb_plane[mb_pos];
3156 if (v->skip_is_raw)
3157 skipped = get_bits1(gb);
3158 else
3159 skipped = v->s.mbskip_table[mb_pos];
3161 s->dsp.clear_blocks(s->block[0]);
3163 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
3164 if (!fourmv) /* 1MV mode */
3166 if (!skipped)
3168 GET_MVDATA(dmv_x, dmv_y);
3170 if (s->mb_intra) {
3171 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3172 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3174 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3175 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3177 /* FIXME Set DC val for inter block ? */
3178 if (s->mb_intra && !mb_has_coeffs)
3180 GET_MQUANT();
3181 s->ac_pred = get_bits1(gb);
3182 cbp = 0;
3184 else if (mb_has_coeffs)
3186 if (s->mb_intra) s->ac_pred = get_bits1(gb);
3187 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3188 GET_MQUANT();
3190 else
3192 mquant = v->pq;
3193 cbp = 0;
3195 s->current_picture.qscale_table[mb_pos] = mquant;
3197 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3198 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3199 VC1_TTMB_VLC_BITS, 2);
3200 if(!s->mb_intra) vc1_mc_1mv(v, 0);
3201 dst_idx = 0;
3202 for (i=0; i<6; i++)
3204 s->dc_val[0][s->block_index[i]] = 0;
3205 dst_idx += i >> 2;
3206 val = ((cbp >> (5 - i)) & 1);
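            /* blocks 0-3 are the four luma 8x8s of the MB, 4 and 5 are Cb and Cr,
               hence the zero destination offset for chroma */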
3207 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3208 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3209 if(s->mb_intra) {
3210 /* check if prediction blocks A and C are available */
3211 v->a_avail = v->c_avail = 0;
3212 if(i == 2 || i == 3 || !s->first_slice_line)
3213 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3214 if(i == 1 || i == 3 || s->mb_x)
3215 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3217 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3218 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3219 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3220 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3221 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3222 if(v->pq >= 9 && v->overlap) {
3223 if(v->c_avail)
3224 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3225 if(v->a_avail)
3226 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3228 if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
3229 int left_cbp, top_cbp;
3230 if(i & 4){
3231 left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
3232 top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
3233 }else{
3234 left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
3235 top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
3237 if(left_cbp & 0xC)
3238 vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
3239 if(top_cbp & 0xA)
3240 vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
3242 block_cbp |= 0xF << (i << 2);
3243 } else if(val) {
3244 int left_cbp = 0, top_cbp = 0, filter = 0;
3245 if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
3246 filter = 1;
3247 if(i & 4){
3248 left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
3249 top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
3250 }else{
3251 left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
3252 top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
3254 if(left_cbp & 0xC)
3255 vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
3256 if(top_cbp & 0xA)
3257 vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
3259 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
3260 block_cbp |= pat << (i << 2);
3261 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3262 first_block = 0;
3266 else //Skipped
3268 s->mb_intra = 0;
3269 for(i = 0; i < 6; i++) {
3270 v->mb_type[0][s->block_index[i]] = 0;
3271 s->dc_val[0][s->block_index[i]] = 0;
3273 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3274 s->current_picture.qscale_table[mb_pos] = 0;
3275 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3276 vc1_mc_1mv(v, 0);
3277 return 0;
3279 } //1MV mode
3280 else //4MV mode
3282 if (!skipped /* unskipped MB */)
3284 int intra_count = 0, coded_inter = 0;
3285 int is_intra[6], is_coded[6];
3286 /* Get CBPCY */
3287 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3288 for (i=0; i<6; i++)
3290 val = ((cbp >> (5 - i)) & 1);
3291 s->dc_val[0][s->block_index[i]] = 0;
3292 s->mb_intra = 0;
3293 if(i < 4) {
3294 dmv_x = dmv_y = 0;
3295 s->mb_intra = 0;
3296 mb_has_coeffs = 0;
3297 if(val) {
3298 GET_MVDATA(dmv_x, dmv_y);
3300 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3301 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3302 intra_count += s->mb_intra;
3303 is_intra[i] = s->mb_intra;
3304 is_coded[i] = mb_has_coeffs;
3306 if(i&4){
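                /* chroma blocks are decoded as intra when at least three of the
                   four luma blocks in this MB are intra */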
3307 is_intra[i] = (intra_count >= 3);
3308 is_coded[i] = val;
3310 if(i == 4) vc1_mc_4mv_chroma(v);
3311 v->mb_type[0][s->block_index[i]] = is_intra[i];
3312 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3314 // if there are no coded blocks then don't do anything more
3315 if(!intra_count && !coded_inter) return 0;
3316 dst_idx = 0;
3317 GET_MQUANT();
3318 s->current_picture.qscale_table[mb_pos] = mquant;
3319 /* test if block is intra and has pred */
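            /* the ACPRED bit is only present when at least one intra block actually
               has an intra neighbour (top or left) to predict from */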
3321 int intrapred = 0;
3322 for(i=0; i<6; i++)
3323 if(is_intra[i]) {
3324 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3325 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3326 intrapred = 1;
3327 break;
3330 if(intrapred)s->ac_pred = get_bits1(gb);
3331 else s->ac_pred = 0;
3333 if (!v->ttmbf && coded_inter)
3334 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3335 for (i=0; i<6; i++)
3337 dst_idx += i >> 2;
3338 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3339 s->mb_intra = is_intra[i];
3340 if (is_intra[i]) {
3341 /* check if prediction blocks A and C are available */
3342 v->a_avail = v->c_avail = 0;
3343 if(i == 2 || i == 3 || !s->first_slice_line)
3344 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3345 if(i == 1 || i == 3 || s->mb_x)
3346 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3348 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3349 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3350 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3351 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3352 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3353 if(v->pq >= 9 && v->overlap) {
3354 if(v->c_avail)
3355 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3356 if(v->a_avail)
3357 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3359 if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
3360 int left_cbp, top_cbp;
3361 if(i & 4){
3362 left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
3363 top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
3364 }else{
3365 left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
3366 top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
3368 if(left_cbp & 0xC)
3369 vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
3370 if(top_cbp & 0xA)
3371 vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
3373 block_cbp |= 0xF << (i << 2);
3374 } else if(is_coded[i]) {
3375 int left_cbp = 0, top_cbp = 0, filter = 0;
3376 if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
3377 filter = 1;
3378 if(i & 4){
3379 left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
3380 top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
3381 }else{
3382 left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
3383 top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
3385 if(left_cbp & 0xC)
3386 vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
3387 if(top_cbp & 0xA)
3388 vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
3390 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
3391 block_cbp |= pat << (i << 2);
3392 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3393 first_block = 0;
3396 return 0;
3398 else //Skipped MB
3400 s->mb_intra = 0;
3401 s->current_picture.qscale_table[mb_pos] = 0;
3402 for (i=0; i<6; i++) {
3403 v->mb_type[0][s->block_index[i]] = 0;
3404 s->dc_val[0][s->block_index[i]] = 0;
3406 for (i=0; i<4; i++)
3408 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3409 vc1_mc_4mv_luma(v, i);
3411 vc1_mc_4mv_chroma(v);
3412 s->current_picture.qscale_table[mb_pos] = 0;
3413 return 0;
3416 v->cbp[s->mb_x] = block_cbp;
3418 /* Should never happen */
3419 return -1;
3422 /** Decode one B-frame MB (in Main profile)
3424 static void vc1_decode_b_mb(VC1Context *v)
3426 MpegEncContext *s = &v->s;
3427 GetBitContext *gb = &s->gb;
3428 int i, j;
3429 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3430 int cbp = 0; /* cbp decoding stuff */
3431 int mqdiff, mquant; /* MB quantization */
3432 int ttmb = v->ttfrm; /* MB Transform type */
3433 int mb_has_coeffs = 0; /* last_flag */
3434 int index, index1; /* LUT indexes */
3435 int val, sign; /* temp values */
3436 int first_block = 1;
3437 int dst_idx, off;
3438 int skipped, direct;
3439 int dmv_x[2], dmv_y[2];
3440 int bmvtype = BMV_TYPE_BACKWARD;
3442 mquant = v->pq; /* Lossy initialization */
3443 s->mb_intra = 0;
3445 if (v->dmb_is_raw)
3446 direct = get_bits1(gb);
3447 else
3448 direct = v->direct_mb_plane[mb_pos];
3449 if (v->skip_is_raw)
3450 skipped = get_bits1(gb);
3451 else
3452 skipped = v->s.mbskip_table[mb_pos];
3454 s->dsp.clear_blocks(s->block[0]);
3455 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
3456 for(i = 0; i < 6; i++) {
3457 v->mb_type[0][s->block_index[i]] = 0;
3458 s->dc_val[0][s->block_index[i]] = 0;
3460 s->current_picture.qscale_table[mb_pos] = 0;
3462 if (!direct) {
3463 if (!skipped) {
3464 GET_MVDATA(dmv_x[0], dmv_y[0]);
3465 dmv_x[1] = dmv_x[0];
3466 dmv_y[1] = dmv_y[0];
3468 if(skipped || !s->mb_intra) {
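        /* BMVTYPE: the shortest code maps to backward or forward prediction depending
           on which anchor the B frame lies closer to (bfraction >= 1/2) */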
3469 bmvtype = decode012(gb);
3470 switch(bmvtype) {
3471 case 0:
3472 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
3473 break;
3474 case 1:
3475 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
3476 break;
3477 case 2:
3478 bmvtype = BMV_TYPE_INTERPOLATED;
3479 dmv_x[0] = dmv_y[0] = 0;
3483 for(i = 0; i < 6; i++)
3484 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3486 if (skipped) {
3487 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
3488 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3489 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3490 return;
3492 if (direct) {
3493 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3494 GET_MQUANT();
3495 s->mb_intra = 0;
3496 mb_has_coeffs = 0;
3497 s->current_picture.qscale_table[mb_pos] = mquant;
3498 if(!v->ttmbf)
3499 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3500 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
3501 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3502 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3503 } else {
3504 if(!mb_has_coeffs && !s->mb_intra) {
3505 /* no coded blocks - effectively skipped */
3506 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3507 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3508 return;
3510 if(s->mb_intra && !mb_has_coeffs) {
3511 GET_MQUANT();
3512 s->current_picture.qscale_table[mb_pos] = mquant;
3513 s->ac_pred = get_bits1(gb);
3514 cbp = 0;
3515 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3516 } else {
3517 if(bmvtype == BMV_TYPE_INTERPOLATED) {
3518 GET_MVDATA(dmv_x[0], dmv_y[0]);
3519 if(!mb_has_coeffs) {
3520 /* interpolated skipped block */
3521 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3522 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3523 return;
3526 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
3527 if(!s->mb_intra) {
3528 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
3530 if(s->mb_intra)
3531 s->ac_pred = get_bits1(gb);
3532 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3533 GET_MQUANT();
3534 s->current_picture.qscale_table[mb_pos] = mquant;
3535 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3536 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3539 dst_idx = 0;
3540 for (i=0; i<6; i++)
3542 s->dc_val[0][s->block_index[i]] = 0;
3543 dst_idx += i >> 2;
3544 val = ((cbp >> (5 - i)) & 1);
3545 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3546 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3547 if(s->mb_intra) {
3548 /* check if prediction blocks A and C are available */
3549 v->a_avail = v->c_avail = 0;
3550 if(i == 2 || i == 3 || !s->first_slice_line)
3551 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3552 if(i == 1 || i == 3 || s->mb_x)
3553 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3555 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3556 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3557 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3558 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3559 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3560 } else if(val) {
3561 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), 0, 0, 0);
3562 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3563 first_block = 0;
3568 /** Decode blocks of I-frame
3570 static void vc1_decode_i_blocks(VC1Context *v)
3572 int k, j;
3573 MpegEncContext *s = &v->s;
3574 int cbp, val;
3575 uint8_t *coded_val;
3576 int mb_pos;
3578 /* select codingmode used for VLC tables selection */
3579 switch(v->y_ac_table_index){
3580 case 0:
3581 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3582 break;
3583 case 1:
3584 v->codingset = CS_HIGH_MOT_INTRA;
3585 break;
3586 case 2:
3587 v->codingset = CS_MID_RATE_INTRA;
3588 break;
3591 switch(v->c_ac_table_index){
3592 case 0:
3593 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3594 break;
3595 case 1:
3596 v->codingset2 = CS_HIGH_MOT_INTER;
3597 break;
3598 case 2:
3599 v->codingset2 = CS_MID_RATE_INTER;
3600 break;
3603 /* Set DC scale - y and c use the same */
3604 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3605 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3607 //do frame decode
3608 s->mb_x = s->mb_y = 0;
3609 s->mb_intra = 1;
3610 s->first_slice_line = 1;
3611 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3612 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3613 ff_init_block_index(s);
3614 ff_update_block_index(s);
3615 s->dsp.clear_blocks(s->block[0]);
3616 mb_pos = s->mb_x + s->mb_y * s->mb_width;
3617 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3618 s->current_picture.qscale_table[mb_pos] = v->pq;
3619 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3620 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3622 // do actual MB decoding and displaying
3623 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3624 v->s.ac_pred = get_bits1(&v->s.gb);
3626 for(k = 0; k < 6; k++) {
3627 val = ((cbp >> (5 - k)) & 1);
3629 if (k < 4) {
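                /* luma CBP bits are coded differentially: XOR the decoded bit with the
                   prediction derived from the neighbouring coded_block values */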
3630 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3631 val = val ^ pred;
3632 *coded_val = val;
3634 cbp |= val << (5 - k);
3636 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3638 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3639 if(v->pq >= 9 && v->overlap) {
3640 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3644 vc1_put_block(v, s->block);
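            /* overlap smoothing across the 8x8 block boundaries with the left and top
               neighbours, applied when PQUANT >= 9 and the OVERLAP flag is set */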
3645 if(v->pq >= 9 && v->overlap) {
3646 if(s->mb_x) {
3647 s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
3648 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3649 if(!(s->flags & CODEC_FLAG_GRAY)) {
3650 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
3651 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
3654 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
3655 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3656 if(!s->first_slice_line) {
3657 s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
3658 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
3659 if(!(s->flags & CODEC_FLAG_GRAY)) {
3660 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
3661 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
3664 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3665 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3667 if(v->s.loop_filter) vc1_loop_filter_iblk(s, s->current_picture.qscale_table[mb_pos]);
3669 if(get_bits_count(&s->gb) > v->bits) {
3670 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
3671 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3672 return;
3675 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3676 s->first_slice_line = 0;
3678 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3681 /** Decode blocks of I-frame for advanced profile
3683 static void vc1_decode_i_blocks_adv(VC1Context *v)
3685 int k, j;
3686 MpegEncContext *s = &v->s;
3687 int cbp, val;
3688 uint8_t *coded_val;
3689 int mb_pos;
3690 int mquant = v->pq;
3691 int mqdiff;
3692 int overlap;
3693 GetBitContext *gb = &s->gb;
3695 /* select codingmode used for VLC tables selection */
3696 switch(v->y_ac_table_index){
3697 case 0:
3698 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3699 break;
3700 case 1:
3701 v->codingset = CS_HIGH_MOT_INTRA;
3702 break;
3703 case 2:
3704 v->codingset = CS_MID_RATE_INTRA;
3705 break;
3708 switch(v->c_ac_table_index){
3709 case 0:
3710 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3711 break;
3712 case 1:
3713 v->codingset2 = CS_HIGH_MOT_INTER;
3714 break;
3715 case 2:
3716 v->codingset2 = CS_MID_RATE_INTER;
3717 break;
3718 }
3719 
3720 //do frame decode
3721 s->mb_x = s->mb_y = 0;
3722 s->mb_intra = 1;
3723 s->first_slice_line = 1;
3724 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3725 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3726 ff_init_block_index(s);
3727 ff_update_block_index(s);
3728 s->dsp.clear_blocks(s->block[0]);
3729 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3730 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3731 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3732 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3734 // do actual MB decoding and displaying
3735 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3736 if(v->acpred_is_raw)
3737 v->s.ac_pred = get_bits1(&v->s.gb);
3738 else
3739 v->s.ac_pred = v->acpred_plane[mb_pos];
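/* Advanced profile can switch the overlap filter per macroblock: with
 * CONDOVER_SELECT the decision comes either raw from the bitstream or from
 * the decoded overflags bitplane, otherwise it is simply all-on or all-off. */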
3741 if(v->condover == CONDOVER_SELECT) {
3742 if(v->overflg_is_raw)
3743 overlap = get_bits1(&v->s.gb);
3744 else
3745 overlap = v->over_flags_plane[mb_pos];
3746 } else
3747 overlap = (v->condover == CONDOVER_ALL);
3749 GET_MQUANT();
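/* GET_MQUANT() is a macro defined earlier in this file; it is expected to
 * derive the per-macroblock quantizer `mquant` (using the `mqdiff` local
 * declared above) from the picture-level DQUANT syntax elements. */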
3751 s->current_picture.qscale_table[mb_pos] = mquant;
3752 /* Set DC scale - y and c use the same */
3753 s->y_dc_scale = s->y_dc_scale_table[mquant];
3754 s->c_dc_scale = s->c_dc_scale_table[mquant];
3756 for(k = 0; k < 6; k++) {
3757 val = ((cbp >> (5 - k)) & 1);
3759 if (k < 4) {
3760 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3761 val = val ^ pred;
3762 *coded_val = val;
3763 }
3764 cbp |= val << (5 - k);
3765 
3766 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3767 v->c_avail = !!s->mb_x || (k==1 || k==3);
3768 
3769 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
3770 
3771 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3772 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3773 }
3774 
3775 vc1_put_block(v, s->block);
3776 if(overlap) {
3777 if(s->mb_x) {
3778 s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
3779 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3780 if(!(s->flags & CODEC_FLAG_GRAY)) {
3781 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
3782 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
3783 }
3784 }
3785 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
3786 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3787 if(!s->first_slice_line) {
3788 s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
3789 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
3790 if(!(s->flags & CODEC_FLAG_GRAY)) {
3791 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
3792 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
3793 }
3794 }
3795 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
3796 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
3797 }
3798 if(v->s.loop_filter) vc1_loop_filter_iblk(s, s->current_picture.qscale_table[mb_pos]);
3799 
3800 if(get_bits_count(&s->gb) > v->bits) {
3801 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
3802 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3803 return;
3804 }
3805 }
3806 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3807 s->first_slice_line = 0;
3808 }
3809 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3810 }
3811 
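/** Decode blocks of P-frame
 */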
3812 static void vc1_decode_p_blocks(VC1Context *v)
3813 {
3814 MpegEncContext *s = &v->s;
3816 /* select coding mode used for VLC tables selection */
3817 switch(v->c_ac_table_index){
3818 case 0:
3819 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3820 break;
3821 case 1:
3822 v->codingset = CS_HIGH_MOT_INTRA;
3823 break;
3824 case 2:
3825 v->codingset = CS_MID_RATE_INTRA;
3826 break;
3827 }
3828 
3829 switch(v->c_ac_table_index){
3830 case 0:
3831 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3832 break;
3833 case 1:
3834 v->codingset2 = CS_HIGH_MOT_INTER;
3835 break;
3836 case 2:
3837 v->codingset2 = CS_MID_RATE_INTER;
3838 break;
3839 }
3840 
3841 s->first_slice_line = 1;
3842 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
3843 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3844 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3845 ff_init_block_index(s);
3846 ff_update_block_index(s);
3847 s->dsp.clear_blocks(s->block[0]);
3849 vc1_decode_p_mb(v);
3850 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3851 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
3852 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3853 return;
3854 }
3855 }
3856 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0])*s->mb_stride);
3857 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3858 s->first_slice_line = 0;
3859 }
3860 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3861 }
3862 
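/** Decode blocks of B-frame
 */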
3863 static void vc1_decode_b_blocks(VC1Context *v)
3864 {
3865 MpegEncContext *s = &v->s;
3867 /* select coding mode used for VLC tables selection */
3868 switch(v->c_ac_table_index){
3869 case 0:
3870 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3871 break;
3872 case 1:
3873 v->codingset = CS_HIGH_MOT_INTRA;
3874 break;
3875 case 2:
3876 v->codingset = CS_MID_RATE_INTRA;
3877 break;
3878 }
3879 
3880 switch(v->c_ac_table_index){
3881 case 0:
3882 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3883 break;
3884 case 1:
3885 v->codingset2 = CS_HIGH_MOT_INTER;
3886 break;
3887 case 2:
3888 v->codingset2 = CS_MID_RATE_INTER;
3889 break;
3890 }
3891 
3892 s->first_slice_line = 1;
3893 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3894 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3895 ff_init_block_index(s);
3896 ff_update_block_index(s);
3897 s->dsp.clear_blocks(s->block[0]);
3899 vc1_decode_b_mb(v);
3900 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3901 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
3902 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3903 return;
3904 }
3905 if(v->s.loop_filter) vc1_loop_filter_iblk(s, s->current_picture.qscale_table[s->mb_x + s->mb_y *s->mb_stride]);
3906 }
3907 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3908 s->first_slice_line = 0;
3909 }
3910 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3911 }
3912 
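/** Decode an entirely skipped P-frame by copying the previous picture
 */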
3913 static void vc1_decode_skip_blocks(VC1Context *v)
3914 {
3915 MpegEncContext *s = &v->s;
3917 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3918 s->first_slice_line = 1;
3919 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3920 s->mb_x = 0;
3921 ff_init_block_index(s);
3922 ff_update_block_index(s);
3923 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
3924 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3925 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3926 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3927 s->first_slice_line = 0;
3928 }
3929 s->pict_type = FF_P_TYPE;
3930 }
3931 
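/** Dispatch block decoding according to picture type and profile
 */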
3932 static void vc1_decode_blocks(VC1Context *v)
3933 {
3934 
3935 v->s.esc3_level_length = 0;
3936 if(v->x8_type){
3937 ff_intrax8_decode_picture(&v->x8, 2*v->pq+v->halfpq, v->pq*(!v->pquantizer) );
3938 }else{
3940 switch(v->s.pict_type) {
3941 case FF_I_TYPE:
3942 if(v->profile == PROFILE_ADVANCED)
3943 vc1_decode_i_blocks_adv(v);
3944 else
3945 vc1_decode_i_blocks(v);
3946 break;
3947 case FF_P_TYPE:
3948 if(v->p_frame_skipped)
3949 vc1_decode_skip_blocks(v);
3950 else
3951 vc1_decode_p_blocks(v);
3952 break;
3953 case FF_B_TYPE:
3954 if(v->bi_type){
3955 if(v->profile == PROFILE_ADVANCED)
3956 vc1_decode_i_blocks_adv(v);
3957 else
3958 vc1_decode_i_blocks(v);
3959 }else
3960 vc1_decode_b_blocks(v);
3961 break;
3962 }
3963 }
3964 }
3965 
3966 /** Find VC-1 marker in buffer
3967 * @return position where next marker starts or end of buffer if no marker found
3968 */
3969 static av_always_inline const uint8_t* find_next_marker(const uint8_t *src, const uint8_t *end)
3970 {
3971 uint32_t mrk = 0xFFFFFFFF;
3972 
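/* Slide a 32-bit window over the buffer one byte at a time; IS_MARKER()
 * (from vc1.h) presumably becomes true once the window holds a VC-1 start
 * code, in which case the marker began four bytes back. */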
3973 if(end-src < 4) return end;
3974 while(src < end){
3975 mrk = (mrk << 8) | *src++;
3976 if(IS_MARKER(mrk))
3977 return src-4;
3978 }
3979 return end;
3980 }
3981 
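/** Remove start-code emulation prevention bytes from a VC-1 IDU:
 * a 0x00 0x00 0x03 0x0X sequence becomes 0x00 0x00 0x0X in dst
 * @return number of bytes written to dst
 */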
3982 static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
3983 {
3984 int dsize = 0, i;
3985 
3986 if(size < 4){
3987 for(dsize = 0; dsize < size; dsize++) *dst++ = *src++;
3988 return size;
3989 }
3990 for(i = 0; i < size; i++, src++) {
3991 if(src[0] == 3 && i >= 2 && !src[-1] && !src[-2] && i < size-1 && src[1] < 4) {
3992 dst[dsize++] = src[1];
3993 src++;
3994 i++;
3995 } else
3996 dst[dsize++] = *src;
3997 }
3998 return dsize;
3999 }
4000 
4001 /** Initialize a VC1/WMV3 decoder
4002 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4003 * @todo TODO: Decipher remaining bits in extra_data
4004 */
4005 static av_cold int vc1_decode_init(AVCodecContext *avctx)
4006 {
4007 VC1Context *v = avctx->priv_data;
4008 MpegEncContext *s = &v->s;
4009 GetBitContext gb;
4011 if (!avctx->extradata_size || !avctx->extradata) return -1;
4012 if (!(avctx->flags & CODEC_FLAG_GRAY))
4013 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
4014 else
4015 avctx->pix_fmt = PIX_FMT_GRAY8;
4016 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
4017 v->s.avctx = avctx;
4018 avctx->flags |= CODEC_FLAG_EMU_EDGE;
4019 v->s.flags |= CODEC_FLAG_EMU_EDGE;
4021 if(avctx->idct_algo==FF_IDCT_AUTO){
4022 avctx->idct_algo=FF_IDCT_WMV2;
4023 }
4024 
4025 if(ff_h263_decode_init(avctx) < 0)
4026 return -1;
4027 if (vc1_init_common(v) < 0) return -1;
4029 avctx->coded_width = avctx->width;
4030 avctx->coded_height = avctx->height;
4031 if (avctx->codec_id == CODEC_ID_WMV3)
4032 {
4033 int count = 0;
4035 // looks like WMV3 has a sequence header stored in the extradata
4036 // advanced sequence header may be before the first frame
4037 // the last byte of the extradata is a version number, 1 for the
4038 // samples we can decode
4040 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4042 if (decode_sequence_header(avctx, &gb) < 0)
4043 return -1;
4045 count = avctx->extradata_size*8 - get_bits_count(&gb);
4046 if (count>0)
4047 {
4048 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4049 count, get_bits(&gb, count));
4050 }
4051 else if (count < 0)
4052 {
4053 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4054 }
4055 } else { // VC1/WVC1
4056 const uint8_t *start = avctx->extradata;
4057 uint8_t *end = avctx->extradata + avctx->extradata_size;
4058 const uint8_t *next;
4059 int size, buf2_size;
4060 uint8_t *buf2 = NULL;
4061 int seq_initialized = 0, ep_initialized = 0;
4063 if(avctx->extradata_size < 16) {
4064 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
4065 return -1;
4066 }
4067 
4068 buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
4069 if(start[0]) start++; // in WVC1 extradata first byte is its size
4070 next = start;
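/* Walk the marker-delimited IDUs stored in the extradata: unescape each
 * payload into buf2 and decode the sequence header and entry point found there. */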
4071 for(; next < end; start = next){
4072 next = find_next_marker(start + 4, end);
4073 size = next - start - 4;
4074 if(size <= 0) continue;
4075 buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
4076 init_get_bits(&gb, buf2, buf2_size * 8);
4077 switch(AV_RB32(start)){
4078 case VC1_CODE_SEQHDR:
4079 if(decode_sequence_header(avctx, &gb) < 0){
4080 av_free(buf2);
4081 return -1;
4082 }
4083 seq_initialized = 1;
4084 break;
4085 case VC1_CODE_ENTRYPOINT:
4086 if(decode_entry_point(avctx, &gb) < 0){
4087 av_free(buf2);
4088 return -1;
4089 }
4090 ep_initialized = 1;
4091 break;
4092 }
4093 }
4094 av_free(buf2);
4095 if(!seq_initialized || !ep_initialized){
4096 av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
4097 return -1;
4098 }
4099 }
4100 avctx->has_b_frames= !!(avctx->max_b_frames);
4101 s->low_delay = !avctx->has_b_frames;
4103 s->mb_width = (avctx->coded_width+15)>>4;
4104 s->mb_height = (avctx->coded_height+15)>>4;
4106 /* Allocate mb bitplanes */
4107 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4108 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4109 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4110 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
4112 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
4113 v->cbp = v->cbp_base + s->mb_stride;
4115 /* allocate block type info in such a way that it can be used with s->block_index[] */
4116 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4117 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4118 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4119 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4121 /* Init coded blocks info */
4122 if (v->profile == PROFILE_ADVANCED)
4123 {
4124 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4125 // return -1;
4126 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4127 // return -1;
4128 }
4129 
4130 ff_intrax8_common_init(&v->x8,s);
4131 return 0;
4132 }
4133 
4134 
4135 /** Decode a VC1/WMV3 frame
4136 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4137 */
4138 static int vc1_decode_frame(AVCodecContext *avctx,
4139 void *data, int *data_size,
4140 const uint8_t *buf, int buf_size)
4141 {
4142 VC1Context *v = avctx->priv_data;
4143 MpegEncContext *s = &v->s;
4144 AVFrame *pict = data;
4145 uint8_t *buf2 = NULL;
4146 const uint8_t *buf_start = buf;
4148 /* no supplementary picture */
4149 if (buf_size == 0) {
4150 /* special case for last picture */
4151 if (s->low_delay==0 && s->next_picture_ptr) {
4152 *pict= *(AVFrame*)s->next_picture_ptr;
4153 s->next_picture_ptr= NULL;
4154 
4155 *data_size = sizeof(AVFrame);
4156 }
4157 
4158 return 0;
4159 }
4160 
4161 /* We need to set current_picture_ptr before reading the header,
4162 * otherwise we cannot store anything in there. */
4163 if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4164 int i= ff_find_unused_picture(s, 0);
4165 s->current_picture_ptr= &s->picture[i];
4166 }
4167 
4168 if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU){
4169 if (v->profile < PROFILE_ADVANCED)
4170 avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
4171 else
4172 avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
4173 }
4174 
4175 //for advanced profile we may need to parse and unescape data
4176 if (avctx->codec_id == CODEC_ID_VC1) {
4177 int buf_size2 = 0;
4178 buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
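/* The packet may be a chain of marker-prefixed IDUs (frame, entry point,
 * slice) or, for interlaced WVC1, two fields split by a marker; in all
 * cases the payload is unescaped into buf2 before bitstream parsing. */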
4180 if(IS_MARKER(AV_RB32(buf))){ /* frame starts with marker and needs to be parsed */
4181 const uint8_t *start, *end, *next;
4182 int size;
4184 next = buf;
4185 for(start = buf, end = buf + buf_size; next < end; start = next){
4186 next = find_next_marker(start + 4, end);
4187 size = next - start - 4;
4188 if(size <= 0) continue;
4189 switch(AV_RB32(start)){
4190 case VC1_CODE_FRAME:
4191 if (avctx->hwaccel ||
4192 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
4193 buf_start = start;
4194 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
4195 break;
4196 case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
4197 buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
4198 init_get_bits(&s->gb, buf2, buf_size2*8);
4199 decode_entry_point(avctx, &s->gb);
4200 break;
4201 case VC1_CODE_SLICE:
4202 av_log(avctx, AV_LOG_ERROR, "Sliced decoding is not implemented (yet)\n");
4203 av_free(buf2);
4204 return -1;
4205 }
4206 }
4207 }else if(v->interlace && ((buf[0] & 0xC0) == 0xC0)){ /* WVC1 interlaced stores both fields divided by marker */
4208 const uint8_t *divider;
4210 divider = find_next_marker(buf, buf + buf_size);
4211 if((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD){
4212 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
4213 av_free(buf2);
4214 return -1;
4215 }
4216 
4217 buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
4218 // TODO
4219 av_free(buf2);return -1;
4220 }else{
4221 buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
4222 }
4223 init_get_bits(&s->gb, buf2, buf_size2*8);
4224 } else
4225 init_get_bits(&s->gb, buf, buf_size*8);
4226 // do parse frame header
4227 if(v->profile < PROFILE_ADVANCED) {
4228 if(vc1_parse_frame_header(v, &s->gb) == -1) {
4229 av_free(buf2);
4230 return -1;
4231 }
4232 } else {
4233 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
4234 av_free(buf2);
4235 return -1;
4236 }
4237 }
4238 
4239 if(s->pict_type != FF_I_TYPE && !v->res_rtm_flag){
4240 av_free(buf2);
4241 return -1;
4242 }
4243 
4244 // for hurry_up==5
4245 s->current_picture.pict_type= s->pict_type;
4246 s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
4248 /* skip B-frames if we don't have reference frames */
4249 if(s->last_picture_ptr==NULL && (s->pict_type==FF_B_TYPE || s->dropable)){
4250 av_free(buf2);
4251 return -1;//buf_size;
4252 }
4253 /* skip b frames if we are in a hurry */
4254 if(avctx->hurry_up && s->pict_type==FF_B_TYPE) return -1;//buf_size;
4255 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
4256 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
4257 || avctx->skip_frame >= AVDISCARD_ALL) {
4258 av_free(buf2);
4259 return buf_size;
4260 }
4261 /* skip everything if we are in a hurry>=5 */
4262 if(avctx->hurry_up>=5) {
4263 av_free(buf2);
4264 return -1;//buf_size;
4265 }
4266 
4267 if(s->next_p_frame_damaged){
4268 if(s->pict_type==FF_B_TYPE)
4269 return buf_size;
4270 else
4271 s->next_p_frame_damaged=0;
4272 }
4273 
4274 if(MPV_frame_start(s, avctx) < 0) {
4275 av_free(buf2);
4276 return -1;
4277 }
4278 
4279 s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
4280 s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
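/* Three decoding paths: hand the frame to VDPAU, to a generic hwaccel, or
 * decode it in software with error-resilience bookkeeping around the block decode. */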
4282 if ((CONFIG_VC1_VDPAU_DECODER || CONFIG_WMV3_VDPAU_DECODER)
4283 &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
4284 ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
4285 else if (avctx->hwaccel) {
4286 if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
4287 return -1;
4288 if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
4289 return -1;
4290 if (avctx->hwaccel->end_frame(avctx) < 0)
4291 return -1;
4292 } else {
4293 ff_er_frame_start(s);
4295 v->bits = buf_size * 8;
4296 vc1_decode_blocks(v);
4297 //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4298 // if(get_bits_count(&s->gb) > buf_size * 8)
4299 // return -1;
4300 ff_er_frame_end(s);
4301 }
4302 
4303 MPV_frame_end(s);
4305 assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4306 assert(s->current_picture.pict_type == s->pict_type);
4307 if (s->pict_type == FF_B_TYPE || s->low_delay) {
4308 *pict= *(AVFrame*)s->current_picture_ptr;
4309 } else if (s->last_picture_ptr != NULL) {
4310 *pict= *(AVFrame*)s->last_picture_ptr;
4311 }
4312 
4313 if(s->last_picture_ptr || s->low_delay){
4314 *data_size = sizeof(AVFrame);
4315 ff_print_debug_info(s, pict);
4316 }
4317 
4318 /* Return the Picture timestamp as the frame number */
4319 /* we subtract 1 because it is added in utils.c */
4320 avctx->frame_number = s->picture_number - 1;
4322 av_free(buf2);
4323 return buf_size;
4324 }
4325 
4326 
4327 /** Close a VC1/WMV3 decoder
4328 * @warning Initial try at using MpegEncContext stuff
4329 */
4330 static av_cold int vc1_decode_end(AVCodecContext *avctx)
4331 {
4332 VC1Context *v = avctx->priv_data;
4333 
4334 av_freep(&v->hrd_rate);
4335 av_freep(&v->hrd_buffer);
4336 MPV_common_end(&v->s);
4337 av_freep(&v->mv_type_mb_plane);
4338 av_freep(&v->direct_mb_plane);
4339 av_freep(&v->acpred_plane);
4340 av_freep(&v->over_flags_plane);
4341 av_freep(&v->mb_type_base);
4342 av_freep(&v->cbp_base);
4343 ff_intrax8_common_end(&v->x8);
4344 return 0;
4345 }
4346 
4347 
4348 AVCodec vc1_decoder = {
4349 "vc1",
4350 CODEC_TYPE_VIDEO,
4351 CODEC_ID_VC1,
4352 sizeof(VC1Context),
4353 vc1_decode_init,
4354 NULL,
4355 vc1_decode_end,
4356 vc1_decode_frame,
4357 CODEC_CAP_DELAY,
4358 NULL,
4359 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
4360 .pix_fmts = ff_hwaccel_pixfmt_list_420
4361 };
4362 
4363 AVCodec wmv3_decoder = {
4364 "wmv3",
4365 CODEC_TYPE_VIDEO,
4366 CODEC_ID_WMV3,
4367 sizeof(VC1Context),
4368 vc1_decode_init,
4369 NULL,
4370 vc1_decode_end,
4371 vc1_decode_frame,
4372 CODEC_CAP_DELAY,
4373 NULL,
4374 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
4375 .pix_fmts = ff_hwaccel_pixfmt_list_420
4376 };
4377 
4378 #if CONFIG_WMV3_VDPAU_DECODER
4379 AVCodec wmv3_vdpau_decoder = {
4380 "wmv3_vdpau",
4381 CODEC_TYPE_VIDEO,
4382 CODEC_ID_WMV3,
4383 sizeof(VC1Context),
4384 vc1_decode_init,
4385 NULL,
4386 vc1_decode_end,
4387 vc1_decode_frame,
4388 CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
4389 NULL,
4390 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
4391 .pix_fmts = (enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE}
4392 };
4393 #endif
4395 #if CONFIG_VC1_VDPAU_DECODER
4396 AVCodec vc1_vdpau_decoder = {
4397 "vc1_vdpau",
4398 CODEC_TYPE_VIDEO,
4399 CODEC_ID_VC1,
4400 sizeof(VC1Context),
4401 vc1_decode_init,
4402 NULL,
4403 vc1_decode_end,
4404 vc1_decode_frame,
4405 CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
4406 NULL,
4407 .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
4408 .pix_fmts = (enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE}
4409 };
4410 #endif