/*
 * Copyright (c) 2003 The FFmpeg Project.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/*
 * How to use this decoder:
 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
 * have stsd atoms to describe media trak properties. A stsd atom for a
 * video trak contains 1 or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information in this atom to operate correctly. Such
 * is the case with SVQ3. In order to get the best use out of this decoder,
 * the calling app must make the SVQ3 ImageDescription atom available
 * via the AVCodecContext's extradata[_size] field:
 *
 * AVCodecContext.extradata = pointer to ImageDescription, first characters
 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know you have these parameters passed correctly when the decoder
 * correctly decodes this file:
 *  ftp://ftp.mplayerhq.hu/MPlayer/samples/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 */
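/*
 * Illustrative sketch (not part of the original source): a calling app that
 * has already located the ImageDescription atom in the stsd box could hand it
 * to the decoder roughly like this. 'imagedesc' and 'imagedesc_size' are
 * assumed to come from the app's own QT demuxing; only the extradata wiring
 * below is what this decoder actually requires:
 *
 *   avctx->extradata      = av_mallocz(imagedesc_size + FF_INPUT_BUFFER_PADDING_SIZE);
 *   memcpy(avctx->extradata, imagedesc, imagedesc_size);   // starts with 'S','V','Q','3'
 *   avctx->extradata_size = imagedesc_size;
 */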
/**
 * @file svq3.c
 * svq3 decoder.
 */
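/*
 * Note added for readers of this listing in isolation: there are no #include
 * directives here. In this revision the SVQ3 decoder appears to be compiled
 * together with h264.c, which supplies H264Context, the Golomb readers
 * (svq3_get_ue_golomb/svq3_get_se_golomb), decode_init/decode_end and the
 * other H.264 helpers referenced below.
 */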
#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4
/* dual scan (from some older h264 draft)
 o-->o-->o   o
         |  /|
 o   o   o / o
 | / |   |/  |
 o   o   o   o
   /
 o-->o-->o-->o
*/
static const uint8_t svq3_scan[16]={
    0+0*4, 1+0*4, 2+0*4, 2+1*4,
    2+2*4, 3+0*4, 3+1*4, 3+2*4,
    0+1*4, 0+2*4, 1+1*4, 1+2*4,
    0+3*4, 1+3*4, 2+3*4, 3+3*4,
};
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
      { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};
static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp){
    const int qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const int x_offset[4] = {0, 1*stride, 4*stride,  5*stride};
    static const int y_offset[4] = {0, 2*stride, 8*stride, 10*stride};

    for (i = 0; i < 4; i++) {
        const int offset = y_offset[i];
        const int z0 = 13*(block[offset+stride*0] + block[offset+stride*4]);
        const int z1 = 13*(block[offset+stride*0] - block[offset+stride*4]);
        const int z2 =  7* block[offset+stride*1] - 17*block[offset+stride*5];
        const int z3 = 17* block[offset+stride*1] +  7*block[offset+stride*5];

        temp[4*i+0] = z0 + z3;
        temp[4*i+1] = z1 + z2;
        temp[4*i+2] = z1 - z2;
        temp[4*i+3] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0 = 13*(temp[4*0+i] + temp[4*2+i]);
        const int z1 = 13*(temp[4*0+i] - temp[4*2+i]);
        const int z2 =  7* temp[4*1+i] - 17*temp[4*3+i];
        const int z3 = 17* temp[4*1+i] +  7*temp[4*3+i];

        block[stride*0 +offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
        block[stride*2 +offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
        block[stride*8 +offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
        block[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
    }
}
#undef stride
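/*
 * Explanatory note (added to this listing, not in the original): the 13/17/7
 * butterflies above and below are a scaled integer approximation of the exact
 * 4-point DCT basis (17/13 ~ cos(pi/8)/cos(pi/4), 7/13 ~ cos(3*pi/8)/cos(pi/4)),
 * so SVQ3 differs here from H.264's 1/2-weight integer transform. The
 * dequantization is folded into the output stage as 20-bit fixed point:
 *
 *   out = (coef * svq3_dequant_coeff[qp] + 0x80000) >> 20
 *
 * where 0x80000 = 2^19 rounds to nearest.
 */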
static void svq3_add_idct_c (uint8_t *dst, DCTELEM *block, int stride, int qp, int dc){
    const int qmul = svq3_dequant_coeff[qp];
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    if (dc) {
        dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
        block[0] = 0;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[0 + 4*i] + block[2 + 4*i]);
        const int z1 = 13*(block[0 + 4*i] - block[2 + 4*i]);
        const int z2 =  7* block[1 + 4*i] - 17*block[3 + 4*i];
        const int z3 = 17* block[1 + 4*i] +  7*block[3 + 4*i];

        block[0 + 4*i] = z0 + z3;
        block[1 + 4*i] = z1 + z2;
        block[2 + 4*i] = z1 - z2;
        block[3 + 4*i] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[i + 4*0] + block[i + 4*2]);
        const int z1 = 13*(block[i + 4*0] - block[i + 4*2]);
        const int z2 =  7* block[i + 4*1] - 17*block[i + 4*3];
        const int z3 = 17* block[i + 4*1] +  7*block[i + 4*3];
        const int rr = (dc + 0x80000);

        dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
        dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
        dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
        dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
    }
}
static inline int svq3_decode_block (GetBitContext *gb, DCTELEM *block,
                                     int index, const int type) {

    static const uint8_t *const scan_patterns[4] =
    { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

    int run, level, sign, vlc, limit;
    const int intra = (3 * type) >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = svq3_get_ue_golomb (gb)) != 0; index++) {

            if (vlc == INVALID_VLC)
                return -1;

            sign = (vlc & 0x1) - 1;
            vlc  = (vlc + 1) >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = (vlc & 0x3);
                    level = ((vlc + 9) >> 2) - run;
                }
            } else {
                if (vlc < 16) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run   = (vlc & 0x7);
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = (vlc & 0xF);
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}
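/*
 * Reading note (added here for clarity): svq3_decode_block() codes each
 * nonzero coefficient as a single unsigned Golomb value. Its LSB carries the
 * sign and the remaining magnitude selects a (run, level) pair -- via
 * svq3_dct_tables[] for magnitudes below 16, otherwise via the escape
 * formulas above. Type 2 (intra 4x4 luma at low qscale) does not break out of
 * the outer loop, so it decodes the block in two passes with a raised index
 * limit.
 */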
static inline void svq3_mc_dir_part (MpegEncContext *s,
                                     int x, int y, int width, int height,
                                     int mx, int my, int dxy,
                                     int thirdpel, int dir, int avg) {

    const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2

    mx += x;
    my += y;

    if (mx < 0 || mx >= (s->h_edge_pos - width  - 1) ||
        my < 0 || my >= (s->v_edge_pos - height - 1)) {

        if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
            emu = 1;
        }

        mx = av_clip (mx, -16, (s->h_edge_pos - width  + 15));
        my = av_clip (my, -16, (s->v_edge_pos - height + 15));
    }

    /* form component predictions */
    dest = s->current_picture.data[0] + x + y*s->linesize;
    src  = pic->data[0] + mx + my*s->linesize;

    if (emu) {
        ff_emulated_edge_mc (s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
                             mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
    else
        (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);

    if (!(s->flags & CODEC_FLAG_GRAY)) {
        mx     = (mx + (mx < (int) x)) >> 1;
        my     = (my + (my < (int) y)) >> 1;
        width  = (width  >> 1);
        height = (height >> 1);
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
            src  = pic->data[i] + mx + my*s->uvlinesize;

            if (emu) {
                ff_emulated_edge_mc (s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
                                     mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
            else
                (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
        }
    }
}
static inline int svq3_mc_dir (H264Context *h, int size, int mode, int dir, int avg) {

    int i, j, k, mx, my, dx, dy, x, y;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned) (size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
    const int h_edge_pos  = 6*(s->h_edge_pos - part_width ) - extra_width;
    const int v_edge_pos  = 6*(s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height) {
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4*s->mb_x+(j>>2)) + (4*s->mb_y+(i>>2))*h->b_stride;
            int dxy;
            x = 16*s->mb_x + j;
            y = 16*s->mb_y + i;
            k = ((j>>2)&1) + ((i>>1)&2) + ((j>>1)&4) + (i&8);

            if (mode != PREDICT_MODE) {
                pred_motion (h, k, (part_width >> 2), dir, 1, &mx, &my);
            } else {
                mx = s->next_picture.motion_val[0][b_xy][0]<<1;
                my = s->next_picture.motion_val[0][b_xy][1]<<1;

                if (dir == 0) {
                    mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1)>>1;
                    my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1)>>1;
                } else {
                    mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1)>>1;
                    my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1)>>1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip (mx, extra_width - 6*x, h_edge_pos - 6*x);
            my = av_clip (my, extra_width - 6*y, v_edge_pos - 6*y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb (&s->gb);
                dx = svq3_get_se_golomb (&s->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx = ((mx + 1)>>1) + dx;
                my = ((my + 1)>>1) + dy;
                fx = ((unsigned)(mx + 0x3000))/3 - 0x1000;
                fy = ((unsigned)(my + 0x3000))/3 - 0x1000;
                dxy = (mx - 3*fx) + 4*(my - 3*fy);

                svq3_mc_dir_part (s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
                my = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
                dxy = (mx&1) + 2*(my&1);

                svq3_mc_dir_part (s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
                my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;

                svq3_mc_dir_part (s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx,my);

                if (part_height == 8 && i < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;

                    if (part_width == 8 && j < 8) {
                        *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
                    }
                }
                if (part_width == 8 && j < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
                }
                if (part_width == 4 || part_height == 4) {
                    *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
                }
            }

            /* write back motion vectors */
            fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
        }
    }

    return 0;
}
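/*
 * Note (added here for clarity): svq3_mc_dir() keeps motion vectors
 * internally in 1/6-pel units, the least common multiple of the half-pel and
 * third-pel grids. That is why the decoded vector is rescaled before the
 * write-back above: by 6 in FULLPEL_MODE, by 3 in HALFPEL_MODE, and doubled
 * (mx += mx) in THIRDPEL_MODE.
 */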
static int svq3_decode_mb (H264Context *h, unsigned int mb_type) {
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    const int b_xy  = 4*s->mb_x + 4*s->mb_y*h->b_stride;

    h->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
            svq3_mc_dir_part (s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);

            if (s->pict_type == FF_B_TYPE) {
                svq3_mc_dir_part (s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
            }

            mb_type = MB_TYPE_SKIP;
        } else {
            mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
            if (svq3_mc_dir (h, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir (h, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) {
            mode = THIRDPEL_MODE;
        } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) {
            mode = HALFPEL_MODE;
        } else {
            mode = FULLPEL_MODE;
        }

        /* fill caches */
        /* note ref_cache should contain here:
            ????????
            ???11111
            N??11111
            N??11111
            N??11111
            N
        */

        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && h->intra4x4_pred_mode[mb_xy - 1][0] != -1) {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
                }
            } else {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
                }
            }
            if (s->mb_y > 0) {
                memcpy (h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
                memset (&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < (s->mb_width - 1)) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
                    h->ref_cache[m][scan8[0] + 4 - 1*8] =
                        (h->intra4x4_pred_mode[mb_xy - s->mb_stride + 1][0] == -1 ||
                         h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
            } else
                memset (&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);

            if (s->pict_type != FF_B_TYPE)
                break;
        }
        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == FF_P_TYPE) {
            if (svq3_mc_dir (h, (mb_type - 1), mode, 0, 0) < 0)
                return -1;
        } else {        /* FF_B_TYPE */
            if (mb_type != 2) {
                if (svq3_mc_dir (h, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset (s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
            if (mb_type != 1) {
                if (svq3_mc_dir (h, 0, mode, 1, (mb_type == 3)) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset (s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        memset (h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));

        if (mb_type == 8) {
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++) {
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[mb_xy - 1][i];
                }
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
                    h->left_samples_available = 0x5F5F;
                }
            }
            if (s->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][4];
                h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][5];
                h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][6];
                h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][3];

                if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
                    h->top_samples_available = 0x33FF;
                }
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                vlc = svq3_get_ue_golomb (&s->gb);

                if (vlc >= 25) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
                    return -1;
                }

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {        /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++) {
                memset (&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
            }
        }

        write_back_intra_pred_mode (h);

        if (mb_type == 8) {
            check_intra4x4_pred_mode (h);

            h->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++) {
                memset (&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
            }

            h->top_samples_available  = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = check_intra_pred_mode (h, dir)) == -1) {
            av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
            return -1;
        }

        cbp = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }
    if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
        for (i = 0; i < 4; i++) {
            memset (s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
        }
        if (s->pict_type == FF_B_TYPE) {
            for (i = 0; i < 4; i++) {
                memset (s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
            }
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset (h->intra4x4_pred_mode[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
        memset (h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
        s->dsp.clear_blocks(h->mb);
    }

    if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
        if ((vlc = svq3_get_ue_golomb (&s->gb)) >= 48) {
            av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
        s->qscale += svq3_get_se_golomb (&s->gb);

        if (s->qscale > 31) {
            av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        if (svq3_decode_block (&s->gb, h->mb, 0, 0)) {
            av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++) {
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
                    h->non_zero_count_cache[ scan8[k] ] = 1;

                    if (svq3_decode_block (&s->gb, &h->mb[16*k], index, type)) {
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
                        return -1;
                    }
                }
            }
        }

        if ((cbp & 0x30)) {
            for (i = 0; i < 2; ++i) {
                if (svq3_decode_block (&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
                    return -1;
                }
            }

            if ((cbp & 0x20)) {
                for (i = 0; i < 8; i++) {
                    h->non_zero_count_cache[ scan8[16+i] ] = 1;

                    if (svq3_decode_block (&s->gb, &h->mb[16*(16 + i)], 1, 1)) {
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
                        return -1;
                    }
                }
            }
        }
    }

    s->current_picture.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type)) {
        h->chroma_pred_mode = check_intra_pred_mode (h, DC_PRED8x8);
    }

    return 0;
}
static int svq3_decode_slice_header (H264Context *h) {
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    int i, header;

    header = get_bits (&s->gb, 8);

    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = (header >> 5) & 3;

        h->next_slice_index = get_bits_count(&s->gb) + 8*show_bits (&s->gb, 8*length) + 8*length;

        if (h->next_slice_index > s->gb.size_in_bits) {
            av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
        skip_bits(&s->gb, 8);

        if (length > 0) {
            memcpy ((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
                    &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
        }
    }

    if ((i = svq3_get_ue_golomb (&s->gb)) == INVALID_VLC || i >= 3) {
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[i];

    if ((header & 0x9F) == 2) {
        i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
        s->mb_skip_run = get_bits (&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
    } else {
        skip_bits1 (&s->gb);
        s->mb_skip_run = 0;
    }

    h->slice_num = get_bits (&s->gb, 8);
    s->qscale = get_bits (&s->gb, 5);
    s->adaptive_quant = get_bits1 (&s->gb);

    /* unknown fields */
    skip_bits1 (&s->gb);

    if (h->unknown_svq3_flag) {
        skip_bits1 (&s->gb);
    }

    skip_bits1 (&s->gb);
    skip_bits (&s->gb, 2);

    while (get_bits1 (&s->gb)) {
        skip_bits (&s->gb, 8);
    }

    /* reset intra predictors and invalidate motion vector references */
    if (s->mb_x > 0) {
        memset (h->intra4x4_pred_mode[mb_xy - 1], -1, 4*sizeof(int8_t));
        memset (h->intra4x4_pred_mode[mb_xy - s->mb_x], -1, 8*sizeof(int8_t)*s->mb_x);
    }
    if (s->mb_y > 0) {
        memset (h->intra4x4_pred_mode[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));

        if (s->mb_x > 0) {
            h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] = -1;
        }
    }

    return 0;
}
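/*
 * Informal summary of the slice header parsed above (comment added to this
 * listing): an 8-bit marker for which (header & 0x9F) must be 1 or 2 and
 * whose bits 5-6 give the byte count of an explicit slice-size field, the
 * slice size itself, a Golomb-coded slice type, an mb_skip_run when
 * (header & 0x9F) == 2, then the 8-bit slice_num, 5-bit qscale, the
 * adaptive_quant flag and a few still-unidentified bits.
 */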
static int svq3_decode_frame (AVCodecContext *avctx,
                              void *data, int *data_size,
                              const uint8_t *buf, int buf_size) {
    MpegEncContext *const s = avctx->priv_data;
    H264Context *const h = avctx->priv_data;
    int m, mb_type;
    unsigned char *extradata;
    unsigned int size;

    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;
    s->unrestricted_mv = 1;

    if (!s->context_initialized) {
        s->width  = avctx->width;
        s->height = avctx->height;
        h->halfpel_flag  = 1;
        h->thirdpel_flag = 1;
        h->unknown_svq3_flag = 0;
        h->chroma_qp[0] = h->chroma_qp[1] = 4;

        if (MPV_common_init (s) < 0)
            return -1;

        h->b_stride = 4*s->mb_width;

        alloc_tables (h);

        /* prowl for the "SEQH" marker in the extradata */
        extradata = (unsigned char *)avctx->extradata;
        for (m = 0; m < avctx->extradata_size; m++) {
            if (!memcmp (extradata, "SEQH", 4))
                break;
            extradata++;
        }

        /* if a match was found, parse the extra data */
        if (extradata && !memcmp (extradata, "SEQH", 4)) {

            GetBitContext gb;

            size = AV_RB32(&extradata[4]);
            init_get_bits (&gb, extradata + 8, size*8);

            /* 'frame size code' and optional 'width, height' */
            if (get_bits (&gb, 3) == 7) {
                skip_bits (&gb, 12);
                skip_bits (&gb, 12);
            }

            h->halfpel_flag  = get_bits1 (&gb);
            h->thirdpel_flag = get_bits1 (&gb);

            /* unknown fields */
            skip_bits1 (&gb);
            skip_bits1 (&gb);
            skip_bits1 (&gb);
            skip_bits1 (&gb);

            s->low_delay = get_bits1 (&gb);

            /* unknown field */
            skip_bits1 (&gb);

            while (get_bits1 (&gb)) {
                skip_bits (&gb, 8);
            }

            h->unknown_svq3_flag = get_bits1 (&gb);
            avctx->has_b_frames = !s->low_delay;
        }
    }
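/*
 * Note (added to this listing): the SEQH chunk located in the stsd extradata
 * above is what carries the half-pel/third-pel capability flags, the
 * low_delay flag (and hence has_b_frames) and the extra per-slice bit gated
 * by unknown_svq3_flag; several of its other fields are still unidentified
 * and are simply skipped.
 */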
    /* special case for last picture */
    if (buf_size == 0) {
        if (s->next_picture_ptr && !s->low_delay) {
            *(AVFrame *) data = *(AVFrame *) &s->next_picture;
            s->next_picture_ptr = NULL;
            *data_size = sizeof(AVFrame);
        }
        return 0;
    }

    init_get_bits (&s->gb, buf, 8*buf_size);

    s->mb_x = s->mb_y = h->mb_xy = 0;

    if (svq3_decode_slice_header (h))
        return -1;

    s->pict_type = h->slice_type;
    s->picture_number = h->slice_num;

    if (avctx->debug&FF_DEBUG_PICT_INFO) {
        av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
               s->adaptive_quant, s->qscale, h->slice_num);
    }

    /* for hurry_up==5 */
    s->current_picture.pict_type = s->pict_type;
    s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);

    /* Skip B-frames if we do not have reference frames. */
    if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE) return 0;
    /* Skip B-frames if we are in a hurry. */
    if (avctx->hurry_up && s->pict_type == FF_B_TYPE) return 0;
    /* Skip everything if we are in a hurry >= 5. */
    if (avctx->hurry_up >= 5) return 0;
    if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
       || avctx->skip_frame >= AVDISCARD_ALL)
        return 0;
    if (s->next_p_frame_damaged) {
        if (s->pict_type == FF_B_TYPE)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    if (frame_start (h) < 0)
        return -1;

    if (s->pict_type == FF_B_TYPE) {
        h->frame_num_offset = (h->slice_num - h->prev_frame_num);

        if (h->frame_num_offset < 0) {
            h->frame_num_offset += 256;
        }
        if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        h->prev_frame_num = h->frame_num;
        h->frame_num = h->slice_num;
        h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);

        if (h->prev_frame_num_offset < 0) {
            h->prev_frame_num_offset += 256;
        }
    }

    for (m = 0; m < 2; m++) {
        int i;
        for (i = 0; i < 4; i++) {
            int j;
            for (j = -1; j < 4; j++)
                h->ref_cache[m][scan8[0] + 8*i + j] = 1;
            if (i < 3)
                h->ref_cache[m][scan8[0] + 8*i + j] = PART_NOT_AVAILABLE;
        }
    }
    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;

            if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
                ((get_bits_count(&s->gb) & 7) == 0 || show_bits (&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {

                skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb));
                s->gb.size_in_bits = 8*buf_size;

                if (svq3_decode_slice_header (h))
                    return -1;

                /* TODO: support s->mb_skip_run */
            }

            mb_type = svq3_get_ue_golomb (&s->gb);

            if (s->pict_type == FF_I_TYPE) {
                mb_type += 8;
            } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
                mb_type += 4;
            }
            if (mb_type > 33 || svq3_decode_mb (h, mb_type)) {
                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if (mb_type != 0) {
                hl_decode_mb (h);
            }

            if (s->pict_type != FF_B_TYPE && !s->low_delay) {
                s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                    (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
            }
        }

        ff_draw_horiz_band(s, 16*s->mb_y, 16);
    }

    MPV_frame_end(s);

    if (s->pict_type == FF_B_TYPE || s->low_delay) {
        *(AVFrame *) data = *(AVFrame *) &s->current_picture;
    } else {
        *(AVFrame *) data = *(AVFrame *) &s->last_picture;
    }

    avctx->frame_number = s->picture_number - 1;

    /* Do not output the last pic after seeking. */
    if (s->last_picture_ptr || s->low_delay) {
        *data_size = sizeof(AVFrame);
    }

    return buf_size;
}
AVCodec svq3_decoder = {
    "svq3",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ3,
    sizeof(H264Context),
    decode_init,
    NULL,
    decode_end,
    svq3_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3"),
};