/*
 * Copyright (c) 2003 The FFmpeg Project.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * How to use this decoder:
 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
 * have stsd atoms to describe media trak properties. A stsd atom for a
 * video trak contains 1 or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information in this atom to operate correctly. Such
 * is the case with SVQ3. In order to get the best use out of this decoder,
 * the calling app must make the SVQ3 ImageDescription atom available
 * via the AVCodecContext's extradata[_size] field:
 *
 * AVCodecContext.extradata = pointer to ImageDescription, first characters
 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know you have these parameters passed correctly when the decoder
 * correctly decodes this file:
 *  ftp://ftp.mplayerhq.hu/MPlayer/samples/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 */
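
/*
 * An illustrative sketch of the caller-side setup described above, assuming
 * the ImageDescription payload (minus its leading 4-byte length field) has
 * already been located in the stsd atom; image_desc and image_desc_size are
 * hypothetical names for that buffer and its size:
 *
 *     AVCodec        *codec = avcodec_find_decoder(CODEC_ID_SVQ3);
 *     AVCodecContext *avctx = avcodec_alloc_context();
 *
 *     avctx->extradata      = av_mallocz(image_desc_size + FF_INPUT_BUFFER_PADDING_SIZE);
 *     avctx->extradata_size = image_desc_size;
 *     memcpy(avctx->extradata, image_desc, image_desc_size); // begins with 'S','V','Q','3'
 *
 *     if (avcodec_open(avctx, codec) < 0)
 *         return -1;   // decoder could not be opened
 */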

#ifdef CONFIG_ZLIB
#include <zlib.h>
#endif

#include "svq1.h"

/**
 * @file svq3.c
 * svq3 decoder.
 */

#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4

/* dual scan (from some older h264 draft)
 o-->o-->o   o
         |  /|
 o   o   o / o
 | / |   |/  |
 o   o   o   o
           |
 o-->o-->o-->o
*/
static const uint8_t svq3_scan[16] = {
    0+0*4, 1+0*4, 2+0*4, 2+1*4,
    2+2*4, 3+0*4, 3+1*4, 3+2*4,
    0+1*4, 0+2*4, 1+1*4, 1+2*4,
    0+3*4, 1+3*4, 2+3*4, 3+3*4,
};

static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};

static const int8_t svq3_pred_1[6][6][5] = {
    { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
      { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};

static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};

static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,  6154,  6914,  7761,  8718,
     9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
    24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
    61694, 68745, 77615, 89113,100253,109366,126635,141533
};

static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp)
{
    const int qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const int x_offset[4] = {0, 1*stride, 4* stride, 5*stride};
    static const int y_offset[4] = {0, 2*stride, 8* stride, 10*stride};

    for (i = 0; i < 4; i++){
        const int offset = y_offset[i];
        const int z0 = 13*(block[offset+stride*0] +    block[offset+stride*4]);
        const int z1 = 13*(block[offset+stride*0] -    block[offset+stride*4]);
        const int z2 =  7* block[offset+stride*1] - 17*block[offset+stride*5];
        const int z3 = 17* block[offset+stride*1] +  7*block[offset+stride*5];

        temp[4*i+0] = z0+z3;
        temp[4*i+1] = z1+z2;
        temp[4*i+2] = z1-z2;
        temp[4*i+3] = z0-z3;
    }

    for (i = 0; i < 4; i++){
        const int offset = x_offset[i];
        const int z0 = 13*(temp[4*0+i] +    temp[4*2+i]);
        const int z1 = 13*(temp[4*0+i] -    temp[4*2+i]);
        const int z2 =  7* temp[4*1+i] - 17*temp[4*3+i];
        const int z3 = 17* temp[4*1+i] +  7*temp[4*3+i];

        block[stride*0 +offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
        block[stride*2 +offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
        block[stride*8 +offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
        block[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
    }
}
#undef stride

static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
                            int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    if (dc) {
        dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
        block[0] = 0;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[0 + 4*i] +    block[2 + 4*i]);
        const int z1 = 13*(block[0 + 4*i] -    block[2 + 4*i]);
        const int z2 =  7* block[1 + 4*i] - 17*block[3 + 4*i];
        const int z3 = 17* block[1 + 4*i] +  7*block[3 + 4*i];

        block[0 + 4*i] = z0 + z3;
        block[1 + 4*i] = z1 + z2;
        block[2 + 4*i] = z1 - z2;
        block[3 + 4*i] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[i + 4*0] +    block[i + 4*2]);
        const int z1 = 13*(block[i + 4*0] -    block[i + 4*2]);
        const int z2 =  7* block[i + 4*1] - 17*block[i + 4*3];
        const int z3 = 17* block[i + 4*1] +  7*block[i + 4*3];
        const int rr = (dc + 0x80000);

        dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
        dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
        dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
        dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
    }
}

static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] =
    { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

    int run, level, sign, vlc, limit;
    const int intra = (3 * type) >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {

            if (vlc == INVALID_VLC)
                return -1;

            sign = (vlc & 0x1) - 1;
            vlc  = (vlc + 1) >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = (vlc & 0x3);
                    level = ((vlc + 9) >> 2) - run;
                }
            } else {
                if (vlc < 16) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run   = (vlc & 0x7);
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = (vlc & 0xF);
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}

static inline void svq3_mc_dir_part(MpegEncContext *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2

    mx += x;
    my += y;

    if (mx < 0 || mx >= (s->h_edge_pos - width  - 1) ||
        my < 0 || my >= (s->v_edge_pos - height - 1)) {

        if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
            emu = 1;
        }

        mx = av_clip (mx, -16, (s->h_edge_pos - width  + 15));
        my = av_clip (my, -16, (s->v_edge_pos - height + 15));
    }

    /* form component predictions */
    dest = s->current_picture.data[0] + x + y*s->linesize;
    src  = pic->data[0] + mx + my*s->linesize;

    if (emu) {
        ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
                            mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
    else
        (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);

    if (!(s->flags & CODEC_FLAG_GRAY)) {
        mx     = (mx + (mx < (int) x)) >> 1;
        my     = (my + (my < (int) y)) >> 1;
        width  = (width  >> 1);
        height = (height >> 1);
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
            src  = pic->data[i] + mx + my*s->uvlinesize;

            if (emu) {
                ff_emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
                                    mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
            else
                (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
        }
    }
}

static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
                              int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned) (size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
    const int h_edge_pos  = 6*(s->h_edge_pos - part_width ) - extra_width;
    const int v_edge_pos  = 6*(s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height) {
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
            int dxy;
            x = 16*s->mb_x + j;
            y = 16*s->mb_y + i;
            k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
            } else {
                mx = s->next_picture.motion_val[0][b_xy][0]<<1;
                my = s->next_picture.motion_val[0][b_xy][1]<<1;

                if (dir == 0) {
                    mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                } else {
                    mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
            my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&s->gb);
                dx = svq3_get_se_golomb(&s->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = ((mx + 1)>>1) + dx;
                my  = ((my + 1)>>1) + dy;
                fx  = ((unsigned)(mx + 0x3000))/3 - 0x1000;
                fy  = ((unsigned)(my + 0x3000))/3 - 0x1000;
                dxy = (mx - 3*fx) + 4*(my - 3*fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
                my  = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
                dxy = (mx&1) + 2*(my&1);

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
                my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx,my);

                if (part_height == 8 && i < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;

                    if (part_width == 8 && j < 8) {
                        *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
                    }
                }
                if (part_width == 8 && j < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
                }
                if (part_width == 4 || part_height == 4) {
                    *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
                }
            }

            /* write back motion vectors */
            fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
        }
    }

    return 0;
}

static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
{
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    const int b_xy  = 4*s->mb_x + 4*s->mb_y*h->b_stride;

    h->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
            svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);

            if (s->pict_type == FF_B_TYPE) {
                svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
            }

            mb_type = MB_TYPE_SKIP;
        } else {
            mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) {
            mode = THIRDPEL_MODE;
        } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) {
            mode = HALFPEL_MODE;
        } else {
            mode = FULLPEL_MODE;
        }

        /* fill caches */
        /* note ref_cache should contain here:
            ????????
            ???11111
            N??11111
            N??11111
            N??11111
        */

        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && h->intra4x4_pred_mode[mb_xy - 1][0] != -1) {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
                }
            } else {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
                }
            }
            if (s->mb_y > 0) {
                memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < (s->mb_width - 1)) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
                    h->ref_cache[m][scan8[0] + 4 - 1*8] =
                        (h->intra4x4_pred_mode[mb_xy - s->mb_stride + 1][0] == -1 ||
                         h->intra4x4_pred_mode[mb_xy - s->mb_stride    ][4] == -1) ? PART_NOT_AVAILABLE : 1;
                }else
                    h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] == -1) ? PART_NOT_AVAILABLE : 1;
                }else
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
            }else
                memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);

            if (s->pict_type != FF_B_TYPE)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == FF_P_TYPE) {
            if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
                return -1;
        } else {        /* FF_B_TYPE */
            if (mb_type != 2) {
                if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));

        if (mb_type == 8) {
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++) {
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[mb_xy - 1][i];
                }
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
                    h->left_samples_available = 0x5F5F;
                }
            }
            if (s->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][4];
                h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][5];
                h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][6];
                h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][3];

                if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
                    h->top_samples_available = 0x33FF;
                }
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i+=2) {
                vlc = svq3_get_ue_golomb(&s->gb);

                if (vlc >= 25){
                    av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
                    return -1;
                }

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1){
                    av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {    /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
            }
        }

        write_back_intra_pred_mode(h);

        if (mb_type == 8) {
            check_intra4x4_pred_mode(h);

            h->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
            }

            h->top_samples_available  = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = check_intra_pred_mode(h, dir)) == -1){
            av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
            return -1;
        }

        cbp = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
        for (i = 0; i < 4; i++) {
            memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
        }
        if (s->pict_type == FF_B_TYPE) {
            for (i = 0; i < 4; i++) {
                memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
            }
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
        memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
        s->dsp.clear_blocks(h->mb);
    }

    if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
        if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
            av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
        s->qscale += svq3_get_se_golomb(&s->gb);

        if (s->qscale > 31){
            av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        if (svq3_decode_block(&s->gb, h->mb, 0, 0)){
            av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++) {
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
                    h->non_zero_count_cache[ scan8[k] ] = 1;

                    if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
                        return -1;
                    }
                }
            }
        }

        if ((cbp & 0x30)) {
            for (i = 0; i < 2; ++i) {
                if (svq3_decode_block(&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)){
                    av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
                    return -1;
                }
            }

            if ((cbp & 0x20)) {
                for (i = 0; i < 8; i++) {
                    h->non_zero_count_cache[ scan8[16+i] ] = 1;

                    if (svq3_decode_block(&s->gb, &h->mb[16*(16 + i)], 1, 1)){
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
                        return -1;
                    }
                }
            }
        }
    }

    s->current_picture.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type)) {
        h->chroma_pred_mode = check_intra_pred_mode(h, DC_PRED8x8);
    }

    return 0;
}

static int svq3_decode_slice_header(H264Context *h)
{
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    int i, header;

    header = get_bits(&s->gb, 8);

    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = (header >> 5) & 3;

        h->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;

        if (h->next_slice_index > s->gb.size_in_bits) {
            av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
        skip_bits(&s->gb, 8);

        if (h->svq3_watermark_key) {
            uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
            AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ h->svq3_watermark_key);
        }
        if (length > 0) {
            memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
                   &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
        }
    }

    if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3){
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[i];

    if ((header & 0x9F) == 2) {
        i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
        s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
    } else {
        skip_bits1(&s->gb);
        s->mb_skip_run = 0;
    }

    h->slice_num = get_bits(&s->gb, 8);
    s->qscale = get_bits(&s->gb, 5);
    s->adaptive_quant = get_bits1(&s->gb);

    /* unknown fields */
    skip_bits1(&s->gb);

    if (h->unknown_svq3_flag) {
        skip_bits1(&s->gb);
    }

    skip_bits1(&s->gb);
    skip_bits(&s->gb, 2);

    while (get_bits1(&s->gb)) {
        skip_bits(&s->gb, 8);
    }

    /* reset intra predictors and invalidate motion vector references */
    if (s->mb_x > 0) {
        memset(h->intra4x4_pred_mode[mb_xy - 1], -1, 4*sizeof(int8_t));
        memset(h->intra4x4_pred_mode[mb_xy - s->mb_x], -1, 8*sizeof(int8_t)*s->mb_x);
    }
    if (s->mb_y > 0) {
        memset(h->intra4x4_pred_mode[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));

        if (s->mb_x > 0) {
            h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] = -1;
        }
    }

    return 0;
}

static int svq3_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *const s = avctx->priv_data;
    H264Context *const h = avctx->priv_data;
    int m;
    unsigned char *extradata;
    unsigned int size;

    if (decode_init(avctx) < 0)
        return -1;

    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;
    s->unrestricted_mv = 1;

    if (!s->context_initialized) {
        s->width  = avctx->width;
        s->height = avctx->height;
        h->halfpel_flag  = 1;
        h->thirdpel_flag = 1;
        h->unknown_svq3_flag = 0;
        h->chroma_qp[0] = h->chroma_qp[1] = 4;

        if (MPV_common_init(s) < 0)
            return -1;

        h->b_stride = 4*s->mb_width;

        alloc_tables(h);

        /* prowl for the "SEQH" marker in the extradata */
        extradata = (unsigned char *)avctx->extradata;
        for (m = 0; m < avctx->extradata_size; m++) {
            if (!memcmp(extradata, "SEQH", 4))
                break;
            extradata++;
        }

        /* if a match was found, parse the extra data */
        if (extradata && !memcmp(extradata, "SEQH", 4)) {

            GetBitContext gb;

            size = AV_RB32(&extradata[4]);
            init_get_bits(&gb, extradata + 8, size*8);

            /* 'frame size code' and optional 'width, height' */
            if (get_bits(&gb, 3) == 7) {
                skip_bits(&gb, 12);
                skip_bits(&gb, 12);
            }

            h->halfpel_flag  = get_bits1(&gb);
            h->thirdpel_flag = get_bits1(&gb);

            /* unknown fields */
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);

            s->low_delay = get_bits1(&gb);

            /* unknown field */
            skip_bits1(&gb);

            while (get_bits1(&gb)) {
                skip_bits(&gb, 8);
            }

            h->unknown_svq3_flag = get_bits1(&gb);
            avctx->has_b_frames = !s->low_delay;
            if (h->unknown_svq3_flag) {
#ifdef CONFIG_ZLIB
                unsigned watermark_width  = svq3_get_ue_golomb(&gb);
                unsigned watermark_height = svq3_get_ue_golomb(&gb);
                int u1 = svq3_get_ue_golomb(&gb);
                int u2 = get_bits(&gb, 8);
                int u3 = get_bits(&gb, 2);
                int u4 = svq3_get_ue_golomb(&gb);
                unsigned buf_len = watermark_width*watermark_height*4;
                int offset = (get_bits_count(&gb)+7)>>3;
                uint8_t *buf;

                if ((uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
                    return -1;

                buf = av_malloc(buf_len);
                av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
                av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
                if (uncompress(buf, (uLong*)&buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
                    av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
                    av_free(buf);
                    return -1;
                }
                h->svq3_watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
                h->svq3_watermark_key = h->svq3_watermark_key << 16 | h->svq3_watermark_key;
                av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", h->svq3_watermark_key);
                av_free(buf);
#else
                av_log(avctx, AV_LOG_ERROR, "this SVQ3 file contains a watermark which needs zlib support compiled in\n");
                return -1;
#endif
            }
        }
    }

    return 0;
}

static int svq3_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             const uint8_t *buf, int buf_size)
{
    MpegEncContext *const s = avctx->priv_data;
    H264Context *const h = avctx->priv_data;
    int m, mb_type;

    /* special case for last picture */
    if (buf_size == 0) {
        if (s->next_picture_ptr && !s->low_delay) {
            *(AVFrame *) data = *(AVFrame *) &s->next_picture;
            s->next_picture_ptr = NULL;
            *data_size = sizeof(AVFrame);
        }
        return 0;
    }

    init_get_bits (&s->gb, buf, 8*buf_size);

    s->mb_x = s->mb_y = h->mb_xy = 0;

    if (svq3_decode_slice_header(h))
        return -1;

    s->pict_type = h->slice_type;
    s->picture_number = h->slice_num;

    if (avctx->debug&FF_DEBUG_PICT_INFO){
        av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
               s->adaptive_quant, s->qscale, h->slice_num);
    }

    /* for hurry_up == 5 */
    s->current_picture.pict_type = s->pict_type;
    s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);

    /* Skip B-frames if we do not have reference frames. */
    if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE)
        return 0;
    /* Skip B-frames if we are in a hurry. */
    if (avctx->hurry_up && s->pict_type == FF_B_TYPE)
        return 0;
    /* Skip everything if we are in a hurry >= 5. */
    if (avctx->hurry_up >= 5)
        return 0;
    if (  (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
        ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
        || avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        if (s->pict_type == FF_B_TYPE)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    if (frame_start(h) < 0)
        return -1;

    if (s->pict_type == FF_B_TYPE) {
        h->frame_num_offset = (h->slice_num - h->prev_frame_num);

        if (h->frame_num_offset < 0) {
            h->frame_num_offset += 256;
        }
        if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        h->prev_frame_num = h->frame_num;
        h->frame_num = h->slice_num;
        h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);

        if (h->prev_frame_num_offset < 0) {
            h->prev_frame_num_offset += 256;
        }
    }

    for (m = 0; m < 2; m++){
        int i;
        for (i = 0; i < 4; i++){
            int j;
            for (j = -1; j < 4; j++)
                h->ref_cache[m][scan8[0] + 8*i + j]= 1;
            if (i < 3)
                h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
        }
    }

    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;

            if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
                ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {

                skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb));
                s->gb.size_in_bits = 8*buf_size;

                if (svq3_decode_slice_header(h))
                    return -1;

                /* TODO: support s->mb_skip_run */
            }

            mb_type = svq3_get_ue_golomb(&s->gb);

            if (s->pict_type == FF_I_TYPE) {
                mb_type += 8;
            } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
                mb_type += 4;
            }
            if (mb_type > 33 || svq3_decode_mb(h, mb_type)) {
                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if (mb_type != 0) {
                hl_decode_mb (h);
            }

            if (s->pict_type != FF_B_TYPE && !s->low_delay) {
                s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                    (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
            }
        }

        ff_draw_horiz_band(s, 16*s->mb_y, 16);
    }

    MPV_frame_end(s);

    if (s->pict_type == FF_B_TYPE || s->low_delay) {
        *(AVFrame *) data = *(AVFrame *) &s->current_picture;
    } else {
        *(AVFrame *) data = *(AVFrame *) &s->last_picture;
    }

    avctx->frame_number = s->picture_number - 1;

    /* Do not output the last pic after seeking. */
    if (s->last_picture_ptr || s->low_delay) {
        *data_size = sizeof(AVFrame);
    }

    return buf_size;
}

AVCodec svq3_decoder = {
    "svq3",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ3,
    sizeof(H264Context),
    svq3_decode_init,
    NULL,
    decode_end,
    svq3_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3"),
};