/*
 * Copyright (c) 2003 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * How to use this decoder:
 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
 * have stsd atoms to describe media trak properties. A stsd atom for a
 * video trak contains 1 or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information in this atom to operate correctly. Such
 * is the case with SVQ3. In order to get the best use out of this decoder,
 * the calling app must make the SVQ3 ImageDescription atom available
 * via the AVCodecContext's extradata[_size] field:
 *
 * AVCodecContext.extradata = pointer to ImageDescription, first characters
 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know you have these parameters passed correctly when the decoder
 * correctly decodes this file:
 *  http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 */
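
/*
 * A minimal sketch of how a calling app might hand the atom over
 * (illustration only, not part of the decoder; "image_desc" and
 * "image_desc_size" are hypothetical values a demuxer would provide,
 * pointing at the payload that starts with the 'SVQ3' fourcc):
 *
 *     avctx->extradata = av_mallocz(image_desc_size + FF_INPUT_BUFFER_PADDING_SIZE);
 *     if (avctx->extradata) {
 *         memcpy(avctx->extradata, image_desc, image_desc_size);
 *         avctx->extradata_size = image_desc_size;
 *     }
 */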

#if CONFIG_ZLIB
#include <zlib.h>
#endif

#include "svq1.h"

/**
 * @file libavcodec/svq3.c
 * svq3 decoder.
 */

#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4

/* dual scan (from some older h264 draft)
 o-->o-->o   o
         |  /|
 o   o   o / o
 | / |   |/  |
 o   o   o   o
   /
 o-->o-->o-->o
*/

static const uint8_t svq3_scan[16] = {
    0+0*4, 1+0*4, 2+0*4, 2+1*4,
    2+2*4, 3+0*4, 3+1*4, 3+2*4,
    0+1*4, 0+2*4, 1+1*4, 1+2*4,
    0+3*4, 1+3*4, 2+3*4, 3+3*4,
};

static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};

static const int8_t svq3_pred_1[6][6][5] = {
    { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
      { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};

static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};

static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};

static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp)
{
    const int qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const int x_offset[4] = {0, 1*stride, 4*stride,  5*stride};
    static const int y_offset[4] = {0, 2*stride, 8*stride, 10*stride};

    for (i = 0; i < 4; i++) {
        const int offset = y_offset[i];
        const int z0 = 13*(block[offset+stride*0] +    block[offset+stride*4]);
        const int z1 = 13*(block[offset+stride*0] -    block[offset+stride*4]);
        const int z2 =  7* block[offset+stride*1] - 17*block[offset+stride*5];
        const int z3 = 17* block[offset+stride*1] +  7*block[offset+stride*5];

        temp[4*i+0] = z0 + z3;
        temp[4*i+1] = z1 + z2;
        temp[4*i+2] = z1 - z2;
        temp[4*i+3] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0 = 13*(temp[4*0+i] +    temp[4*2+i]);
        const int z1 = 13*(temp[4*0+i] -    temp[4*2+i]);
        const int z2 =  7* temp[4*1+i] - 17*temp[4*3+i];
        const int z3 = 17* temp[4*1+i] +  7*temp[4*3+i];

        block[stride*0 +offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
        block[stride*2 +offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
        block[stride*8 +offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
        block[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
    }
}
#undef stride

static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
                            int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    if (dc) {
        dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
        block[0] = 0;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[0 + 4*i] +    block[2 + 4*i]);
        const int z1 = 13*(block[0 + 4*i] -    block[2 + 4*i]);
        const int z2 =  7* block[1 + 4*i] - 17*block[3 + 4*i];
        const int z3 = 17* block[1 + 4*i] +  7*block[3 + 4*i];

        block[0 + 4*i] = z0 + z3;
        block[1 + 4*i] = z1 + z2;
        block[2 + 4*i] = z1 - z2;
        block[3 + 4*i] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[i + 4*0] +    block[i + 4*2]);
        const int z1 = 13*(block[i + 4*0] -    block[i + 4*2]);
        const int z2 =  7* block[i + 4*1] - 17*block[i + 4*3];
        const int z3 = 17* block[i + 4*1] +  7*block[i + 4*3];
        const int rr = (dc + 0x80000);

        dst[i + stride*0] = cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
        dst[i + stride*1] = cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
        dst[i + stride*2] = cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
        dst[i + stride*3] = cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
    }
}

static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] =
    { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

    int run, level, sign, vlc, limit;
    const int intra = (3 * type) >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {

            if (vlc == INVALID_VLC)
                return -1;

            sign = (vlc & 0x1) - 1;
            vlc  = (vlc + 1) >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = (vlc & 0x3);
                    level = ((vlc + 9) >> 2) - run;
                }
            } else {
                if (vlc < 16) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run   = (vlc & 0x7);
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = (vlc & 0xF);
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}

static inline void svq3_mc_dir_part(MpegEncContext *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2

    mx += x;
    my += y;

    if (mx < 0 || mx >= (s->h_edge_pos - width  - 1) ||
        my < 0 || my >= (s->v_edge_pos - height - 1)) {

        if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
            emu = 1;
        }

        mx = av_clip (mx, -16, (s->h_edge_pos - width  + 15));
        my = av_clip (my, -16, (s->v_edge_pos - height + 15));
    }

    /* form component predictions */
    dest = s->current_picture.data[0] + x + y*s->linesize;
    src  = pic->data[0] + mx + my*s->linesize;

    if (emu) {
        ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
                            mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
    else
        (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);

    if (!(s->flags & CODEC_FLAG_GRAY)) {
        mx     = (mx + (mx < (int) x)) >> 1;
        my     = (my + (my < (int) y)) >> 1;
        width  = (width  >> 1);
        height = (height >> 1);
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
            src  = pic->data[i] + mx + my*s->uvlinesize;

            if (emu) {
                ff_emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
                                    mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
            else
                (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
        }
    }
}

static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
                              int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned) (size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
    const int h_edge_pos  = 6*(s->h_edge_pos - part_width ) - extra_width;
    const int v_edge_pos  = 6*(s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height) {
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
            int dxy;
            x = 16*s->mb_x + j;
            y = 16*s->mb_y + i;
            k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
            } else {
                mx = s->next_picture.motion_val[0][b_xy][0]<<1;
                my = s->next_picture.motion_val[0][b_xy][1]<<1;

                if (dir == 0) {
                    mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                } else {
                    mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
            my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&s->gb);
                dx = svq3_get_se_golomb(&s->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = ((mx + 1)>>1) + dx;
                my  = ((my + 1)>>1) + dy;
                fx  = ((unsigned)(mx + 0x3000))/3 - 0x1000;
                fy  = ((unsigned)(my + 0x3000))/3 - 0x1000;
                dxy = (mx - 3*fx) + 4*(my - 3*fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
                my  = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
                dxy = (mx&1) + 2*(my&1);

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
                my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx,my);

                if (part_height == 8 && i < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;

                    if (part_width == 8 && j < 8) {
                        *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
                    }
                }
                if (part_width == 8 && j < 8) {
                    *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
                }
                if (part_width == 4 || part_height == 4) {
                    *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
                }
            }

            /* write back motion vectors */
            fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
        }
    }

    return 0;
}

static int svq3_decode_mb(H264Context *h, unsigned int mb_type)
{
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    const int b_xy  = 4*s->mb_x + 4*s->mb_y*h->b_stride;

    h->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
            svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);

            if (s->pict_type == FF_B_TYPE) {
                svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
            }

            mb_type = MB_TYPE_SKIP;
        } else {
            mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6);
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) {
            mode = THIRDPEL_MODE;
        } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) {
            mode = HALFPEL_MODE;
        } else {
            mode = FULLPEL_MODE;
        }

        /* fill caches */
        /* note ref_cache should contain here:
            ????????
            ???11111
            N??11111
            N??11111
            N??11111
        */

        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && h->intra4x4_pred_mode[mb_xy - 1][0] != -1) {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
                }
            } else {
                for (i = 0; i < 4; i++) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
                }
            }
            if (s->mb_y > 0) {
                memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < (s->mb_width - 1)) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
                    h->ref_cache[m][scan8[0] + 4 - 1*8] =
                        (h->intra4x4_pred_mode[mb_xy - s->mb_stride + 1][0] == -1 ||
                         h->intra4x4_pred_mode[mb_xy - s->mb_stride    ][4] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
            } else
                memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);

            if (s->pict_type != FF_B_TYPE)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == FF_P_TYPE) {
            if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
                return -1;
        } else {        /* FF_B_TYPE */
            if (mb_type != 2) {
                if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));

        if (mb_type == 8) {
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++) {
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[mb_xy - 1][i];
                }
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
                    h->left_samples_available = 0x5F5F;
                }
            }
            if (s->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][4];
                h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][5];
                h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][6];
                h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][3];

                if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
                    h->top_samples_available = 0x33FF;
                }
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i+=2) {
                vlc = svq3_get_ue_golomb(&s->gb);

                if (vlc >= 25) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
                    return -1;
                }

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {    /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
            }
        }

        write_back_intra_pred_mode(h);

        if (mb_type == 8) {
            check_intra4x4_pred_mode(h);

            h->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
            }

            h->top_samples_available  = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = check_intra_pred_mode(h, dir)) == -1) {
            av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
            return -1;
        }

        cbp = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) {
        for (i = 0; i < 4; i++) {
            memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
        }
        if (s->pict_type == FF_B_TYPE) {
            for (i = 0; i < 4; i++) {
                memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
            }
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) {
        memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
        s->dsp.clear_blocks(h->mb);
    }

    if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) {
        if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48) {
            av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) {
        s->qscale += svq3_get_se_golomb(&s->gb);

        if (s->qscale > 31) {
            av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        if (svq3_decode_block(&s->gb, h->mb, 0, 0)) {
            av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++) {
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
                    h->non_zero_count_cache[ scan8[k] ] = 1;

                    if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)) {
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
                        return -1;
                    }
                }
            }
        }

        if ((cbp & 0x30)) {
            for (i = 0; i < 2; ++i) {
                if (svq3_decode_block(&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
                    return -1;
                }
            }

            if ((cbp & 0x20)) {
                for (i = 0; i < 8; i++) {
                    h->non_zero_count_cache[ scan8[16+i] ] = 1;

                    if (svq3_decode_block(&s->gb, &h->mb[16*(16 + i)], 1, 1)) {
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
                        return -1;
                    }
                }
            }
        }
    }

    h->cbp = cbp;
    s->current_picture.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type)) {
        h->chroma_pred_mode = check_intra_pred_mode(h, DC_PRED8x8);
    }

    return 0;
}

static int svq3_decode_slice_header(H264Context *h)
{
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    int i, header;

    header = get_bits(&s->gb, 8);

    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = (header >> 5) & 3;

        h->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;

        if (h->next_slice_index > s->gb.size_in_bits) {
            av_log(h->s.avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
        skip_bits(&s->gb, 8);

        if (h->svq3_watermark_key) {
            uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
            AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ h->svq3_watermark_key);
        }
        if (length > 0) {
            memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
                   &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
        }
        skip_bits_long(&s->gb, 0);
    }

    if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3) {
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[i];

    if ((header & 0x9F) == 2) {
        i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
        s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
    } else {
        skip_bits1(&s->gb);
        s->mb_skip_run = 0;
    }

    h->slice_num = get_bits(&s->gb, 8);
    s->qscale = get_bits(&s->gb, 5);
    s->adaptive_quant = get_bits1(&s->gb);

    /* unknown fields */
    skip_bits1(&s->gb);

    if (h->unknown_svq3_flag) {
        skip_bits1(&s->gb);
    }

    skip_bits1(&s->gb);
    skip_bits(&s->gb, 2);

    while (get_bits1(&s->gb)) {
        skip_bits(&s->gb, 8);
    }

    /* reset intra predictors and invalidate motion vector references */
    if (s->mb_x > 0) {
        memset(h->intra4x4_pred_mode[mb_xy - 1], -1, 4*sizeof(int8_t));
        memset(h->intra4x4_pred_mode[mb_xy - s->mb_x], -1, 8*sizeof(int8_t)*s->mb_x);
    }
    if (s->mb_y > 0) {
        memset(h->intra4x4_pred_mode[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));

        if (s->mb_x > 0) {
            h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] = -1;
        }
    }

    return 0;
}

static av_cold int svq3_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *const s = avctx->priv_data;
    H264Context *const h = avctx->priv_data;
    int m;
    unsigned char *extradata;
    unsigned int size;

    if (avctx->thread_count > 1) {
        av_log(avctx, AV_LOG_ERROR, "SVQ3 does not support multithreaded decoding, patch welcome! (check latest SVN too)\n");
        return -1;
    }

    if (decode_init(avctx) < 0)
        return -1;

    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;
    s->unrestricted_mv = 1;
    h->is_complex = 1;

    if (!s->context_initialized) {
        s->width  = avctx->width;
        s->height = avctx->height;
        h->halfpel_flag  = 1;
        h->thirdpel_flag = 1;
        h->unknown_svq3_flag = 0;
        h->chroma_qp[0] = h->chroma_qp[1] = 4;

        if (MPV_common_init(s) < 0)
            return -1;

        h->b_stride = 4*s->mb_width;

        alloc_tables(h);

        /* prowl for the "SEQH" marker in the extradata */
        extradata = (unsigned char *)avctx->extradata;
        for (m = 0; m < avctx->extradata_size; m++) {
            if (!memcmp(extradata, "SEQH", 4))
                break;
            extradata++;
        }

        /* if a match was found, parse the extra data */
        if (extradata && !memcmp(extradata, "SEQH", 4)) {

            GetBitContext gb;
            int frame_size_code;

            size = AV_RB32(&extradata[4]);
            init_get_bits(&gb, extradata + 8, size*8);

            /* 'frame size code' and optional 'width, height' */
            frame_size_code = get_bits(&gb, 3);
            switch (frame_size_code) {
                case 0: avctx->width = 160; avctx->height = 120; break;
                case 1: avctx->width = 128; avctx->height =  96; break;
                case 2: avctx->width = 176; avctx->height = 144; break;
                case 3: avctx->width = 352; avctx->height = 288; break;
                case 4: avctx->width = 704; avctx->height = 576; break;
                case 5: avctx->width = 240; avctx->height = 180; break;
                case 6: avctx->width = 320; avctx->height = 240; break;
                case 7:
                    avctx->width  = get_bits(&gb, 12);
                    avctx->height = get_bits(&gb, 12);
                    break;
            }

            h->halfpel_flag  = get_bits1(&gb);
            h->thirdpel_flag = get_bits1(&gb);

            /* unknown fields */
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);

            s->low_delay = get_bits1(&gb);

            /* unknown field */
            skip_bits1(&gb);

            while (get_bits1(&gb)) {
                skip_bits(&gb, 8);
            }

            h->unknown_svq3_flag = get_bits1(&gb);
            avctx->has_b_frames = !s->low_delay;
            if (h->unknown_svq3_flag) {
#if CONFIG_ZLIB
                unsigned watermark_width  = svq3_get_ue_golomb(&gb);
                unsigned watermark_height = svq3_get_ue_golomb(&gb);
                int u1 = svq3_get_ue_golomb(&gb);
                int u2 = get_bits(&gb, 8);
                int u3 = get_bits(&gb, 2);
                int u4 = svq3_get_ue_golomb(&gb);
                unsigned buf_len = watermark_width*watermark_height*4;
                int offset = (get_bits_count(&gb)+7)>>3;
                uint8_t *buf;

                if ((uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
                    return -1;

                buf = av_malloc(buf_len);
                av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
                av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
                if (uncompress(buf, (uLong*)&buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
                    av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
                    av_free(buf);
                    return -1;
                }
                h->svq3_watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
                h->svq3_watermark_key = h->svq3_watermark_key << 16 | h->svq3_watermark_key;
                av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", h->svq3_watermark_key);
                av_free(buf);
#else
                av_log(avctx, AV_LOG_ERROR, "this SVQ3 file contains a watermark which needs zlib support compiled in\n");
                return -1;
#endif
            }
        }
    }

    return 0;
}

static int svq3_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MpegEncContext *const s = avctx->priv_data;
    H264Context *const h = avctx->priv_data;
    int m, mb_type;

    /* special case for last picture */
    if (buf_size == 0) {
        if (s->next_picture_ptr && !s->low_delay) {
            *(AVFrame *) data = *(AVFrame *) &s->next_picture;
            s->next_picture_ptr = NULL;
            *data_size = sizeof(AVFrame);
        }
        return 0;
    }

    init_get_bits (&s->gb, buf, 8*buf_size);

    s->mb_x = s->mb_y = h->mb_xy = 0;

    if (svq3_decode_slice_header(h))
        return -1;

    s->pict_type = h->slice_type;
    s->picture_number = h->slice_num;

    if (avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
               s->adaptive_quant, s->qscale, h->slice_num);
    }

    /* for hurry_up == 5 */
    s->current_picture.pict_type = s->pict_type;
    s->current_picture.key_frame = (s->pict_type == FF_I_TYPE);

    /* Skip B-frames if we do not have reference frames. */
    if (s->last_picture_ptr == NULL && s->pict_type == FF_B_TYPE)
        return 0;
    /* Skip B-frames if we are in a hurry. */
    if (avctx->hurry_up && s->pict_type == FF_B_TYPE)
        return 0;
    /* Skip everything if we are in a hurry >= 5. */
    if (avctx->hurry_up >= 5)
        return 0;
    if ( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
       || avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        if (s->pict_type == FF_B_TYPE)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    if (frame_start(h) < 0)
        return -1;

    if (s->pict_type == FF_B_TYPE) {
        h->frame_num_offset = (h->slice_num - h->prev_frame_num);

        if (h->frame_num_offset < 0) {
            h->frame_num_offset += 256;
        }
        if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        h->prev_frame_num = h->frame_num;
        h->frame_num = h->slice_num;
        h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);

        if (h->prev_frame_num_offset < 0) {
            h->prev_frame_num_offset += 256;
        }
    }

    for (m = 0; m < 2; m++) {
        int i;
        for (i = 0; i < 4; i++) {
            int j;
            for (j = -1; j < 4; j++)
                h->ref_cache[m][scan8[0] + 8*i + j] = 1;
            if (i < 3)
                h->ref_cache[m][scan8[0] + 8*i + j] = PART_NOT_AVAILABLE;
        }
    }

    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;

            if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
                ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {

                skip_bits(&s->gb, h->next_slice_index - get_bits_count(&s->gb));
                s->gb.size_in_bits = 8*buf_size;

                if (svq3_decode_slice_header(h))
                    return -1;

                /* TODO: support s->mb_skip_run */
            }

            mb_type = svq3_get_ue_golomb(&s->gb);

            if (s->pict_type == FF_I_TYPE) {
                mb_type += 8;
            } else if (s->pict_type == FF_B_TYPE && mb_type >= 4) {
                mb_type += 4;
            }
            if (mb_type > 33 || svq3_decode_mb(h, mb_type)) {
                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if (mb_type != 0) {
                hl_decode_mb (h);
            }

            if (s->pict_type != FF_B_TYPE && !s->low_delay) {
                s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                    (s->pict_type == FF_P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
            }
        }

        ff_draw_horiz_band(s, 16*s->mb_y, 16);
    }

    MPV_frame_end(s);

    if (s->pict_type == FF_B_TYPE || s->low_delay) {
        *(AVFrame *) data = *(AVFrame *) &s->current_picture;
    } else {
        *(AVFrame *) data = *(AVFrame *) &s->last_picture;
    }

    /* Do not output the last pic after seeking. */
    if (s->last_picture_ptr || s->low_delay) {
        *data_size = sizeof(AVFrame);
    }

    return buf_size;
}

AVCodec svq3_decoder = {
    "svq3",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ3,
    sizeof(H264Context),
    svq3_decode_init,
    NULL,
    decode_end,
    svq3_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3"),
    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE},
};