1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "avformat.h"
22 #include "internal.h"
23 #include "libavcodec/opt.h"
24 #include "libavutil/avstring.h"
25 #include "riff.h"
26 #include <sys/time.h>
27 #include <time.h>
28 #include <strings.h>
30 #undef NDEBUG
31 #include <assert.h>
33 /**
34 * @file libavformat/utils.c
35 * various utility functions for use within FFmpeg
38 unsigned avformat_version(void)
40 return LIBAVFORMAT_VERSION_INT;
43 /* fraction handling */
45 /**
46 * f = val + (num / den) + 0.5.
48 * 'num' is normalized so that 0 <= num < den.
50 * @param f fractional number
51 * @param val integer value
52 * @param num must be >= 0
53 * @param den must be >= 1
55 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
57 num += (den >> 1);
58 if (num >= den) {
59 val += num / den;
60 num = num % den;
62 f->val = val;
63 f->num = num;
64 f->den = den;
67 /**
68 * Fractional addition to f: f = f + (incr / f->den).
70 * @param f fractional number
71 * @param incr increment, can be positive or negative
73 static void av_frac_add(AVFrac *f, int64_t incr)
75 int64_t num, den;
77 num = f->num + incr;
78 den = f->den;
79 if (num < 0) {
80 f->val += num / den;
81 num = num % den;
82 if (num < 0) {
83 num += den;
84 f->val--;
86 } else if (num >= den) {
87 f->val += num / den;
88 num = num % den;
90 f->num = num;
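/* Illustrative sketch (not part of the original file): the AVFrac helpers above
 * keep a timestamp as an integer part plus an exact remainder (val + num/den),
 * so repeated fractional increments accumulate without rounding drift.  The
 * helpers are static to this file, so this hypothetical standalone example
 * simply mirrors the positive-increment arithmetic to show the idea. */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

typedef struct { int64_t val, num, den; } Frac;

static void frac_add(Frac *f, int64_t incr)     /* positive increments only */
{
    int64_t num = f->num + incr;
    if (num >= f->den) {
        f->val += num / f->den;
        num    %= f->den;
    }
    f->num = num;
}

int main(void)
{
    /* advance by one NTSC frame duration (1001/30000 s) per iteration */
    Frac pts = { 0, 0, 30000 };
    int i;
    for (i = 0; i < 30000; i++)
        frac_add(&pts, 1001);
    /* exactly 1001 seconds, no accumulated rounding error */
    printf("%"PRId64" + %"PRId64"/%"PRId64"\n", pts.val, pts.num, pts.den);
    return 0;
}
#endif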
93 /** head of registered input format linked list */
94 AVInputFormat *first_iformat = NULL;
95 /** head of registered output format linked list */
96 AVOutputFormat *first_oformat = NULL;
98 AVInputFormat *av_iformat_next(AVInputFormat *f)
100 if(f) return f->next;
101 else return first_iformat;
104 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
106 if(f) return f->next;
107 else return first_oformat;
110 void av_register_input_format(AVInputFormat *format)
112 AVInputFormat **p;
113 p = &first_iformat;
114 while (*p != NULL) p = &(*p)->next;
115 *p = format;
116 format->next = NULL;
119 void av_register_output_format(AVOutputFormat *format)
121 AVOutputFormat **p;
122 p = &first_oformat;
123 while (*p != NULL) p = &(*p)->next;
124 *p = format;
125 format->next = NULL;
128 int match_ext(const char *filename, const char *extensions)
130 const char *ext, *p;
131 char ext1[32], *q;
133 if(!filename)
134 return 0;
136 ext = strrchr(filename, '.');
137 if (ext) {
138 ext++;
139 p = extensions;
140 for(;;) {
141 q = ext1;
142 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
143 *q++ = *p++;
144 *q = '\0';
145 if (!strcasecmp(ext1, ext))
146 return 1;
147 if (*p == '\0')
148 break;
149 p++;
152 return 0;
155 AVOutputFormat *guess_format(const char *short_name, const char *filename,
156 const char *mime_type)
158 AVOutputFormat *fmt, *fmt_found;
159 int score_max, score;
161 /* specific test for image sequences */
162 #ifdef CONFIG_IMAGE2_MUXER
163 if (!short_name && filename &&
164 av_filename_number_test(filename) &&
165 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
166 return guess_format("image2", NULL, NULL);
168 #endif
169 /* Find the proper file type. */
170 fmt_found = NULL;
171 score_max = 0;
172 fmt = first_oformat;
173 while (fmt != NULL) {
174 score = 0;
175 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
176 score += 100;
177 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
178 score += 10;
179 if (filename && fmt->extensions &&
180 match_ext(filename, fmt->extensions)) {
181 score += 5;
183 if (score > score_max) {
184 score_max = score;
185 fmt_found = fmt;
187 fmt = fmt->next;
189 return fmt_found;
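/* Illustrative sketch (not part of the original file): typical use of
 * guess_format() when setting up a muxer -- pick the container from an explicit
 * short name or from the output filename, falling back to MPEG-PS.  The helper
 * name and the fallback choice are only examples; av_register_all() must have
 * been called first. */
#if 0
#include <stdio.h>
#include "avformat.h"

static AVOutputFormat *pick_output_format(const char *short_name,
                                          const char *filename)
{
    AVOutputFormat *fmt = guess_format(short_name, filename, NULL);

    if (!fmt) {
        fprintf(stderr, "Could not deduce output format, using MPEG.\n");
        fmt = guess_format("mpeg", NULL, NULL);
    }
    return fmt;
}

/* pick_output_format(NULL, "out.avi")   -> AVI muxer (matched by extension)
 * pick_output_format("matroska", NULL)  -> Matroska muxer (matched by name)  */
#endif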
192 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
193 const char *mime_type)
195 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
197 if (fmt) {
198 AVOutputFormat *stream_fmt;
199 char stream_format_name[64];
201 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
202 stream_fmt = guess_format(stream_format_name, NULL, NULL);
204 if (stream_fmt)
205 fmt = stream_fmt;
208 return fmt;
211 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
212 const char *filename, const char *mime_type, enum CodecType type){
213 if(type == CODEC_TYPE_VIDEO){
214 enum CodecID codec_id= CODEC_ID_NONE;
216 #ifdef CONFIG_IMAGE2_MUXER
217 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
218 codec_id= av_guess_image2_codec(filename);
220 #endif
221 if(codec_id == CODEC_ID_NONE)
222 codec_id= fmt->video_codec;
223 return codec_id;
224 }else if(type == CODEC_TYPE_AUDIO)
225 return fmt->audio_codec;
226 else
227 return CODEC_ID_NONE;
230 AVInputFormat *av_find_input_format(const char *short_name)
232 AVInputFormat *fmt;
233 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
234 if (!strcmp(fmt->name, short_name))
235 return fmt;
237 return NULL;
240 /* memory handling */
242 void av_destruct_packet(AVPacket *pkt)
244 av_free(pkt->data);
245 pkt->data = NULL; pkt->size = 0;
248 void av_init_packet(AVPacket *pkt)
250 pkt->pts = AV_NOPTS_VALUE;
251 pkt->dts = AV_NOPTS_VALUE;
252 pkt->pos = -1;
253 pkt->duration = 0;
254 pkt->convergence_duration = 0;
255 pkt->flags = 0;
256 pkt->stream_index = 0;
257 pkt->destruct= av_destruct_packet_nofree;
260 int av_new_packet(AVPacket *pkt, int size)
262 uint8_t *data;
263 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
264 return AVERROR(ENOMEM);
265 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
266 if (!data)
267 return AVERROR(ENOMEM);
268 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
270 av_init_packet(pkt);
271 pkt->data = data;
272 pkt->size = size;
273 pkt->destruct = av_destruct_packet;
274 return 0;
277 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
279 int ret= av_new_packet(pkt, size);
281 if(ret<0)
282 return ret;
284 pkt->pos= url_ftell(s);
286 ret= get_buffer(s, pkt->data, size);
287 if(ret<=0)
288 av_free_packet(pkt);
289 else
290 pkt->size= ret;
292 return ret;
295 int av_dup_packet(AVPacket *pkt)
297 if (pkt->destruct != av_destruct_packet) {
298 uint8_t *data;
299 /* We duplicate the packet and don't forget to add the padding again. */
300 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
301 return AVERROR(ENOMEM);
302 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
303 if (!data) {
304 return AVERROR(ENOMEM);
306 memcpy(data, pkt->data, pkt->size);
307 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
308 pkt->data = data;
309 pkt->destruct = av_destruct_packet;
311 return 0;
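/* Illustrative sketch (not part of the original file): how the packet helpers
 * above fit together.  av_new_packet() allocates an owned, padded buffer, and
 * av_dup_packet() turns a packet that still references a demuxer-owned buffer
 * (destruct == av_destruct_packet_nofree) into an independent copy that may be
 * kept past the next read.  The helper names are hypothetical. */
#if 0
#include <string.h>
#include "avformat.h"

static int keep_packet(AVPacket *dst, const AVPacket *src)
{
    *dst = *src;                  /* shallow copy ...               */
    return av_dup_packet(dst);    /* ... then own the payload       */
}

static int make_blank_packet(AVPacket *pkt, int size)
{
    int ret = av_new_packet(pkt, size);   /* size + padding bytes   */
    if (ret < 0)
        return ret;
    memset(pkt->data, 0, size);
    return 0;                     /* release with av_free_packet()  */
}
#endif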
314 int av_filename_number_test(const char *filename)
316 char buf[1024];
317 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
320 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
322 AVInputFormat *fmt1, *fmt;
323 int score;
325 fmt = NULL;
326 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
327 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
328 continue;
329 score = 0;
330 if (fmt1->read_probe) {
331 score = fmt1->read_probe(pd);
332 } else if (fmt1->extensions) {
333 if (match_ext(pd->filename, fmt1->extensions)) {
334 score = 50;
337 if (score > *score_max) {
338 *score_max = score;
339 fmt = fmt1;
340 }else if (score == *score_max)
341 fmt = NULL;
343 return fmt;
346 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
347 int score=0;
348 return av_probe_input_format2(pd, is_opened, &score);
351 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
353 AVInputFormat *fmt;
354 fmt = av_probe_input_format2(pd, 1, &score);
356 if (fmt) {
357 if (!strcmp(fmt->name, "mp3")) {
358 st->codec->codec_id = CODEC_ID_MP3;
359 st->codec->codec_type = CODEC_TYPE_AUDIO;
360 } else if (!strcmp(fmt->name, "ac3")) {
361 st->codec->codec_id = CODEC_ID_AC3;
362 st->codec->codec_type = CODEC_TYPE_AUDIO;
363 } else if (!strcmp(fmt->name, "mpegvideo")) {
364 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
365 st->codec->codec_type = CODEC_TYPE_VIDEO;
366 } else if (!strcmp(fmt->name, "m4v")) {
367 st->codec->codec_id = CODEC_ID_MPEG4;
368 st->codec->codec_type = CODEC_TYPE_VIDEO;
369 } else if (!strcmp(fmt->name, "h264")) {
370 st->codec->codec_id = CODEC_ID_H264;
371 st->codec->codec_type = CODEC_TYPE_VIDEO;
374 return !!fmt;
377 /************************************************************/
378 /* input media file */
381 * Open a media file from an IO stream. 'fmt' must be specified.
383 static const char* format_to_name(void* ptr)
385 AVFormatContext* fc = (AVFormatContext*) ptr;
386 if(fc->iformat) return fc->iformat->name;
387 else if(fc->oformat) return fc->oformat->name;
388 else return "NULL";
391 #define OFFSET(x) offsetof(AVFormatContext,x)
392 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
393 //these names are too long to be readable
394 #define E AV_OPT_FLAG_ENCODING_PARAM
395 #define D AV_OPT_FLAG_DECODING_PARAM
397 static const AVOption options[]={
398 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
399 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
400 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
401 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
402 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
403 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
404 {"track", "set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
405 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
406 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
407 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
408 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
409 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
410 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
411 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
412 {NULL},
415 #undef E
416 #undef D
417 #undef DEFAULT
419 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
421 static void avformat_get_context_defaults(AVFormatContext *s)
423 memset(s, 0, sizeof(AVFormatContext));
425 s->av_class = &av_format_context_class;
427 av_opt_set_defaults(s);
430 AVFormatContext *av_alloc_format_context(void)
432 AVFormatContext *ic;
433 ic = av_malloc(sizeof(AVFormatContext));
434 if (!ic) return ic;
435 avformat_get_context_defaults(ic);
436 ic->av_class = &av_format_context_class;
437 return ic;
440 int av_open_input_stream(AVFormatContext **ic_ptr,
441 ByteIOContext *pb, const char *filename,
442 AVInputFormat *fmt, AVFormatParameters *ap)
444 int err;
445 AVFormatContext *ic;
446 AVFormatParameters default_ap;
448 if(!ap){
449 ap=&default_ap;
450 memset(ap, 0, sizeof(default_ap));
453 if(!ap->prealloced_context)
454 ic = av_alloc_format_context();
455 else
456 ic = *ic_ptr;
457 if (!ic) {
458 err = AVERROR(ENOMEM);
459 goto fail;
461 ic->iformat = fmt;
462 ic->pb = pb;
463 ic->duration = AV_NOPTS_VALUE;
464 ic->start_time = AV_NOPTS_VALUE;
465 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
467 /* allocate private data */
468 if (fmt->priv_data_size > 0) {
469 ic->priv_data = av_mallocz(fmt->priv_data_size);
470 if (!ic->priv_data) {
471 err = AVERROR(ENOMEM);
472 goto fail;
474 } else {
475 ic->priv_data = NULL;
478 if (ic->iformat->read_header) {
479 err = ic->iformat->read_header(ic, ap);
480 if (err < 0)
481 goto fail;
484 if (pb && !ic->data_offset)
485 ic->data_offset = url_ftell(ic->pb);
487 *ic_ptr = ic;
488 return 0;
489 fail:
490 if (ic) {
491 int i;
492 av_freep(&ic->priv_data);
493 for(i=0;i<ic->nb_streams;i++) {
494 AVStream *st = ic->streams[i];
495 if (st) {
496 av_free(st->priv_data);
497 av_free(st->codec->extradata);
499 av_free(st);
502 av_free(ic);
503 *ic_ptr = NULL;
504 return err;
507 /** size of probe buffer, for guessing file type from file contents */
508 #define PROBE_BUF_MIN 2048
509 #define PROBE_BUF_MAX (1<<20)
511 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
512 AVInputFormat *fmt,
513 int buf_size,
514 AVFormatParameters *ap)
516 int err, probe_size;
517 AVProbeData probe_data, *pd = &probe_data;
518 ByteIOContext *pb = NULL;
520 pd->filename = "";
521 if (filename)
522 pd->filename = filename;
523 pd->buf = NULL;
524 pd->buf_size = 0;
526 if (!fmt) {
527 /* guess format if no file can be opened */
528 fmt = av_probe_input_format(pd, 0);
531 /* Do not open file if the format does not need it. XXX: specific
532 hack needed to handle RTSP/TCP */
533 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
534 /* if no file needed do not try to open one */
535 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
536 goto fail;
538 if (buf_size > 0) {
539 url_setbufsize(pb, buf_size);
542 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
543 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
544 /* read probe data */
545 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
546 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
547 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
548 if (url_fseek(pb, 0, SEEK_SET) < 0) {
549 url_fclose(pb);
550 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
551 pb = NULL;
552 err = AVERROR(EIO);
553 goto fail;
556 /* guess file format */
557 fmt = av_probe_input_format2(pd, 1, &score);
559 av_freep(&pd->buf);
562 /* if still no format found, error */
563 if (!fmt) {
564 err = AVERROR_NOFMT;
565 goto fail;
568 /* check filename in case an image number is expected */
569 if (fmt->flags & AVFMT_NEEDNUMBER) {
570 if (!av_filename_number_test(filename)) {
571 err = AVERROR_NUMEXPECTED;
572 goto fail;
575 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
576 if (err)
577 goto fail;
578 return 0;
579 fail:
580 av_freep(&pd->buf);
581 if (pb)
582 url_fclose(pb);
583 *ic_ptr = NULL;
584 return err;
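/* Illustrative sketch (not part of the original file): the usual demuxing
 * sequence built on av_open_input_file() above -- register the formats, open,
 * probe the streams, read packets, close.  The function name and the way
 * packets are reported are just examples. */
#if 0
#include <stdio.h>
#include <inttypes.h>
#include "avformat.h"

static int dump_packets(const char *filename)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;
    int err;

    av_register_all();                          /* register (de)muxers once */

    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0)
        return err;
    err = av_find_stream_info(ic);              /* fill in codec parameters */
    if (err < 0) {
        av_close_input_file(ic);
        return err;
    }

    while (av_read_frame(ic, &pkt) >= 0) {
        printf("stream %d: pts=%"PRId64" size=%d\n",
               pkt.stream_index, pkt.pts, pkt.size);
        av_free_packet(&pkt);
    }

    av_close_input_file(ic);
    return 0;
}
#endif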
588 /*******************************************************/
590 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
591 AVPacketList **plast_pktl){
592 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
593 if (!pktl)
594 return NULL;
596 if (*packet_buffer)
597 (*plast_pktl)->next = pktl;
598 else
599 *packet_buffer = pktl;
601 /* add the packet in the buffered packet list */
602 *plast_pktl = pktl;
603 pktl->pkt= *pkt;
604 return &pktl->pkt;
607 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
609 int ret;
610 AVStream *st;
612 for(;;){
613 AVPacketList *pktl = s->raw_packet_buffer;
615 if (pktl) {
616 *pkt = pktl->pkt;
617 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
618 s->raw_packet_buffer = pktl->next;
619 av_free(pktl);
620 return 0;
624 av_init_packet(pkt);
625 ret= s->iformat->read_packet(s, pkt);
626 if (ret < 0)
627 return ret;
628 st= s->streams[pkt->stream_index];
630 switch(st->codec->codec_type){
631 case CODEC_TYPE_VIDEO:
632 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
633 break;
634 case CODEC_TYPE_AUDIO:
635 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
636 break;
637 case CODEC_TYPE_SUBTITLE:
638 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
639 break;
642 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
643 return ret;
645 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
647 if(st->codec->codec_id == CODEC_ID_PROBE){
648 AVProbeData *pd = &st->probe_data;
650 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
651 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
652 pd->buf_size += pkt->size;
653 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
655 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
656 set_codec_from_probe_data(st, pd, 1);
657 if(st->codec->codec_id != CODEC_ID_PROBE){
658 pd->buf_size=0;
659 av_freep(&pd->buf);
666 /**********************************************************/
669 * Get the number of samples of an audio frame. Return -1 on error.
671 static int get_audio_frame_size(AVCodecContext *enc, int size)
673 int frame_size;
675 if(enc->codec_id == CODEC_ID_VORBIS)
676 return -1;
678 if (enc->frame_size <= 1) {
679 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
681 if (bits_per_sample) {
682 if (enc->channels == 0)
683 return -1;
684 frame_size = (size << 3) / (bits_per_sample * enc->channels);
685 } else {
686 /* used for example by ADPCM codecs */
687 if (enc->bit_rate == 0)
688 return -1;
689 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
691 } else {
692 frame_size = enc->frame_size;
694 return frame_size;
699 * Compute the frame duration as a fraction (*pnum / *pden, in seconds). Both are set to 0 if not available.
701 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
702 AVCodecParserContext *pc, AVPacket *pkt)
704 int frame_size;
706 *pnum = 0;
707 *pden = 0;
708 switch(st->codec->codec_type) {
709 case CODEC_TYPE_VIDEO:
710 if(st->time_base.num*1000LL > st->time_base.den){
711 *pnum = st->time_base.num;
712 *pden = st->time_base.den;
713 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
714 *pnum = st->codec->time_base.num;
715 *pden = st->codec->time_base.den;
716 if (pc && pc->repeat_pict) {
717 *pden *= 2;
718 *pnum = (*pnum) * (2 + pc->repeat_pict);
721 break;
722 case CODEC_TYPE_AUDIO:
723 frame_size = get_audio_frame_size(st->codec, pkt->size);
724 if (frame_size < 0)
725 break;
726 *pnum = frame_size;
727 *pden = st->codec->sample_rate;
728 break;
729 default:
730 break;
734 static int is_intra_only(AVCodecContext *enc){
735 if(enc->codec_type == CODEC_TYPE_AUDIO){
736 return 1;
737 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
738 switch(enc->codec_id){
739 case CODEC_ID_MJPEG:
740 case CODEC_ID_MJPEGB:
741 case CODEC_ID_LJPEG:
742 case CODEC_ID_RAWVIDEO:
743 case CODEC_ID_DVVIDEO:
744 case CODEC_ID_HUFFYUV:
745 case CODEC_ID_FFVHUFF:
746 case CODEC_ID_ASV1:
747 case CODEC_ID_ASV2:
748 case CODEC_ID_VCR1:
749 case CODEC_ID_DNXHD:
750 return 1;
751 default: break;
754 return 0;
757 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
758 int64_t dts, int64_t pts)
760 AVStream *st= s->streams[stream_index];
761 AVPacketList *pktl= s->packet_buffer;
763 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
764 return;
766 st->first_dts= dts - st->cur_dts;
767 st->cur_dts= dts;
769 for(; pktl; pktl= pktl->next){
770 if(pktl->pkt.stream_index != stream_index)
771 continue;
772 //FIXME think more about this check
773 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
774 pktl->pkt.pts += st->first_dts;
776 if(pktl->pkt.dts != AV_NOPTS_VALUE)
777 pktl->pkt.dts += st->first_dts;
779 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
780 st->start_time= pktl->pkt.pts;
782 if (st->start_time == AV_NOPTS_VALUE)
783 st->start_time = pts;
786 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
788 AVPacketList *pktl= s->packet_buffer;
789 int64_t cur_dts= 0;
791 if(st->first_dts != AV_NOPTS_VALUE){
792 cur_dts= st->first_dts;
793 for(; pktl; pktl= pktl->next){
794 if(pktl->pkt.stream_index == pkt->stream_index){
795 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
796 break;
797 cur_dts -= pkt->duration;
800 pktl= s->packet_buffer;
801 st->first_dts = cur_dts;
802 }else if(st->cur_dts)
803 return;
805 for(; pktl; pktl= pktl->next){
806 if(pktl->pkt.stream_index != pkt->stream_index)
807 continue;
808 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
809 && !pktl->pkt.duration){
810 pktl->pkt.dts= cur_dts;
811 if(!st->codec->has_b_frames)
812 pktl->pkt.pts= cur_dts;
813 cur_dts += pkt->duration;
814 pktl->pkt.duration= pkt->duration;
815 }else
816 break;
818 if(st->first_dts == AV_NOPTS_VALUE)
819 st->cur_dts= cur_dts;
822 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
823 AVCodecParserContext *pc, AVPacket *pkt)
825 int num, den, presentation_delayed, delay, i;
826 int64_t offset;
828 /* do we have a video B-frame ? */
829 delay= st->codec->has_b_frames;
830 presentation_delayed = 0;
831 /* XXX: need has_b_frame, but cannot get it if the codec is
832 not initialized */
833 if (delay &&
834 pc && pc->pict_type != FF_B_TYPE)
835 presentation_delayed = 1;
837 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
838 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
839 pkt->dts -= 1LL<<st->pts_wrap_bits;
842 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
843 // we take the conservative approach and discard both
844 // Note: if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
845 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
846 av_log(s, AV_LOG_ERROR, "invalid dts/pts combination\n");
847 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
850 if (pkt->duration == 0) {
851 compute_frame_duration(&num, &den, st, pc, pkt);
852 if (den && num) {
853 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
855 if(pkt->duration != 0 && s->packet_buffer)
856 update_initial_durations(s, st, pkt);
860 /* correct timestamps with byte offset if demuxers only have timestamps
861 on packet boundaries */
862 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
863 /* this will estimate bitrate based on this frame's duration and size */
864 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
865 if(pkt->pts != AV_NOPTS_VALUE)
866 pkt->pts += offset;
867 if(pkt->dts != AV_NOPTS_VALUE)
868 pkt->dts += offset;
871 /* This may be redundant, but it should not hurt. */
872 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
873 presentation_delayed = 1;
875 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
876 /* interpolate PTS and DTS if they are not present */
877 if(delay==0 || (delay==1 && pc)){
878 if (presentation_delayed) {
879 /* DTS = decompression timestamp */
880 /* PTS = presentation timestamp */
881 if (pkt->dts == AV_NOPTS_VALUE)
882 pkt->dts = st->last_IP_pts;
883 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
884 if (pkt->dts == AV_NOPTS_VALUE)
885 pkt->dts = st->cur_dts;
887 /* this is tricky: the dts must be incremented by the duration
888 of the frame we are displaying, i.e. the last I- or P-frame */
889 if (st->last_IP_duration == 0)
890 st->last_IP_duration = pkt->duration;
891 if(pkt->dts != AV_NOPTS_VALUE)
892 st->cur_dts = pkt->dts + st->last_IP_duration;
893 st->last_IP_duration = pkt->duration;
894 st->last_IP_pts= pkt->pts;
895 /* cannot compute PTS if not present (we can compute it only
896 by knowing the future) */
897 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
898 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
899 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
900 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
901 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
902 pkt->pts += pkt->duration;
903 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
907 /* presentation is not delayed : PTS and DTS are the same */
908 if(pkt->pts == AV_NOPTS_VALUE)
909 pkt->pts = pkt->dts;
910 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
911 if(pkt->pts == AV_NOPTS_VALUE)
912 pkt->pts = st->cur_dts;
913 pkt->dts = pkt->pts;
914 if(pkt->pts != AV_NOPTS_VALUE)
915 st->cur_dts = pkt->pts + pkt->duration;
919 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
920 st->pts_buffer[0]= pkt->pts;
921 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
922 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
923 if(pkt->dts == AV_NOPTS_VALUE)
924 pkt->dts= st->pts_buffer[0];
925 if(delay>1){
926 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
928 if(pkt->dts > st->cur_dts)
929 st->cur_dts = pkt->dts;
932 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
934 /* update flags */
935 if(is_intra_only(st->codec))
936 pkt->flags |= PKT_FLAG_KEY;
937 else if (pc) {
938 pkt->flags = 0;
939 /* keyframe computation */
940 if (pc->pict_type == FF_I_TYPE)
941 pkt->flags |= PKT_FLAG_KEY;
945 void av_destruct_packet_nofree(AVPacket *pkt)
947 pkt->data = NULL; pkt->size = 0;
950 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
952 AVStream *st;
953 int len, ret, i;
955 av_init_packet(pkt);
957 for(;;) {
958 /* select current input stream component */
959 st = s->cur_st;
960 if (st) {
961 if (!st->need_parsing || !st->parser) {
962 /* no parsing needed: we just output the packet as is */
963 /* raw data support */
964 *pkt = s->cur_pkt;
965 compute_pkt_fields(s, st, NULL, pkt);
966 s->cur_st = NULL;
967 break;
968 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
969 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
970 s->cur_ptr, s->cur_len,
971 s->cur_pkt.pts, s->cur_pkt.dts);
972 s->cur_pkt.pts = AV_NOPTS_VALUE;
973 s->cur_pkt.dts = AV_NOPTS_VALUE;
974 /* increment read pointer */
975 s->cur_ptr += len;
976 s->cur_len -= len;
978 /* return packet if any */
979 if (pkt->size) {
980 got_packet:
981 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
982 pkt->duration = 0;
983 pkt->stream_index = st->index;
984 pkt->pts = st->parser->pts;
985 pkt->dts = st->parser->dts;
986 pkt->destruct = av_destruct_packet_nofree;
987 compute_pkt_fields(s, st, st->parser, pkt);
989 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
990 ff_reduce_index(s, st->index);
991 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
992 0, 0, AVINDEX_KEYFRAME);
995 break;
997 } else {
998 /* free packet */
999 av_free_packet(&s->cur_pkt);
1000 s->cur_st = NULL;
1002 } else {
1003 /* read next packet */
1004 ret = av_read_packet(s, &s->cur_pkt);
1005 if (ret < 0) {
1006 if (ret == AVERROR(EAGAIN))
1007 return ret;
1008 /* return the last frames, if any */
1009 for(i = 0; i < s->nb_streams; i++) {
1010 st = s->streams[i];
1011 if (st->parser && st->need_parsing) {
1012 av_parser_parse(st->parser, st->codec,
1013 &pkt->data, &pkt->size,
1014 NULL, 0,
1015 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1016 if (pkt->size)
1017 goto got_packet;
1020 /* no more packets: really terminate parsing */
1021 return ret;
1024 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
1025 s->cur_pkt.dts != AV_NOPTS_VALUE &&
1026 s->cur_pkt.pts < s->cur_pkt.dts){
1027 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1028 s->cur_pkt.stream_index,
1029 s->cur_pkt.pts,
1030 s->cur_pkt.dts,
1031 s->cur_pkt.size);
1032 // av_free_packet(&s->cur_pkt);
1033 // return -1;
1036 st = s->streams[s->cur_pkt.stream_index];
1037 if(s->debug & FF_FDEBUG_TS)
1038 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1039 s->cur_pkt.stream_index,
1040 s->cur_pkt.pts,
1041 s->cur_pkt.dts,
1042 s->cur_pkt.size,
1043 s->cur_pkt.flags);
1045 s->cur_st = st;
1046 s->cur_ptr = s->cur_pkt.data;
1047 s->cur_len = s->cur_pkt.size;
1048 if (st->need_parsing && !st->parser) {
1049 st->parser = av_parser_init(st->codec->codec_id);
1050 if (!st->parser) {
1051 /* no parser available: just output the raw packets */
1052 st->need_parsing = AVSTREAM_PARSE_NONE;
1053 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1054 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1056 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1057 st->parser->next_frame_offset=
1058 st->parser->cur_offset= s->cur_pkt.pos;
1063 if(s->debug & FF_FDEBUG_TS)
1064 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1065 pkt->stream_index,
1066 pkt->pts,
1067 pkt->dts,
1068 pkt->size,
1069 pkt->flags);
1071 return 0;
1074 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1076 AVPacketList *pktl;
1077 int eof=0;
1078 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1080 for(;;){
1081 pktl = s->packet_buffer;
1082 if (pktl) {
1083 AVPacket *next_pkt= &pktl->pkt;
1085 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1086 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1087 if( pktl->pkt.stream_index == next_pkt->stream_index
1088 && next_pkt->dts < pktl->pkt.dts
1089 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1090 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1091 next_pkt->pts= pktl->pkt.dts;
1093 pktl= pktl->next;
1095 pktl = s->packet_buffer;
1098 if( next_pkt->pts != AV_NOPTS_VALUE
1099 || next_pkt->dts == AV_NOPTS_VALUE
1100 || !genpts || eof){
1101 /* read packet from packet buffer, if there is data */
1102 *pkt = *next_pkt;
1103 s->packet_buffer = pktl->next;
1104 av_free(pktl);
1105 return 0;
1108 if(genpts){
1109 int ret= av_read_frame_internal(s, pkt);
1110 if(ret<0){
1111 if(pktl && ret != AVERROR(EAGAIN)){
1112 eof=1;
1113 continue;
1114 }else
1115 return ret;
1118 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1119 &s->packet_buffer_end)) < 0)
1120 return AVERROR(ENOMEM);
1121 }else{
1122 assert(!s->packet_buffer);
1123 return av_read_frame_internal(s, pkt);
1128 /* XXX: suppress the packet queue */
1129 static void flush_packet_queue(AVFormatContext *s)
1131 AVPacketList *pktl;
1133 for(;;) {
1134 pktl = s->packet_buffer;
1135 if (!pktl)
1136 break;
1137 s->packet_buffer = pktl->next;
1138 av_free_packet(&pktl->pkt);
1139 av_free(pktl);
1143 /*******************************************************/
1144 /* seek support */
1146 int av_find_default_stream_index(AVFormatContext *s)
1148 int first_audio_index = -1;
1149 int i;
1150 AVStream *st;
1152 if (s->nb_streams <= 0)
1153 return -1;
1154 for(i = 0; i < s->nb_streams; i++) {
1155 st = s->streams[i];
1156 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1157 return i;
1159 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1160 first_audio_index = i;
1162 return first_audio_index >= 0 ? first_audio_index : 0;
1166 * Flush the frame reader.
1168 static void av_read_frame_flush(AVFormatContext *s)
1170 AVStream *st;
1171 int i;
1173 flush_packet_queue(s);
1175 /* free previous packet */
1176 if (s->cur_st) {
1177 if (s->cur_st->parser)
1178 av_free_packet(&s->cur_pkt);
1179 s->cur_st = NULL;
1181 /* fail safe */
1182 s->cur_ptr = NULL;
1183 s->cur_len = 0;
1185 /* for each stream, reset read state */
1186 for(i = 0; i < s->nb_streams; i++) {
1187 st = s->streams[i];
1189 if (st->parser) {
1190 av_parser_close(st->parser);
1191 st->parser = NULL;
1193 st->last_IP_pts = AV_NOPTS_VALUE;
1194 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1198 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1199 int i;
1201 for(i = 0; i < s->nb_streams; i++) {
1202 AVStream *st = s->streams[i];
1204 st->cur_dts = av_rescale(timestamp,
1205 st->time_base.den * (int64_t)ref_st->time_base.num,
1206 st->time_base.num * (int64_t)ref_st->time_base.den);
1210 void ff_reduce_index(AVFormatContext *s, int stream_index)
1212 AVStream *st= s->streams[stream_index];
1213 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1215 if((unsigned)st->nb_index_entries >= max_entries){
1216 int i;
1217 for(i=0; 2*i<st->nb_index_entries; i++)
1218 st->index_entries[i]= st->index_entries[2*i];
1219 st->nb_index_entries= i;
1223 int av_add_index_entry(AVStream *st,
1224 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1226 AVIndexEntry *entries, *ie;
1227 int index;
1229 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1230 return -1;
1232 entries = av_fast_realloc(st->index_entries,
1233 &st->index_entries_allocated_size,
1234 (st->nb_index_entries + 1) *
1235 sizeof(AVIndexEntry));
1236 if(!entries)
1237 return -1;
1239 st->index_entries= entries;
1241 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1243 if(index<0){
1244 index= st->nb_index_entries++;
1245 ie= &entries[index];
1246 assert(index==0 || ie[-1].timestamp < timestamp);
1247 }else{
1248 ie= &entries[index];
1249 if(ie->timestamp != timestamp){
1250 if(ie->timestamp <= timestamp)
1251 return -1;
1252 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1253 st->nb_index_entries++;
1254 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1255 distance= ie->min_distance;
1258 ie->pos = pos;
1259 ie->timestamp = timestamp;
1260 ie->min_distance= distance;
1261 ie->size= size;
1262 ie->flags = flags;
1264 return index;
1267 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1268 int flags)
1270 AVIndexEntry *entries= st->index_entries;
1271 int nb_entries= st->nb_index_entries;
1272 int a, b, m;
1273 int64_t timestamp;
1275 a = - 1;
1276 b = nb_entries;
1278 while (b - a > 1) {
1279 m = (a + b) >> 1;
1280 timestamp = entries[m].timestamp;
1281 if(timestamp >= wanted_timestamp)
1282 b = m;
1283 if(timestamp <= wanted_timestamp)
1284 a = m;
1286 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1288 if(!(flags & AVSEEK_FLAG_ANY)){
1289 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1290 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1294 if(m == nb_entries)
1295 return -1;
1296 return m;
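/* Illustrative sketch (not part of the original file): how a demuxer typically
 * builds and queries the per-stream index maintained by av_add_index_entry()
 * and av_index_search_timestamp() above.  The helper names are hypothetical. */
#if 0
#include "avformat.h"

static void index_keyframe(AVStream *st, int64_t pos, int64_t ts, int dist)
{
    /* size 0, min_distance dist, flagged as a keyframe */
    av_add_index_entry(st, pos, ts, 0, dist, AVINDEX_KEYFRAME);
}

static int64_t keyframe_pos_at_or_before(AVStream *st, int64_t ts)
{
    int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    return idx < 0 ? -1 : st->index_entries[idx].pos;
}
#endif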
1299 #define DEBUG_SEEK
1301 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1302 AVInputFormat *avif= s->iformat;
1303 int64_t pos_min, pos_max, pos, pos_limit;
1304 int64_t ts_min, ts_max, ts;
1305 int index;
1306 AVStream *st;
1308 if (stream_index < 0)
1309 return -1;
1311 #ifdef DEBUG_SEEK
1312 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1313 #endif
1315 ts_max=
1316 ts_min= AV_NOPTS_VALUE;
1317 pos_limit= -1; //gcc falsely says it may be uninitialized
1319 st= s->streams[stream_index];
1320 if(st->index_entries){
1321 AVIndexEntry *e;
1323 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1324 index= FFMAX(index, 0);
1325 e= &st->index_entries[index];
1327 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1328 pos_min= e->pos;
1329 ts_min= e->timestamp;
1330 #ifdef DEBUG_SEEK
1331 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1332 pos_min,ts_min);
1333 #endif
1334 }else{
1335 assert(index==0);
1338 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1339 assert(index < st->nb_index_entries);
1340 if(index >= 0){
1341 e= &st->index_entries[index];
1342 assert(e->timestamp >= target_ts);
1343 pos_max= e->pos;
1344 ts_max= e->timestamp;
1345 pos_limit= pos_max - e->min_distance;
1346 #ifdef DEBUG_SEEK
1347 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1348 pos_max,pos_limit, ts_max);
1349 #endif
1353 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1354 if(pos<0)
1355 return -1;
1357 /* do the seek */
1358 url_fseek(s->pb, pos, SEEK_SET);
1360 av_update_cur_dts(s, st, ts);
1362 return 0;
1365 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1366 int64_t pos, ts;
1367 int64_t start_pos, filesize;
1368 int no_change;
1370 #ifdef DEBUG_SEEK
1371 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1372 #endif
1374 if(ts_min == AV_NOPTS_VALUE){
1375 pos_min = s->data_offset;
1376 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1377 if (ts_min == AV_NOPTS_VALUE)
1378 return -1;
1381 if(ts_max == AV_NOPTS_VALUE){
1382 int step= 1024;
1383 filesize = url_fsize(s->pb);
1384 pos_max = filesize - 1;
1386 pos_max -= step;
1387 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1388 step += step;
1389 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1390 if (ts_max == AV_NOPTS_VALUE)
1391 return -1;
1393 for(;;){
1394 int64_t tmp_pos= pos_max + 1;
1395 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1396 if(tmp_ts == AV_NOPTS_VALUE)
1397 break;
1398 ts_max= tmp_ts;
1399 pos_max= tmp_pos;
1400 if(tmp_pos >= filesize)
1401 break;
1403 pos_limit= pos_max;
1406 if(ts_min > ts_max){
1407 return -1;
1408 }else if(ts_min == ts_max){
1409 pos_limit= pos_min;
1412 no_change=0;
1413 while (pos_min < pos_limit) {
1414 #ifdef DEBUG_SEEK
1415 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1416 pos_min, pos_max,
1417 ts_min, ts_max);
1418 #endif
1419 assert(pos_limit <= pos_max);
1421 if(no_change==0){
1422 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1423 // interpolate position (better than dichotomy)
1424 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1425 + pos_min - approximate_keyframe_distance;
1426 }else if(no_change==1){
1427 // bisection, if interpolation failed to change min or max pos last time
1428 pos = (pos_min + pos_limit)>>1;
1429 }else{
1430 /* linear search if bisection failed, can only happen if there
1431 are very few or no keyframes between min/max */
1432 pos=pos_min;
1434 if(pos <= pos_min)
1435 pos= pos_min + 1;
1436 else if(pos > pos_limit)
1437 pos= pos_limit;
1438 start_pos= pos;
1440 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1441 if(pos == pos_max)
1442 no_change++;
1443 else
1444 no_change=0;
1445 #ifdef DEBUG_SEEK
1446 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1447 #endif
1448 if(ts == AV_NOPTS_VALUE){
1449 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1450 return -1;
1452 assert(ts != AV_NOPTS_VALUE);
1453 if (target_ts <= ts) {
1454 pos_limit = start_pos - 1;
1455 pos_max = pos;
1456 ts_max = ts;
1458 if (target_ts >= ts) {
1459 pos_min = pos;
1460 ts_min = ts;
1464 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1465 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1466 #ifdef DEBUG_SEEK
1467 pos_min = pos;
1468 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1469 pos_min++;
1470 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1471 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1472 pos, ts_min, target_ts, ts_max);
1473 #endif
1474 *ts_ret= ts;
1475 return pos;
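/* Illustrative sketch (not part of the original file): the core of the search
 * strategy used by av_gen_search() above, stripped of the keyframe/read_timestamp
 * details -- locate a position whose timestamp reaches target_ts in a monotonic
 * pos->ts mapping, interpolating first and falling back to bisection (then a
 * linear step) when no progress is made.  get_ts() is a hypothetical stand-in
 * for the demuxer callback, the caller is assumed to pass
 * ts_min <= target_ts <= ts_max, and av_rescale() (libavutil, already used in
 * this file) keeps the interpolation free of overflow. */
#if 0
typedef int64_t (*get_ts_fn)(void *opaque, int64_t pos);

static int64_t interp_search(void *opaque, get_ts_fn get_ts, int64_t target_ts,
                             int64_t pos_min, int64_t ts_min,
                             int64_t pos_max, int64_t ts_max)
{
    int64_t pos_limit = pos_max;
    int no_change = 0;

    while (pos_min < pos_limit) {
        int64_t pos, ts;

        if (no_change == 0 && ts_max > ts_min)  /* interpolate */
            pos = pos_min + av_rescale(target_ts - ts_min,
                                       pos_max - pos_min, ts_max - ts_min);
        else if (no_change == 1)                /* bisect if no progress */
            pos = (pos_min + pos_limit) >> 1;
        else                                    /* linear step as last resort */
            pos = pos_min;
        if      (pos <= pos_min)   pos = pos_min + 1;
        else if (pos > pos_limit)  pos = pos_limit;

        ts        = get_ts(opaque, pos);
        no_change = (pos == pos_max) ? no_change + 1 : 0;

        if (target_ts <= ts) { pos_limit = pos - 1; pos_max = pos; ts_max = ts; }
        if (target_ts >= ts) { pos_min   = pos;                    ts_min = ts; }
    }
    return pos_max;   /* smallest known position with ts >= target_ts */
}
#endif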
1478 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1479 int64_t pos_min, pos_max;
1480 #if 0
1481 AVStream *st;
1483 if (stream_index < 0)
1484 return -1;
1486 st= s->streams[stream_index];
1487 #endif
1489 pos_min = s->data_offset;
1490 pos_max = url_fsize(s->pb) - 1;
1492 if (pos < pos_min) pos= pos_min;
1493 else if(pos > pos_max) pos= pos_max;
1495 url_fseek(s->pb, pos, SEEK_SET);
1497 #if 0
1498 av_update_cur_dts(s, st, ts);
1499 #endif
1500 return 0;
1503 static int av_seek_frame_generic(AVFormatContext *s,
1504 int stream_index, int64_t timestamp, int flags)
1506 int index, ret;
1507 AVStream *st;
1508 AVIndexEntry *ie;
1510 st = s->streams[stream_index];
1512 index = av_index_search_timestamp(st, timestamp, flags);
1514 if(index < 0 || index==st->nb_index_entries-1){
1515 int i;
1516 AVPacket pkt;
1518 if(st->nb_index_entries){
1519 assert(st->index_entries);
1520 ie= &st->index_entries[st->nb_index_entries-1];
1521 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1522 return ret;
1523 av_update_cur_dts(s, st, ie->timestamp);
1524 }else{
1525 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1526 return ret;
1528 for(i=0;; i++) {
1529 int ret = av_read_frame(s, &pkt);
1530 if(ret<0)
1531 break;
1532 av_free_packet(&pkt);
1533 if(stream_index == pkt.stream_index){
1534 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1535 break;
1538 index = av_index_search_timestamp(st, timestamp, flags);
1540 if (index < 0)
1541 return -1;
1543 av_read_frame_flush(s);
1544 if (s->iformat->read_seek){
1545 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1546 return 0;
1548 ie = &st->index_entries[index];
1549 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1550 return ret;
1551 av_update_cur_dts(s, st, ie->timestamp);
1553 return 0;
1556 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1558 int ret;
1559 AVStream *st;
1561 av_read_frame_flush(s);
1563 if(flags & AVSEEK_FLAG_BYTE)
1564 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1566 if(stream_index < 0){
1567 stream_index= av_find_default_stream_index(s);
1568 if(stream_index < 0)
1569 return -1;
1571 st= s->streams[stream_index];
1572 /* timestamp for default must be expressed in AV_TIME_BASE units */
1573 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1576 /* first, we try the format specific seek */
1577 if (s->iformat->read_seek)
1578 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1579 else
1580 ret = -1;
1581 if (ret >= 0) {
1582 return 0;
1585 if(s->iformat->read_timestamp)
1586 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1587 else
1588 return av_seek_frame_generic(s, stream_index, timestamp, flags);
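/* Illustrative sketch (not part of the original file): calling av_seek_frame()
 * with stream_index = -1, in which case the timestamp is interpreted in
 * AV_TIME_BASE units, so seconds must be rescaled first.  The helper name is
 * hypothetical. */
#if 0
#include "avformat.h"

static int seek_to_seconds(AVFormatContext *ic, double seconds)
{
    int64_t ts = (int64_t)(seconds * AV_TIME_BASE);

    /* AVSEEK_FLAG_BACKWARD: land on the keyframe at or before the target */
    return av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);
}

/* seek_to_seconds(ic, 10.0); then keep calling av_read_frame() as before */
#endif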
1591 /*******************************************************/
1594 * Return TRUE if the file has an accurate duration in at least one stream.
1596 * @return TRUE if the stream has accurate duration for at least one component.
1598 static int av_has_duration(AVFormatContext *ic)
1600 int i;
1601 AVStream *st;
1603 for(i = 0;i < ic->nb_streams; i++) {
1604 st = ic->streams[i];
1605 if (st->duration != AV_NOPTS_VALUE)
1606 return 1;
1608 return 0;
1612 * Estimate the stream timings from the ones of each component.
1614 * Also computes the global bitrate if possible.
1616 static void av_update_stream_timings(AVFormatContext *ic)
1618 int64_t start_time, start_time1, end_time, end_time1;
1619 int64_t duration, duration1;
1620 int i;
1621 AVStream *st;
1623 start_time = INT64_MAX;
1624 end_time = INT64_MIN;
1625 duration = INT64_MIN;
1626 for(i = 0;i < ic->nb_streams; i++) {
1627 st = ic->streams[i];
1628 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1629 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1630 if (start_time1 < start_time)
1631 start_time = start_time1;
1632 if (st->duration != AV_NOPTS_VALUE) {
1633 end_time1 = start_time1
1634 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1635 if (end_time1 > end_time)
1636 end_time = end_time1;
1639 if (st->duration != AV_NOPTS_VALUE) {
1640 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1641 if (duration1 > duration)
1642 duration = duration1;
1645 if (start_time != INT64_MAX) {
1646 ic->start_time = start_time;
1647 if (end_time != INT64_MIN) {
1648 if (end_time - start_time > duration)
1649 duration = end_time - start_time;
1652 if (duration != INT64_MIN) {
1653 ic->duration = duration;
1654 if (ic->file_size > 0) {
1655 /* compute the bitrate */
1656 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1657 (double)ic->duration;
1662 static void fill_all_stream_timings(AVFormatContext *ic)
1664 int i;
1665 AVStream *st;
1667 av_update_stream_timings(ic);
1668 for(i = 0;i < ic->nb_streams; i++) {
1669 st = ic->streams[i];
1670 if (st->start_time == AV_NOPTS_VALUE) {
1671 if(ic->start_time != AV_NOPTS_VALUE)
1672 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1673 if(ic->duration != AV_NOPTS_VALUE)
1674 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1679 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1681 int64_t filesize, duration;
1682 int bit_rate, i;
1683 AVStream *st;
1685 /* if bit_rate is already set, we believe it */
1686 if (ic->bit_rate == 0) {
1687 bit_rate = 0;
1688 for(i=0;i<ic->nb_streams;i++) {
1689 st = ic->streams[i];
1690 bit_rate += st->codec->bit_rate;
1692 ic->bit_rate = bit_rate;
1695 /* if duration is already set, we believe it */
1696 if (ic->duration == AV_NOPTS_VALUE &&
1697 ic->bit_rate != 0 &&
1698 ic->file_size != 0) {
1699 filesize = ic->file_size;
1700 if (filesize > 0) {
1701 for(i = 0; i < ic->nb_streams; i++) {
1702 st = ic->streams[i];
1703 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1704 if (st->duration == AV_NOPTS_VALUE)
1705 st->duration = duration;
1711 #define DURATION_MAX_READ_SIZE 250000
1713 /* only usable for MPEG-PS streams */
1714 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1716 AVPacket pkt1, *pkt = &pkt1;
1717 AVStream *st;
1718 int read_size, i, ret;
1719 int64_t end_time;
1720 int64_t filesize, offset, duration;
1722 /* free previous packet */
1723 if (ic->cur_st && ic->cur_st->parser)
1724 av_free_packet(&ic->cur_pkt);
1725 ic->cur_st = NULL;
1727 /* flush packet queue */
1728 flush_packet_queue(ic);
1730 for(i=0;i<ic->nb_streams;i++) {
1731 st = ic->streams[i];
1732 if (st->parser) {
1733 av_parser_close(st->parser);
1734 st->parser= NULL;
1738 /* we read the first packets to get the first PTS (not fully
1739 accurate, but it is enough now) */
1740 url_fseek(ic->pb, 0, SEEK_SET);
1741 read_size = 0;
1742 for(;;) {
1743 if (read_size >= DURATION_MAX_READ_SIZE)
1744 break;
1745 /* if all info is available, we can stop */
1746 for(i = 0;i < ic->nb_streams; i++) {
1747 st = ic->streams[i];
1748 if (st->start_time == AV_NOPTS_VALUE)
1749 break;
1751 if (i == ic->nb_streams)
1752 break;
1754 ret = av_read_packet(ic, pkt);
1755 if (ret != 0)
1756 break;
1757 read_size += pkt->size;
1758 st = ic->streams[pkt->stream_index];
1759 if (pkt->pts != AV_NOPTS_VALUE) {
1760 if (st->start_time == AV_NOPTS_VALUE)
1761 st->start_time = pkt->pts;
1763 av_free_packet(pkt);
1766 /* estimate the end time (duration) */
1767 /* XXX: may need to support wrapping */
1768 filesize = ic->file_size;
1769 offset = filesize - DURATION_MAX_READ_SIZE;
1770 if (offset < 0)
1771 offset = 0;
1773 url_fseek(ic->pb, offset, SEEK_SET);
1774 read_size = 0;
1775 for(;;) {
1776 if (read_size >= DURATION_MAX_READ_SIZE)
1777 break;
1779 ret = av_read_packet(ic, pkt);
1780 if (ret != 0)
1781 break;
1782 read_size += pkt->size;
1783 st = ic->streams[pkt->stream_index];
1784 if (pkt->pts != AV_NOPTS_VALUE &&
1785 st->start_time != AV_NOPTS_VALUE) {
1786 end_time = pkt->pts;
1787 duration = end_time - st->start_time;
1788 if (duration > 0) {
1789 if (st->duration == AV_NOPTS_VALUE ||
1790 st->duration < duration)
1791 st->duration = duration;
1794 av_free_packet(pkt);
1797 fill_all_stream_timings(ic);
1799 url_fseek(ic->pb, old_offset, SEEK_SET);
1800 for(i=0; i<ic->nb_streams; i++){
1801 st= ic->streams[i];
1802 st->cur_dts= st->first_dts;
1803 st->last_IP_pts = AV_NOPTS_VALUE;
1807 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1809 int64_t file_size;
1811 /* get the file size, if possible */
1812 if (ic->iformat->flags & AVFMT_NOFILE) {
1813 file_size = 0;
1814 } else {
1815 file_size = url_fsize(ic->pb);
1816 if (file_size < 0)
1817 file_size = 0;
1819 ic->file_size = file_size;
1821 if ((!strcmp(ic->iformat->name, "mpeg") ||
1822 !strcmp(ic->iformat->name, "mpegts")) &&
1823 file_size && !url_is_streamed(ic->pb)) {
1824 /* get accurate estimate from the PTSes */
1825 av_estimate_timings_from_pts(ic, old_offset);
1826 } else if (av_has_duration(ic)) {
1827 /* at least one component has timings - we use them for all
1828 the components */
1829 fill_all_stream_timings(ic);
1830 } else {
1831 /* less precise: use bitrate info */
1832 av_estimate_timings_from_bit_rate(ic);
1834 av_update_stream_timings(ic);
1836 #if 0
1838 int i;
1839 AVStream *st;
1840 for(i = 0;i < ic->nb_streams; i++) {
1841 st = ic->streams[i];
1842 printf("%d: start_time: %0.3f duration: %0.3f\n",
1843 i, (double)st->start_time / AV_TIME_BASE,
1844 (double)st->duration / AV_TIME_BASE);
1846 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1847 (double)ic->start_time / AV_TIME_BASE,
1848 (double)ic->duration / AV_TIME_BASE,
1849 ic->bit_rate / 1000);
1851 #endif
1854 static int has_codec_parameters(AVCodecContext *enc)
1856 int val;
1857 switch(enc->codec_type) {
1858 case CODEC_TYPE_AUDIO:
1859 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1860 if(!enc->frame_size &&
1861 (enc->codec_id == CODEC_ID_VORBIS ||
1862 enc->codec_id == CODEC_ID_AAC))
1863 return 0;
1864 break;
1865 case CODEC_TYPE_VIDEO:
1866 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1867 break;
1868 default:
1869 val = 1;
1870 break;
1872 return enc->codec_id != CODEC_ID_NONE && val != 0;
1875 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1877 int16_t *samples;
1878 AVCodec *codec;
1879 int got_picture, data_size, ret=0;
1880 AVFrame picture;
1882 if(!st->codec->codec){
1883 codec = avcodec_find_decoder(st->codec->codec_id);
1884 if (!codec)
1885 return -1;
1886 ret = avcodec_open(st->codec, codec);
1887 if (ret < 0)
1888 return ret;
1891 if(!has_codec_parameters(st->codec)){
1892 switch(st->codec->codec_type) {
1893 case CODEC_TYPE_VIDEO:
1894 ret = avcodec_decode_video(st->codec, &picture,
1895 &got_picture, data, size);
1896 break;
1897 case CODEC_TYPE_AUDIO:
1898 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1899 samples = av_malloc(data_size);
1900 if (!samples)
1901 goto fail;
1902 ret = avcodec_decode_audio2(st->codec, samples,
1903 &data_size, data, size);
1904 av_free(samples);
1905 break;
1906 default:
1907 break;
1910 fail:
1911 return ret;
1914 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1916 while (tags->id != CODEC_ID_NONE) {
1917 if (tags->id == id)
1918 return tags->tag;
1919 tags++;
1921 return 0;
1924 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1926 int i;
1927 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1928 if(tag == tags[i].tag)
1929 return tags[i].id;
1931 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1932 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1933 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1934 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1935 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1936 return tags[i].id;
1938 return CODEC_ID_NONE;
1941 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1943 int i;
1944 for(i=0; tags && tags[i]; i++){
1945 int tag= codec_get_tag(tags[i], id);
1946 if(tag) return tag;
1948 return 0;
1951 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1953 int i;
1954 for(i=0; tags && tags[i]; i++){
1955 enum CodecID id= codec_get_id(tags[i], tag);
1956 if(id!=CODEC_ID_NONE) return id;
1958 return CODEC_ID_NONE;
1961 static void compute_chapters_end(AVFormatContext *s)
1963 unsigned int i;
1965 for (i=0; i+1<s->nb_chapters; i++)
1966 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1967 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1968 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1969 s->chapters[i]->end = s->chapters[i+1]->start;
1972 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1973 assert(s->start_time != AV_NOPTS_VALUE);
1974 assert(s->duration > 0);
1975 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1976 AV_TIME_BASE_Q,
1977 s->chapters[i]->time_base);
1981 /* absolute maximum size we read until we abort */
1982 #define MAX_READ_SIZE 5000000
1984 #define MAX_STD_TIMEBASES (60*12+5)
1985 static int get_std_framerate(int i){
1986 if(i<60*12) return i*1001;
1987 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1991 * Is the time base unreliable?
1992 * This is a heuristic to balance between quick acceptance of the values in
1993 * the headers vs. some extra checks.
1994 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1995 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1996 * And there are "variable" fps files this needs to detect as well.
1998 static int tb_unreliable(AVCodecContext *c){
1999 if( c->time_base.den >= 101L*c->time_base.num
2000 || c->time_base.den < 5L*c->time_base.num
2001 /* || c->codec_tag == ff_get_fourcc("DIVX")
2002 || c->codec_tag == ff_get_fourcc("XVID")*/
2003 || c->codec_id == CODEC_ID_MPEG2VIDEO)
2004 return 1;
2005 return 0;
2008 int av_find_stream_info(AVFormatContext *ic)
2010 int i, count, ret, read_size, j;
2011 AVStream *st;
2012 AVPacket pkt1, *pkt;
2013 int64_t last_dts[MAX_STREAMS];
2014 int duration_count[MAX_STREAMS]={0};
2015 double (*duration_error)[MAX_STD_TIMEBASES];
2016 int64_t old_offset = url_ftell(ic->pb);
2017 int64_t codec_info_duration[MAX_STREAMS]={0};
2018 int codec_info_nb_frames[MAX_STREAMS]={0};
2020 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2021 if (!duration_error) return AVERROR(ENOMEM);
2023 for(i=0;i<ic->nb_streams;i++) {
2024 st = ic->streams[i];
2025 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2026 /* if(!st->time_base.num)
2027 st->time_base= */
2028 if(!st->codec->time_base.num)
2029 st->codec->time_base= st->time_base;
2031 //only for the split stuff
2032 if (!st->parser) {
2033 st->parser = av_parser_init(st->codec->codec_id);
2034 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2035 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2040 for(i=0;i<MAX_STREAMS;i++){
2041 last_dts[i]= AV_NOPTS_VALUE;
2044 count = 0;
2045 read_size = 0;
2046 for(;;) {
2047 /* check if one codec still needs to be handled */
2048 for(i=0;i<ic->nb_streams;i++) {
2049 st = ic->streams[i];
2050 if (!has_codec_parameters(st->codec))
2051 break;
2052 /* variable fps and no guess at the real fps */
2053 if( tb_unreliable(st->codec)
2054 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2055 break;
2056 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2057 break;
2058 if(st->first_dts == AV_NOPTS_VALUE)
2059 break;
2061 if (i == ic->nb_streams) {
2062 /* NOTE: if the format has no header, then we need to read
2063 some packets to get most of the streams, so we cannot
2064 stop here */
2065 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2066 /* if we found the info for all the codecs, we can stop */
2067 ret = count;
2068 break;
2071 /* we did not get all the codec info, but we read too much data */
2072 if (read_size >= MAX_READ_SIZE) {
2073 ret = count;
2074 break;
2077 /* NOTE: a new stream can be added here if the file has no header
2078 (AVFMTCTX_NOHEADER) */
2079 ret = av_read_frame_internal(ic, &pkt1);
2080 if (ret < 0) {
2081 /* EOF or error */
2082 ret = -1; /* we could not get all the codec parameters before EOF */
2083 for(i=0;i<ic->nb_streams;i++) {
2084 st = ic->streams[i];
2085 if (!has_codec_parameters(st->codec)){
2086 char buf[256];
2087 avcodec_string(buf, sizeof(buf), st->codec, 0);
2088 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
2089 } else {
2090 ret = 0;
2093 break;
2096 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2097 if(av_dup_packet(pkt) < 0) {
2098 av_free(duration_error);
2099 return AVERROR(ENOMEM);
2102 read_size += pkt->size;
2104 st = ic->streams[pkt->stream_index];
2105 if(codec_info_nb_frames[st->index]>1)
2106 codec_info_duration[st->index] += pkt->duration;
2107 if (pkt->duration != 0)
2108 codec_info_nb_frames[st->index]++;
2111 int index= pkt->stream_index;
2112 int64_t last= last_dts[index];
2113 int64_t duration= pkt->dts - last;
2115 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2116 double dur= duration * av_q2d(st->time_base);
2118 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2119 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2120 if(duration_count[index] < 2)
2121 memset(duration_error[index], 0, sizeof(*duration_error));
2122 for(i=1; i<MAX_STD_TIMEBASES; i++){
2123 int framerate= get_std_framerate(i);
2124 int ticks= lrintf(dur*framerate/(1001*12));
2125 double error= dur - ticks*1001*12/(double)framerate;
2126 duration_error[index][i] += error*error;
2128 duration_count[index]++;
2130 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2131 last_dts[pkt->stream_index]= pkt->dts;
2133 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2134 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2135 if(i){
2136 st->codec->extradata_size= i;
2137 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2138 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2139 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2143 /* If we still have no information, try to open the codec and
2144 decompress the frame. We try to avoid that in most cases, as
2145 it takes longer and uses more memory. For MPEG-4, we need to
2146 decompress for QuickTime. */
2147 if (!has_codec_parameters(st->codec) /*&&
2148 (st->codec->codec_id == CODEC_ID_FLV1 ||
2149 st->codec->codec_id == CODEC_ID_H264 ||
2150 st->codec->codec_id == CODEC_ID_H263 ||
2151 st->codec->codec_id == CODEC_ID_H261 ||
2152 st->codec->codec_id == CODEC_ID_VORBIS ||
2153 st->codec->codec_id == CODEC_ID_MJPEG ||
2154 st->codec->codec_id == CODEC_ID_PNG ||
2155 st->codec->codec_id == CODEC_ID_PAM ||
2156 st->codec->codec_id == CODEC_ID_PGM ||
2157 st->codec->codec_id == CODEC_ID_PGMYUV ||
2158 st->codec->codec_id == CODEC_ID_PBM ||
2159 st->codec->codec_id == CODEC_ID_PPM ||
2160 st->codec->codec_id == CODEC_ID_SHORTEN ||
2161 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2162 try_decode_frame(st, pkt->data, pkt->size);
2164 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2165 break;
2167 count++;
2170 // close codecs which were opened in try_decode_frame()
2171 for(i=0;i<ic->nb_streams;i++) {
2172 st = ic->streams[i];
2173 if(st->codec->codec)
2174 avcodec_close(st->codec);
2176 for(i=0;i<ic->nb_streams;i++) {
2177 st = ic->streams[i];
2178 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2179 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2180 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2182 if(duration_count[i]
2183 && tb_unreliable(st->codec) /*&&
2184 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2185 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2186 double best_error= 2*av_q2d(st->time_base);
2187 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2189 for(j=1; j<MAX_STD_TIMEBASES; j++){
2190 double error= duration_error[i][j] * get_std_framerate(j);
2191 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2192 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2193 if(error < best_error){
2194 best_error= error;
2195 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2200 if (!st->r_frame_rate.num){
2201 if( st->codec->time_base.den * (int64_t)st->time_base.num
2202 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2203 st->r_frame_rate.num = st->codec->time_base.den;
2204 st->r_frame_rate.den = st->codec->time_base.num;
2205 }else{
2206 st->r_frame_rate.num = st->time_base.den;
2207 st->r_frame_rate.den = st->time_base.num;
2210 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2211 if(!st->codec->bits_per_coded_sample)
2212 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2216 av_estimate_timings(ic, old_offset);
2218 compute_chapters_end(ic);
2220 #if 0
2221 /* correct DTS for B-frame streams with no timestamps */
2222 for(i=0;i<ic->nb_streams;i++) {
2223 st = ic->streams[i];
2224 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2225 if(b-frames){
2226 ppktl = &ic->packet_buffer;
2227 while(ppkt1){
2228 if(ppkt1->stream_index != i)
2229 continue;
2230 if(ppkt1->pkt->dts < 0)
2231 break;
2232 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2233 break;
2234 ppkt1->pkt->dts -= delta;
2235 ppkt1= ppkt1->next;
2237 if(ppkt1)
2238 continue;
2239 st->cur_dts -= delta;
2243 #endif
2245 av_free(duration_error);
2247 return ret;
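#if 0
/* Editorial usage sketch (not part of the original file): the typical call
 * sequence around av_find_stream_info() for this era of the API, with error
 * handling kept to a minimum; "input.mpg" is only a placeholder name. */
static void example_probe_input(void)
{
    AVFormatContext *ic;
    AVPacket pkt;

    av_register_all();
    if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
        return;
    if (av_find_stream_info(ic) < 0) {       /* fills codec parameters, durations, r_frame_rate */
        av_close_input_file(ic);
        return;
    }
    dump_format(ic, 0, "input.mpg", 0);      /* print what was detected */
    while (av_read_frame(ic, &pkt) >= 0)     /* a real program would decode or remux here */
        av_free_packet(&pkt);
    av_close_input_file(ic);
}
#endif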
2250 /*******************************************************/
2252 int av_read_play(AVFormatContext *s)
2254 if (s->iformat->read_play)
2255 return s->iformat->read_play(s);
2256 if (s->pb)
2257 return av_url_read_fpause(s->pb, 0);
2258 return AVERROR(ENOSYS);
2261 int av_read_pause(AVFormatContext *s)
2263 if (s->iformat->read_pause)
2264 return s->iformat->read_pause(s);
2265 if (s->pb)
2266 return av_url_read_fpause(s->pb, 1);
2267 return AVERROR(ENOSYS);
2270 void av_close_input_stream(AVFormatContext *s)
2272 int i;
2273 AVStream *st;
2275 /* free previous packet */
2276 if (s->cur_st && s->cur_st->parser)
2277 av_free_packet(&s->cur_pkt);
2279 if (s->iformat->read_close)
2280 s->iformat->read_close(s);
2281 for(i=0;i<s->nb_streams;i++) {
2282 /* free all data in a stream component */
2283 st = s->streams[i];
2284 if (st->parser) {
2285 av_parser_close(st->parser);
2287 av_free(st->index_entries);
2288 av_free(st->codec->extradata);
2289 av_free(st->codec);
2290 av_free(st->filename);
2291 av_free(st->priv_data);
2292 av_free(st);
2294 for(i=s->nb_programs-1; i>=0; i--) {
2295 av_freep(&s->programs[i]->provider_name);
2296 av_freep(&s->programs[i]->name);
2297 av_freep(&s->programs[i]->stream_index);
2298 av_freep(&s->programs[i]);
2300 av_freep(&s->programs);
2301 flush_packet_queue(s);
2302 av_freep(&s->priv_data);
2303 while(s->nb_chapters--) {
2304 av_free(s->chapters[s->nb_chapters]->title);
2305 av_free(s->chapters[s->nb_chapters]);
2307 av_freep(&s->chapters);
2308 av_free(s);
2311 void av_close_input_file(AVFormatContext *s)
2313 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2314 av_close_input_stream(s);
2315 if (pb)
2316 url_fclose(pb);
2319 AVStream *av_new_stream(AVFormatContext *s, int id)
2321 AVStream *st;
2322 int i;
2324 if (s->nb_streams >= MAX_STREAMS)
2325 return NULL;
2327 st = av_mallocz(sizeof(AVStream));
2328 if (!st)
2329 return NULL;
2331 st->codec= avcodec_alloc_context();
2332 if (s->iformat) {
2333 /* no default bitrate if decoding */
2334 st->codec->bit_rate = 0;
2336 st->index = s->nb_streams;
2337 st->id = id;
2338 st->start_time = AV_NOPTS_VALUE;
2339 st->duration = AV_NOPTS_VALUE;
2340 /* we set the current DTS to 0 so that formats that carry only
2341 durations, but no timestamps, still get some timestamps; formats with
2342 some unknown timestamps have their first few packets buffered and the
2343 timestamps corrected before they are returned to the user */
2344 st->cur_dts = 0;
2345 st->first_dts = AV_NOPTS_VALUE;
2347 /* default pts setting is MPEG-like */
2348 av_set_pts_info(st, 33, 1, 90000);
2349 st->last_IP_pts = AV_NOPTS_VALUE;
2350 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2351 st->pts_buffer[i]= AV_NOPTS_VALUE;
2353 st->sample_aspect_ratio = (AVRational){0,1};
2355 s->streams[s->nb_streams++] = st;
2356 return st;
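/* Editorial note (not part of the original source): demuxers call
 * av_new_stream() once per elementary stream they discover; muxing
 * applications call it once per stream they intend to write and then fill
 * st->codec (codec_id, time_base, dimensions or sample_rate, ...) before
 * av_write_header(). The 33-bit / 90 kHz default installed above via
 * av_set_pts_info() is just the MPEG timestamp convention and is normally
 * overridden by the (de)muxer. */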
2359 AVProgram *av_new_program(AVFormatContext *ac, int id)
2361 AVProgram *program=NULL;
2362 int i;
2364 #ifdef DEBUG_SI
2365 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2366 #endif
2368 for(i=0; i<ac->nb_programs; i++)
2369 if(ac->programs[i]->id == id)
2370 program = ac->programs[i];
2372 if(!program){
2373 program = av_mallocz(sizeof(AVProgram));
2374 if (!program)
2375 return NULL;
2376 dynarray_add(&ac->programs, &ac->nb_programs, program);
2377 program->discard = AVDISCARD_NONE;
2379 program->id = id;
2381 return program;
2384 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2386 assert(!provider_name == !name);
2387 if(name){
2388 av_free(program->provider_name);
2389 av_free(program-> name);
2390 program->provider_name = av_strdup(provider_name);
2391 program-> name = av_strdup( name);
2395 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2397 AVChapter *chapter = NULL;
2398 int i;
2400 for(i=0; i<s->nb_chapters; i++)
2401 if(s->chapters[i]->id == id)
2402 chapter = s->chapters[i];
2404 if(!chapter){
2405 chapter= av_mallocz(sizeof(AVChapter));
2406 if(!chapter)
2407 return NULL;
2408 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2410 av_free(chapter->title);
2411 chapter->title = av_strdup(title);
2412 chapter->id = id;
2413 chapter->time_base= time_base;
2414 chapter->start = start;
2415 chapter->end = end;
2417 return chapter;
2420 /************************************************************/
2421 /* output media file */
2423 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2425 int ret;
2427 if (s->oformat->priv_data_size > 0) {
2428 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2429 if (!s->priv_data)
2430 return AVERROR(ENOMEM);
2431 } else
2432 s->priv_data = NULL;
2434 if (s->oformat->set_parameters) {
2435 ret = s->oformat->set_parameters(s, ap);
2436 if (ret < 0)
2437 return ret;
2439 return 0;
2442 int av_write_header(AVFormatContext *s)
2444 int ret, i;
2445 AVStream *st;
2447 // some sanity checks
2448 for(i=0;i<s->nb_streams;i++) {
2449 st = s->streams[i];
2451 switch (st->codec->codec_type) {
2452 case CODEC_TYPE_AUDIO:
2453 if(st->codec->sample_rate<=0){
2454 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2455 return -1;
2457 if(!st->codec->block_align)
2458 st->codec->block_align = st->codec->channels *
2459 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2460 break;
2461 case CODEC_TYPE_VIDEO:
2462 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2463 av_log(s, AV_LOG_ERROR, "time base not set\n");
2464 return -1;
2466 if(st->codec->width<=0 || st->codec->height<=0){
2467 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2468 return -1;
2470 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2471 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2472 return -1;
2474 break;
2477 if(s->oformat->codec_tag){
2478 if(st->codec->codec_tag){
2479 //FIXME
2480 //check that tag + id is in the table
2481 //if neither is in the table -> OK
2482 //if tag is in the table with another id -> FAIL
2483 //if id is in the table with another tag -> FAIL unless strict < ?
2484 }else
2485 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2489 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2490 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2491 if (!s->priv_data)
2492 return AVERROR(ENOMEM);
2495 if(s->oformat->write_header){
2496 ret = s->oformat->write_header(s);
2497 if (ret < 0)
2498 return ret;
2501 /* init PTS generation */
2502 for(i=0;i<s->nb_streams;i++) {
2503 int64_t den = AV_NOPTS_VALUE;
2504 st = s->streams[i];
2506 switch (st->codec->codec_type) {
2507 case CODEC_TYPE_AUDIO:
2508 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2509 break;
2510 case CODEC_TYPE_VIDEO:
2511 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2512 break;
2513 default:
2514 break;
2516 if (den != AV_NOPTS_VALUE) {
2517 if (den <= 0)
2518 return AVERROR_INVALIDDATA;
2519 av_frac_init(&st->pts, 0, 0, den);
2522 return 0;
2525 //FIXME merge with compute_pkt_fields
2526 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2527 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2528 int num, den, frame_size, i;
2530 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2532 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2533 return -1;*/
2535 /* duration field */
2536 if (pkt->duration == 0) {
2537 compute_frame_duration(&num, &den, st, NULL, pkt);
2538 if (den && num) {
2539 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2543 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2544 pkt->pts= pkt->dts;
2546 //XXX/FIXME this is a temporary hack until all encoders output pts
2547 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2548 pkt->dts=
2549 // pkt->pts= st->cur_dts;
2550 pkt->pts= st->pts.val;
2553 //calculate dts from pts
2554 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2555 st->pts_buffer[0]= pkt->pts;
2556 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2557 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2558 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2559 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2561 pkt->dts= st->pts_buffer[0];
2564 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2565 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2566 return -1;
2568 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2569 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2570 return -1;
2573 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2574 st->cur_dts= pkt->dts;
2575 st->pts.val= pkt->dts;
2577 /* update pts */
2578 switch (st->codec->codec_type) {
2579 case CODEC_TYPE_AUDIO:
2580 frame_size = get_audio_frame_size(st->codec, pkt->size);
2582 /* HACK/FIXME: we skip the initial zero-sized packets as they most
2583 likely correspond to the encoder delay, but it would be better if we
2584 had the real timestamps from the encoder */
2585 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2586 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2588 break;
2589 case CODEC_TYPE_VIDEO:
2590 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2591 break;
2592 default:
2593 break;
2595 return 0;
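/* Editorial worked example (not part of the original source): with
 * delay == 2 and pkt->duration == 1, feeding packets whose pts are
 * 0, 3, 1, 2, 6, 4, 5 produces dts -2, -1, 0, 1, 2, 3, 4. Each emitted dts
 * is the smallest of the delay+1 most recent pts values (the buffer is
 * seeded with -2 and -1 on the first packet), so dts increases
 * monotonically and never exceeds the packet's own pts. */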
2598 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2600 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2602 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2603 return ret;
2605 ret= s->oformat->write_packet(s, pkt);
2606 if(!ret)
2607 ret= url_ferror(s->pb);
2608 return ret;
2611 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2612 AVPacketList *pktl, **next_point, *this_pktl;
2613 int stream_count=0;
2614 int streams[MAX_STREAMS];
2616 if(pkt){
2617 AVStream *st= s->streams[ pkt->stream_index];
2619 // assert(pkt->destruct != av_destruct_packet); //FIXME
2621 this_pktl = av_mallocz(sizeof(AVPacketList));
2622 this_pktl->pkt= *pkt;
2623 if(pkt->destruct == av_destruct_packet)
2624 pkt->destruct= NULL; // not shared -> must keep original from being freed
2625 else
2626 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2628 next_point = &s->packet_buffer;
2629 while(*next_point){
2630 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2631 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2632 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2633 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2634 break;
2635 next_point= &(*next_point)->next;
2637 this_pktl->next= *next_point;
2638 *next_point= this_pktl;
2641 memset(streams, 0, sizeof(streams));
2642 pktl= s->packet_buffer;
2643 while(pktl){
2644 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2645 if(streams[ pktl->pkt.stream_index ] == 0)
2646 stream_count++;
2647 streams[ pktl->pkt.stream_index ]++;
2648 pktl= pktl->next;
2651 if(stream_count && (s->nb_streams == stream_count || flush)){
2652 pktl= s->packet_buffer;
2653 *out= pktl->pkt;
2655 s->packet_buffer= pktl->next;
2656 av_freep(&pktl);
2657 return 1;
2658 }else{
2659 av_init_packet(out);
2660 return 0;
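/* Editorial worked example (not part of the original source): the insertion
 * comparison above cross-multiplies so dts values in different time bases
 * can be ordered without conversion. With a video stream at time_base 1/25
 * and an audio stream at 1/44100, a new video packet with dts=50 (2.0 s) is
 * queued after a buffered audio packet with dts=66150 (1.5 s) because
 * 66150*1*25 < 50*1*44100. A packet is only returned once every stream has
 * at least one packet buffered, or when flushing. */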
2665 * Interleaves an AVPacket correctly so it can be muxed.
2666 * @param out the interleaved packet will be output here
2667 * @param in the input packet
2668 * @param flush 1 if no further packets are available as input and all
2669 * remaining packets should be output
2670 * @return 1 if a packet was output, 0 if no packet could be output,
2671 * < 0 if an error occurred
2673 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2674 if(s->oformat->interleave_packet)
2675 return s->oformat->interleave_packet(s, out, in, flush);
2676 else
2677 return av_interleave_packet_per_dts(s, out, in, flush);
2680 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2681 AVStream *st= s->streams[ pkt->stream_index];
2683 //FIXME/XXX/HACK drop zero sized packets
2684 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2685 return 0;
2687 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2688 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2689 return -1;
2691 if(pkt->dts == AV_NOPTS_VALUE)
2692 return -1;
2694 for(;;){
2695 AVPacket opkt;
2696 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2697 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2698 return ret;
2700 ret= s->oformat->write_packet(s, &opkt);
2702 av_free_packet(&opkt);
2703 pkt= NULL;
2705 if(ret<0)
2706 return ret;
2707 if(url_ferror(s->pb))
2708 return url_ferror(s->pb);
2712 int av_write_trailer(AVFormatContext *s)
2714 int ret, i;
2716 for(;;){
2717 AVPacket pkt;
2718 ret= av_interleave_packet(s, &pkt, NULL, 1);
2719 if(ret<0) //FIXME cleanup needed for ret<0 ?
2720 goto fail;
2721 if(!ret)
2722 break;
2724 ret= s->oformat->write_packet(s, &pkt);
2726 av_free_packet(&pkt);
2728 if(ret<0)
2729 goto fail;
2730 if(url_ferror(s->pb))
2731 goto fail;
2734 if(s->oformat->write_trailer)
2735 ret = s->oformat->write_trailer(s);
2736 fail:
2737 if(ret == 0)
2738 ret=url_ferror(s->pb);
2739 for(i=0;i<s->nb_streams;i++)
2740 av_freep(&s->streams[i]->priv_data);
2741 av_freep(&s->priv_data);
2742 return ret;
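#if 0
/* Editorial usage sketch (not part of the original file): the muxing half of
 * the API in the order the functions above expect to be called. Allocating
 * "oc", choosing oc->oformat and opening oc->pb are assumed to have been done
 * already, since the helpers for that step vary between versions of this era;
 * "packets" stands for already encoded, timestamped packets. */
static int example_mux(AVFormatContext *oc, AVPacket *packets, int nb_packets)
{
    int i;
    AVStream *st = av_new_stream(oc, 0);
    if (!st)
        return AVERROR(ENOMEM);
    /* the caller would fill st->codec (codec_id, time_base, dimensions, ...) here */

    if (av_set_parameters(oc, NULL) < 0)   /* no format-specific parameters */
        return -1;
    if (av_write_header(oc) < 0)           /* sanity checks + format header */
        return -1;
    for (i = 0; i < nb_packets; i++)
        if (av_interleaved_write_frame(oc, &packets[i]) < 0) /* buffers and interleaves by dts */
            return -1;
    return av_write_trailer(oc);           /* flushes the interleaving buffer, writes the trailer */
}
#endif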
2745 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2747 int i, j;
2748 AVProgram *program=NULL;
2749 void *tmp;
2751 for(i=0; i<ac->nb_programs; i++){
2752 if(ac->programs[i]->id != progid)
2753 continue;
2754 program = ac->programs[i];
2755 for(j=0; j<program->nb_stream_indexes; j++)
2756 if(program->stream_index[j] == idx)
2757 return;
2759 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2760 if(!tmp)
2761 return;
2762 program->stream_index = tmp;
2763 program->stream_index[program->nb_stream_indexes++] = idx;
2764 return;
2768 /* "user interface" functions */
2769 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2771 char buf[256];
2772 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2773 AVStream *st = ic->streams[i];
2774 int g = ff_gcd(st->time_base.num, st->time_base.den);
2775 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2776 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2777 /* the PID is important information, so we display it */
2778 /* XXX: add a generic system */
2779 if (flags & AVFMT_SHOW_IDS)
2780 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2781 if (strlen(st->language) > 0)
2782 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2783 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2784 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2785 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2786 if(st->r_frame_rate.den && st->r_frame_rate.num)
2787 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2788 /* else if(st->time_base.den && st->time_base.num)
2789 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2790 else
2791 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2793 av_log(NULL, AV_LOG_INFO, "\n");
2796 void dump_format(AVFormatContext *ic,
2797 int index,
2798 const char *url,
2799 int is_output)
2801 int i;
2803 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2804 is_output ? "Output" : "Input",
2805 index,
2806 is_output ? ic->oformat->name : ic->iformat->name,
2807 is_output ? "to" : "from", url);
2808 if (!is_output) {
2809 av_log(NULL, AV_LOG_INFO, " Duration: ");
2810 if (ic->duration != AV_NOPTS_VALUE) {
2811 int hours, mins, secs, us;
2812 secs = ic->duration / AV_TIME_BASE;
2813 us = ic->duration % AV_TIME_BASE;
2814 mins = secs / 60;
2815 secs %= 60;
2816 hours = mins / 60;
2817 mins %= 60;
2818 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2819 (100 * us) / AV_TIME_BASE);
2820 } else {
2821 av_log(NULL, AV_LOG_INFO, "N/A");
2823 if (ic->start_time != AV_NOPTS_VALUE) {
2824 int secs, us;
2825 av_log(NULL, AV_LOG_INFO, ", start: ");
2826 secs = ic->start_time / AV_TIME_BASE;
2827 us = ic->start_time % AV_TIME_BASE;
2828 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2829 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2831 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2832 if (ic->bit_rate) {
2833 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2834 } else {
2835 av_log(NULL, AV_LOG_INFO, "N/A");
2837 av_log(NULL, AV_LOG_INFO, "\n");
2839 if(ic->nb_programs) {
2840 int j, k;
2841 for(j=0; j<ic->nb_programs; j++) {
2842 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2843 ic->programs[j]->name ? ic->programs[j]->name : "");
2844 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2845 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2847 } else
2848 for(i=0;i<ic->nb_streams;i++)
2849 dump_stream_format(ic, i, index, is_output);
2852 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2854 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2857 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2859 AVRational frame_rate;
2860 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2861 *frame_rate_num= frame_rate.num;
2862 *frame_rate_den= frame_rate.den;
2863 return ret;
2866 int64_t av_gettime(void)
2868 struct timeval tv;
2869 gettimeofday(&tv,NULL);
2870 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2873 int64_t parse_date(const char *datestr, int duration)
2875 const char *p;
2876 int64_t t;
2877 struct tm dt;
2878 int i;
2879 static const char * const date_fmt[] = {
2880 "%Y-%m-%d",
2881 "%Y%m%d",
2883 static const char * const time_fmt[] = {
2884 "%H:%M:%S",
2885 "%H%M%S",
2887 const char *q;
2888 int is_utc, len;
2889 char lastch;
2890 int negative = 0;
2892 #undef time
2893 time_t now = time(0);
2895 len = strlen(datestr);
2896 if (len > 0)
2897 lastch = datestr[len - 1];
2898 else
2899 lastch = '\0';
2900 is_utc = (lastch == 'z' || lastch == 'Z');
2902 memset(&dt, 0, sizeof(dt));
2904 p = datestr;
2905 q = NULL;
2906 if (!duration) {
2907 /* parse the year-month-day part */
2908 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2909 q = small_strptime(p, date_fmt[i], &dt);
2910 if (q) {
2911 break;
2915 /* if the year-month-day part is missing, then take the
2916 * current year-month-day time */
2917 if (!q) {
2918 if (is_utc) {
2919 dt = *gmtime(&now);
2920 } else {
2921 dt = *localtime(&now);
2923 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2924 } else {
2925 p = q;
2928 if (*p == 'T' || *p == 't' || *p == ' ')
2929 p++;
2931 /* parse the hour-minute-second part */
2932 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2933 q = small_strptime(p, time_fmt[i], &dt);
2934 if (q) {
2935 break;
2938 } else {
2939 /* parse datestr as a duration */
2940 if (p[0] == '-') {
2941 negative = 1;
2942 ++p;
2944 /* parse datestr as HH:MM:SS */
2945 q = small_strptime(p, time_fmt[0], &dt);
2946 if (!q) {
2947 /* parse datestr as S+ */
2948 dt.tm_sec = strtol(p, (char **)&q, 10);
2949 if (q == p)
2950 /* the parsing didn't succeed */
2951 return INT64_MIN;
2952 dt.tm_min = 0;
2953 dt.tm_hour = 0;
2957 /* Now we have all the fields that we can get */
2958 if (!q) {
2959 return INT64_MIN;
2962 if (duration) {
2963 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2964 } else {
2965 dt.tm_isdst = -1; /* unknown */
2966 if (is_utc) {
2967 t = mktimegm(&dt);
2968 } else {
2969 t = mktime(&dt);
2973 t *= 1000000;
2975 /* parse the optional fractional second part (microseconds) */
2976 if (*q == '.') {
2977 int val, n;
2978 q++;
2979 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2980 if (!isdigit(*q))
2981 break;
2982 val += n * (*q - '0');
2984 t += val;
2986 return negative ? -t : t;
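/* Editorial examples (derived from the code above, not part of the original
 * source): as a date, parse_date() accepts e.g. "2009-01-31 23:59:59",
 * "20090131T235959Z" (a trailing 'z'/'Z' selects UTC) or a bare "23:59:59",
 * which is taken relative to today's date; with duration != 0 it accepts
 * "01:30:05", plain seconds such as "90", a leading '-' for negative values
 * and an optional fractional part like "12.345". The result is in
 * microseconds, or INT64_MIN if nothing could be parsed. */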
2989 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2991 const char *p;
2992 char tag[128], *q;
2994 p = info;
2995 if (*p == '?')
2996 p++;
2997 for(;;) {
2998 q = tag;
2999 while (*p != '\0' && *p != '=' && *p != '&') {
3000 if ((q - tag) < sizeof(tag) - 1)
3001 *q++ = *p;
3002 p++;
3004 *q = '\0';
3005 q = arg;
3006 if (*p == '=') {
3007 p++;
3008 while (*p != '&' && *p != '\0') {
3009 if ((q - arg) < arg_size - 1) {
3010 if (*p == '+')
3011 *q++ = ' ';
3012 else
3013 *q++ = *p;
3015 p++;
3017 *q = '\0';
3019 if (!strcmp(tag, tag1))
3020 return 1;
3021 if (*p != '&')
3022 break;
3023 p++;
3025 return 0;
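/* Editorial example (not part of the original source, tag names are only
 * illustrative): with info = "?fifo_size=1000&title=My+Stream", calling
 * find_info_tag(buf, sizeof(buf), "title", info) returns 1 and stores
 * "My Stream" in buf ('+' is decoded as a space); asking for a tag that is
 * not present returns 0 and leaves buf in an unspecified state. */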
3028 int av_get_frame_filename(char *buf, int buf_size,
3029 const char *path, int number)
3031 const char *p;
3032 char *q, buf1[20], c;
3033 int nd, len, percentd_found;
3035 q = buf;
3036 p = path;
3037 percentd_found = 0;
3038 for(;;) {
3039 c = *p++;
3040 if (c == '\0')
3041 break;
3042 if (c == '%') {
3043 do {
3044 nd = 0;
3045 while (isdigit(*p)) {
3046 nd = nd * 10 + *p++ - '0';
3048 c = *p++;
3049 } while (isdigit(c));
3051 switch(c) {
3052 case '%':
3053 goto addchar;
3054 case 'd':
3055 if (percentd_found)
3056 goto fail;
3057 percentd_found = 1;
3058 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3059 len = strlen(buf1);
3060 if ((q - buf + len) > buf_size - 1)
3061 goto fail;
3062 memcpy(q, buf1, len);
3063 q += len;
3064 break;
3065 default:
3066 goto fail;
3068 } else {
3069 addchar:
3070 if ((q - buf) < buf_size - 1)
3071 *q++ = c;
3074 if (!percentd_found)
3075 goto fail;
3076 *q = '\0';
3077 return 0;
3078 fail:
3079 *q = '\0';
3080 return -1;
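/* Editorial examples (not part of the original source):
 *   av_get_frame_filename(buf, sizeof(buf), "img%03d.png", 7) -> "img007.png", returns 0
 *   av_get_frame_filename(buf, sizeof(buf), "img%d.png",   7) -> "img7.png",   returns 0
 * A path containing no %d, or more than one, makes the function return -1;
 * "%%" produces a literal '%' and does not count as the %d specifier. */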
3083 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3085 int len, i, j, c;
3086 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3088 for(i=0;i<size;i+=16) {
3089 len = size - i;
3090 if (len > 16)
3091 len = 16;
3092 PRINT("%08x ", i);
3093 for(j=0;j<16;j++) {
3094 if (j < len)
3095 PRINT(" %02x", buf[i+j]);
3096 else
3097 PRINT(" ");
3099 PRINT(" ");
3100 for(j=0;j<len;j++) {
3101 c = buf[i+j];
3102 if (c < ' ' || c > '~')
3103 c = '.';
3104 PRINT("%c", c);
3106 PRINT("\n");
3108 #undef PRINT
3111 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3113 hex_dump_internal(NULL, f, 0, buf, size);
3116 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3118 hex_dump_internal(avcl, NULL, level, buf, size);
3121 //FIXME needs to know the time_base
3122 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3124 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3125 PRINT("stream #%d:\n", pkt->stream_index);
3126 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3127 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3128 /* DTS is _always_ valid after av_read_frame() */
3129 PRINT(" dts=");
3130 if (pkt->dts == AV_NOPTS_VALUE)
3131 PRINT("N/A");
3132 else
3133 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3134 /* PTS may not be known if B-frames are present. */
3135 PRINT(" pts=");
3136 if (pkt->pts == AV_NOPTS_VALUE)
3137 PRINT("N/A");
3138 else
3139 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3140 PRINT("\n");
3141 PRINT(" size=%d\n", pkt->size);
3142 #undef PRINT
3143 if (dump_payload)
3144 av_hex_dump(f, pkt->data, pkt->size);
3147 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3149 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3152 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3154 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
3157 void url_split(char *proto, int proto_size,
3158 char *authorization, int authorization_size,
3159 char *hostname, int hostname_size,
3160 int *port_ptr,
3161 char *path, int path_size,
3162 const char *url)
3164 const char *p, *ls, *at, *col, *brk;
3166 if (port_ptr) *port_ptr = -1;
3167 if (proto_size > 0) proto[0] = 0;
3168 if (authorization_size > 0) authorization[0] = 0;
3169 if (hostname_size > 0) hostname[0] = 0;
3170 if (path_size > 0) path[0] = 0;
3172 /* parse protocol */
3173 if ((p = strchr(url, ':'))) {
3174 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3175 p++; /* skip ':' */
3176 if (*p == '/') p++;
3177 if (*p == '/') p++;
3178 } else {
3179 /* no protocol means plain filename */
3180 av_strlcpy(path, url, path_size);
3181 return;
3184 /* separate path from hostname */
3185 ls = strchr(p, '/');
3186 if(!ls)
3187 ls = strchr(p, '?');
3188 if(ls)
3189 av_strlcpy(path, ls, path_size);
3190 else
3191 ls = &p[strlen(p)]; // XXX
3193 /* the rest is hostname, use that to parse auth/port */
3194 if (ls != p) {
3195 /* authorization (user[:pass]@hostname) */
3196 if ((at = strchr(p, '@')) && at < ls) {
3197 av_strlcpy(authorization, p,
3198 FFMIN(authorization_size, at + 1 - p));
3199 p = at + 1; /* skip '@' */
3202 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3203 /* [host]:port */
3204 av_strlcpy(hostname, p + 1,
3205 FFMIN(hostname_size, brk - p));
3206 if (brk[1] == ':' && port_ptr)
3207 *port_ptr = atoi(brk + 2);
3208 } else if ((col = strchr(p, ':')) && col < ls) {
3209 av_strlcpy(hostname, p,
3210 FFMIN(col + 1 - p, hostname_size));
3211 if (port_ptr) *port_ptr = atoi(col + 1);
3212 } else
3213 av_strlcpy(hostname, p,
3214 FFMIN(ls + 1 - p, hostname_size));
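/* Editorial example (not part of the original source): for
 * "rtsp://user:pass@[2001:db8::1]:554/stream?cam=1", url_split() yields
 * proto="rtsp", authorization="user:pass", hostname="2001:db8::1",
 * *port_ptr=554 and path="/stream?cam=1"; a string without a "proto:"
 * prefix is treated as a plain filename and copied entirely into path. */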
3218 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3220 int i;
3221 static const char hex_table[16] = { '0', '1', '2', '3',
3222 '4', '5', '6', '7',
3223 '8', '9', 'A', 'B',
3224 'C', 'D', 'E', 'F' };
3226 for(i = 0; i < s; i++) {
3227 buff[i * 2] = hex_table[src[i] >> 4];
3228 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3231 return buff;
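/* Editorial note (not part of the original source): buff must have room for
 * at least 2*s characters, and as written the function does not append a
 * terminating '\0', so callers that need a C string must add it themselves. */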
3234 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3235 int pts_num, int pts_den)
3237 unsigned int gcd= ff_gcd(pts_num, pts_den);
3238 s->pts_wrap_bits = pts_wrap_bits;
3239 s->time_base.num = pts_num/gcd;
3240 s->time_base.den = pts_den/gcd;
3242 if(gcd>1)
3243 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);