1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "avformat.h"
22 #include "internal.h"
23 #include "libavcodec/opt.h"
24 #include "metadata.h"
25 #include "libavutil/avstring.h"
26 #include "riff.h"
27 #include <sys/time.h>
28 #include <time.h>
29 #include <strings.h>
31 #undef NDEBUG
32 #include <assert.h>
34 /**
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
46 /**
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
58 num += (den >> 1);
59 if (num >= den) {
60 val += num / den;
61 num = num % den;
63 f->val = val;
64 f->num = num;
65 f->den = den;
68 /**
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
74 static void av_frac_add(AVFrac *f, int64_t incr)
76 int64_t num, den;
78 num = f->num + incr;
79 den = f->den;
80 if (num < 0) {
81 f->val += num / den;
82 num = num % den;
83 if (num < 0) {
84 num += den;
85 f->val--;
87 } else if (num >= den) {
88 f->val += num / den;
89 num = num % den;
91 f->num = num;
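/* Usage sketch (illustrative values, not taken from this file):
 *
 *   AVFrac f;
 *   av_frac_init(&f, 0, 0, 90000); // start tracking 0, in units of 1/90000
 *   av_frac_add(&f, 3003);         // add 3003/90000
 *   av_frac_add(&f, 3003);         // f.val stays 0 until the sum rounds up to 1
 *
 * Because the remainder is kept exactly in f.num/f.den, repeated additions do
 * not accumulate rounding error; f.val tracks the running value rounded to the
 * nearest integer (see the den/2 bias added in av_frac_init()). */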
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
111 void av_register_input_format(AVInputFormat *format)
113 AVInputFormat **p;
114 p = &first_iformat;
115 while (*p != NULL) p = &(*p)->next;
116 *p = format;
117 format->next = NULL;
120 void av_register_output_format(AVOutputFormat *format)
122 AVOutputFormat **p;
123 p = &first_oformat;
124 while (*p != NULL) p = &(*p)->next;
125 *p = format;
126 format->next = NULL;
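/**
 * Check whether the extension of 'filename' appears in 'extensions', a
 * comma-separated list. The comparison is case-insensitive, so e.g.
 * match_ext("movie.MKV", "avi,mkv") returns 1 (illustrative values).
 * @return 1 on match, 0 otherwise
 */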
129 int match_ext(const char *filename, const char *extensions)
131 const char *ext, *p;
132 char ext1[32], *q;
134 if(!filename)
135 return 0;
137 ext = strrchr(filename, '.');
138 if (ext) {
139 ext++;
140 p = extensions;
141 for(;;) {
142 q = ext1;
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
144 *q++ = *p++;
145 *q = '\0';
146 if (!strcasecmp(ext1, ext))
147 return 1;
148 if (*p == '\0')
149 break;
150 p++;
153 return 0;
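/**
 * Pick the best matching registered output format. The scoring below gives an
 * exact short_name match 100 points, a MIME type match 10 and a filename
 * extension match 5, so e.g. guess_format(NULL, "out.avi", NULL) would
 * normally return the AVI muxer (illustrative example, not from this file).
 */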
156 AVOutputFormat *guess_format(const char *short_name, const char *filename,
157 const char *mime_type)
159 AVOutputFormat *fmt, *fmt_found;
160 int score_max, score;
162 /* specific test for image sequences */
163 #ifdef CONFIG_IMAGE2_MUXER
164 if (!short_name && filename &&
165 av_filename_number_test(filename) &&
166 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
167 return guess_format("image2", NULL, NULL);
169 #endif
170 /* Find the proper file type. */
171 fmt_found = NULL;
172 score_max = 0;
173 fmt = first_oformat;
174 while (fmt != NULL) {
175 score = 0;
176 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
177 score += 100;
178 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
179 score += 10;
180 if (filename && fmt->extensions &&
181 match_ext(filename, fmt->extensions)) {
182 score += 5;
184 if (score > score_max) {
185 score_max = score;
186 fmt_found = fmt;
188 fmt = fmt->next;
190 return fmt_found;
193 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
194 const char *mime_type)
196 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
198 if (fmt) {
199 AVOutputFormat *stream_fmt;
200 char stream_format_name[64];
202 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
203 stream_fmt = guess_format(stream_format_name, NULL, NULL);
205 if (stream_fmt)
206 fmt = stream_fmt;
209 return fmt;
212 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
213 const char *filename, const char *mime_type, enum CodecType type){
214 if(type == CODEC_TYPE_VIDEO){
215 enum CodecID codec_id= CODEC_ID_NONE;
217 #ifdef CONFIG_IMAGE2_MUXER
218 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
219 codec_id= av_guess_image2_codec(filename);
221 #endif
222 if(codec_id == CODEC_ID_NONE)
223 codec_id= fmt->video_codec;
224 return codec_id;
225 }else if(type == CODEC_TYPE_AUDIO)
226 return fmt->audio_codec;
227 else
228 return CODEC_ID_NONE;
231 AVInputFormat *av_find_input_format(const char *short_name)
233 AVInputFormat *fmt;
234 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
235 if (!strcmp(fmt->name, short_name))
236 return fmt;
238 return NULL;
241 /* memory handling */
243 void av_destruct_packet(AVPacket *pkt)
245 av_free(pkt->data);
246 pkt->data = NULL; pkt->size = 0;
249 void av_init_packet(AVPacket *pkt)
251 pkt->pts = AV_NOPTS_VALUE;
252 pkt->dts = AV_NOPTS_VALUE;
253 pkt->pos = -1;
254 pkt->duration = 0;
255 pkt->convergence_duration = 0;
256 pkt->flags = 0;
257 pkt->stream_index = 0;
258 pkt->destruct= av_destruct_packet_nofree;
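/* av_new_packet() allocates size + FF_INPUT_BUFFER_PADDING_SIZE bytes and
 * zeroes the padding, which lets bitstream readers overread safely.
 * Minimal sketch of the intended use (hypothetical caller code):
 *
 *   AVPacket pkt;
 *   if (av_new_packet(&pkt, len) < 0)
 *       return AVERROR(ENOMEM);
 *   memcpy(pkt.data, src, len);   // padding after pkt.data[len] stays zeroed
 *   ...
 *   av_free_packet(&pkt);
 */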
261 int av_new_packet(AVPacket *pkt, int size)
263 uint8_t *data;
264 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
265 return AVERROR(ENOMEM);
266 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
267 if (!data)
268 return AVERROR(ENOMEM);
269 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
271 av_init_packet(pkt);
272 pkt->data = data;
273 pkt->size = size;
274 pkt->destruct = av_destruct_packet;
275 return 0;
278 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
280 int ret= av_new_packet(pkt, size);
282 if(ret<0)
283 return ret;
285 pkt->pos= url_ftell(s);
287 ret= get_buffer(s, pkt->data, size);
288 if(ret<=0)
289 av_free_packet(pkt);
290 else
291 pkt->size= ret;
293 return ret;
296 int av_dup_packet(AVPacket *pkt)
298 if (pkt->destruct != av_destruct_packet) {
299 uint8_t *data;
300 /* We duplicate the packet and don't forget to add the padding again. */
301 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
302 return AVERROR(ENOMEM);
303 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
304 if (!data) {
305 return AVERROR(ENOMEM);
307 memcpy(data, pkt->data, pkt->size);
308 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
309 pkt->data = data;
310 pkt->destruct = av_destruct_packet;
312 return 0;
315 int av_filename_number_test(const char *filename)
317 char buf[1024];
318 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
321 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
323 AVInputFormat *fmt1, *fmt;
324 int score;
326 fmt = NULL;
327 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
328 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
329 continue;
330 score = 0;
331 if (fmt1->read_probe) {
332 score = fmt1->read_probe(pd);
333 } else if (fmt1->extensions) {
334 if (match_ext(pd->filename, fmt1->extensions)) {
335 score = 50;
338 if (score > *score_max) {
339 *score_max = score;
340 fmt = fmt1;
341 }else if (score == *score_max)
342 fmt = NULL;
344 return fmt;
347 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
348 int score=0;
349 return av_probe_input_format2(pd, is_opened, &score);
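/**
 * Re-run format probing on the raw data collected for a stream that is still
 * marked CODEC_ID_PROBE and, for a few known elementary stream formats
 * (mp3, ac3, mpegvideo, m4v, h264), fill in codec_id and codec_type.
 * @return nonzero if a format was recognized
 */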
352 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
354 AVInputFormat *fmt;
355 fmt = av_probe_input_format2(pd, 1, &score);
357 if (fmt) {
358 if (!strcmp(fmt->name, "mp3")) {
359 st->codec->codec_id = CODEC_ID_MP3;
360 st->codec->codec_type = CODEC_TYPE_AUDIO;
361 } else if (!strcmp(fmt->name, "ac3")) {
362 st->codec->codec_id = CODEC_ID_AC3;
363 st->codec->codec_type = CODEC_TYPE_AUDIO;
364 } else if (!strcmp(fmt->name, "mpegvideo")) {
365 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
366 st->codec->codec_type = CODEC_TYPE_VIDEO;
367 } else if (!strcmp(fmt->name, "m4v")) {
368 st->codec->codec_id = CODEC_ID_MPEG4;
369 st->codec->codec_type = CODEC_TYPE_VIDEO;
370 } else if (!strcmp(fmt->name, "h264")) {
371 st->codec->codec_id = CODEC_ID_H264;
372 st->codec->codec_type = CODEC_TYPE_VIDEO;
375 return !!fmt;
378 /************************************************************/
379 /* input media file */
382 * Open a media file from an IO stream. 'fmt' must be specified.
384 static const char* format_to_name(void* ptr)
386 AVFormatContext* fc = (AVFormatContext*) ptr;
387 if(fc->iformat) return fc->iformat->name;
388 else if(fc->oformat) return fc->oformat->name;
389 else return "NULL";
392 #define OFFSET(x) offsetof(AVFormatContext,x)
393 #define DEFAULT 0 //should be NAN, but NAN is not a constant in glibc as required by ANSI/ISO C
394 //these names are too long to be readable
395 #define E AV_OPT_FLAG_ENCODING_PARAM
396 #define D AV_OPT_FLAG_DECODING_PARAM
398 static const AVOption options[]={
399 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
400 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
401 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
402 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
403 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
404 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
405 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
406 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
407 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
408 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
409 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
410 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
411 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
412 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
413 {NULL},
416 #undef E
417 #undef D
418 #undef DEFAULT
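/* The table above exposes demuxer/muxer tuning through the generic AVOption
 * API (libavcodec/opt.h): for example "probesize" and "analyzeduration" bound
 * how much input is inspected while probing, "fflags" carries the GENPTS and
 * IGNIDX flags, and "cryptokey" passes a decryption key to demuxers that
 * support it. Defaults are applied by av_opt_set_defaults() below. */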
420 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
422 static void avformat_get_context_defaults(AVFormatContext *s)
424 memset(s, 0, sizeof(AVFormatContext));
426 s->av_class = &av_format_context_class;
428 av_opt_set_defaults(s);
431 AVFormatContext *av_alloc_format_context(void)
433 AVFormatContext *ic;
434 ic = av_malloc(sizeof(AVFormatContext));
435 if (!ic) return ic;
436 avformat_get_context_defaults(ic);
437 ic->av_class = &av_format_context_class;
438 return ic;
441 int av_open_input_stream(AVFormatContext **ic_ptr,
442 ByteIOContext *pb, const char *filename,
443 AVInputFormat *fmt, AVFormatParameters *ap)
445 int err;
446 AVFormatContext *ic;
447 AVFormatParameters default_ap;
449 if(!ap){
450 ap=&default_ap;
451 memset(ap, 0, sizeof(default_ap));
454 if(!ap->prealloced_context)
455 ic = av_alloc_format_context();
456 else
457 ic = *ic_ptr;
458 if (!ic) {
459 err = AVERROR(ENOMEM);
460 goto fail;
462 ic->iformat = fmt;
463 ic->pb = pb;
464 ic->duration = AV_NOPTS_VALUE;
465 ic->start_time = AV_NOPTS_VALUE;
466 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
468 /* allocate private data */
469 if (fmt->priv_data_size > 0) {
470 ic->priv_data = av_mallocz(fmt->priv_data_size);
471 if (!ic->priv_data) {
472 err = AVERROR(ENOMEM);
473 goto fail;
475 } else {
476 ic->priv_data = NULL;
479 if (ic->iformat->read_header) {
480 err = ic->iformat->read_header(ic, ap);
481 if (err < 0)
482 goto fail;
485 if (pb && !ic->data_offset)
486 ic->data_offset = url_ftell(ic->pb);
488 *ic_ptr = ic;
489 return 0;
490 fail:
491 if (ic) {
492 int i;
493 av_freep(&ic->priv_data);
494 for(i=0;i<ic->nb_streams;i++) {
495 AVStream *st = ic->streams[i];
496 if (st) {
497 av_free(st->priv_data);
498 av_free(st->codec->extradata);
500 av_free(st);
503 av_free(ic);
504 *ic_ptr = NULL;
505 return err;
508 /** size of probe buffer, for guessing file type from file contents */
509 #define PROBE_BUF_MIN 2048
510 #define PROBE_BUF_MAX (1<<20)
512 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
513 AVInputFormat *fmt,
514 int buf_size,
515 AVFormatParameters *ap)
517 int err, probe_size;
518 AVProbeData probe_data, *pd = &probe_data;
519 ByteIOContext *pb = NULL;
521 pd->filename = "";
522 if (filename)
523 pd->filename = filename;
524 pd->buf = NULL;
525 pd->buf_size = 0;
527 if (!fmt) {
528 /* guess format if no file can be opened */
529 fmt = av_probe_input_format(pd, 0);
532 /* Do not open file if the format does not need it. XXX: specific
533 hack needed to handle RTSP/TCP */
534 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
535 /* if no file needed do not try to open one */
536 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
537 goto fail;
539 if (buf_size > 0) {
540 url_setbufsize(pb, buf_size);
543 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
544 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
545 /* read probe data */
546 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
547 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
548 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
549 if (url_fseek(pb, 0, SEEK_SET) < 0) {
550 url_fclose(pb);
551 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
552 pb = NULL;
553 err = AVERROR(EIO);
554 goto fail;
557 /* guess file format */
558 fmt = av_probe_input_format2(pd, 1, &score);
560 av_freep(&pd->buf);
563 /* if still no format found, error */
564 if (!fmt) {
565 err = AVERROR_NOFMT;
566 goto fail;
569 /* check filename in case an image number is expected */
570 if (fmt->flags & AVFMT_NEEDNUMBER) {
571 if (!av_filename_number_test(filename)) {
572 err = AVERROR_NUMEXPECTED;
573 goto fail;
576 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
577 if (err)
578 goto fail;
579 return 0;
580 fail:
581 av_freep(&pd->buf);
582 if (pb)
583 url_fclose(pb);
584 *ic_ptr = NULL;
585 return err;
589 /*******************************************************/
591 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
592 AVPacketList **plast_pktl){
593 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
594 if (!pktl)
595 return NULL;
597 if (*packet_buffer)
598 (*plast_pktl)->next = pktl;
599 else
600 *packet_buffer = pktl;
602 /* add the packet in the buffered packet list */
603 *plast_pktl = pktl;
604 pktl->pkt= *pkt;
605 return &pktl->pkt;
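/* av_read_packet() returns raw demuxed packets. Packets belonging to a stream
 * whose codec is still CODEC_ID_PROBE are buffered in raw_packet_buffer and
 * their payload is appended to the stream's probe_data; each time the probe
 * buffer size crosses a power of two, set_codec_from_probe_data() is retried
 * until the codec is identified. */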
608 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
610 int ret;
611 AVStream *st;
613 for(;;){
614 AVPacketList *pktl = s->raw_packet_buffer;
616 if (pktl) {
617 *pkt = pktl->pkt;
618 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
619 s->raw_packet_buffer = pktl->next;
620 av_free(pktl);
621 return 0;
625 av_init_packet(pkt);
626 ret= s->iformat->read_packet(s, pkt);
627 if (ret < 0)
628 return ret;
629 st= s->streams[pkt->stream_index];
631 switch(st->codec->codec_type){
632 case CODEC_TYPE_VIDEO:
633 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
634 break;
635 case CODEC_TYPE_AUDIO:
636 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
637 break;
638 case CODEC_TYPE_SUBTITLE:
639 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
640 break;
643 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
644 return ret;
646 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
648 if(st->codec->codec_id == CODEC_ID_PROBE){
649 AVProbeData *pd = &st->probe_data;
651 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
652 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
653 pd->buf_size += pkt->size;
654 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
656 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
657 set_codec_from_probe_data(st, pd, 1);
658 if(st->codec->codec_id != CODEC_ID_PROBE){
659 pd->buf_size=0;
660 av_freep(&pd->buf);
667 /**********************************************************/
670 * Get the number of samples of an audio frame. Return -1 on error.
672 static int get_audio_frame_size(AVCodecContext *enc, int size)
674 int frame_size;
676 if(enc->codec_id == CODEC_ID_VORBIS)
677 return -1;
679 if (enc->frame_size <= 1) {
680 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
682 if (bits_per_sample) {
683 if (enc->channels == 0)
684 return -1;
685 frame_size = (size << 3) / (bits_per_sample * enc->channels);
686 } else {
687 /* used for example by ADPCM codecs */
688 if (enc->bit_rate == 0)
689 return -1;
690 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
692 } else {
693 frame_size = enc->frame_size;
695 return frame_size;
700 * Compute the frame duration as a fraction, *pnum / *pden, in seconds; both are set to 0 if it is not available.
702 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
703 AVCodecParserContext *pc, AVPacket *pkt)
705 int frame_size;
707 *pnum = 0;
708 *pden = 0;
709 switch(st->codec->codec_type) {
710 case CODEC_TYPE_VIDEO:
711 if(st->time_base.num*1000LL > st->time_base.den){
712 *pnum = st->time_base.num;
713 *pden = st->time_base.den;
714 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
715 *pnum = st->codec->time_base.num;
716 *pden = st->codec->time_base.den;
717 if (pc && pc->repeat_pict) {
718 *pden *= 2;
719 *pnum = (*pnum) * (2 + pc->repeat_pict);
722 break;
723 case CODEC_TYPE_AUDIO:
724 frame_size = get_audio_frame_size(st->codec, pkt->size);
725 if (frame_size < 0)
726 break;
727 *pnum = frame_size;
728 *pden = st->codec->sample_rate;
729 break;
730 default:
731 break;
735 static int is_intra_only(AVCodecContext *enc){
736 if(enc->codec_type == CODEC_TYPE_AUDIO){
737 return 1;
738 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
739 switch(enc->codec_id){
740 case CODEC_ID_MJPEG:
741 case CODEC_ID_MJPEGB:
742 case CODEC_ID_LJPEG:
743 case CODEC_ID_RAWVIDEO:
744 case CODEC_ID_DVVIDEO:
745 case CODEC_ID_HUFFYUV:
746 case CODEC_ID_FFVHUFF:
747 case CODEC_ID_ASV1:
748 case CODEC_ID_ASV2:
749 case CODEC_ID_VCR1:
750 case CODEC_ID_DNXHD:
751 return 1;
752 default: break;
755 return 0;
758 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
759 int64_t dts, int64_t pts)
761 AVStream *st= s->streams[stream_index];
762 AVPacketList *pktl= s->packet_buffer;
764 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
765 return;
767 st->first_dts= dts - st->cur_dts;
768 st->cur_dts= dts;
770 for(; pktl; pktl= pktl->next){
771 if(pktl->pkt.stream_index != stream_index)
772 continue;
773 //FIXME think more about this check
774 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
775 pktl->pkt.pts += st->first_dts;
777 if(pktl->pkt.dts != AV_NOPTS_VALUE)
778 pktl->pkt.dts += st->first_dts;
780 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
781 st->start_time= pktl->pkt.pts;
783 if (st->start_time == AV_NOPTS_VALUE)
784 st->start_time = pts;
787 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
789 AVPacketList *pktl= s->packet_buffer;
790 int64_t cur_dts= 0;
792 if(st->first_dts != AV_NOPTS_VALUE){
793 cur_dts= st->first_dts;
794 for(; pktl; pktl= pktl->next){
795 if(pktl->pkt.stream_index == pkt->stream_index){
796 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
797 break;
798 cur_dts -= pkt->duration;
801 pktl= s->packet_buffer;
802 st->first_dts = cur_dts;
803 }else if(st->cur_dts)
804 return;
806 for(; pktl; pktl= pktl->next){
807 if(pktl->pkt.stream_index != pkt->stream_index)
808 continue;
809 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
810 && !pktl->pkt.duration){
811 pktl->pkt.dts= cur_dts;
812 if(!st->codec->has_b_frames)
813 pktl->pkt.pts= cur_dts;
814 cur_dts += pkt->duration;
815 pktl->pkt.duration= pkt->duration;
816 }else
817 break;
819 if(st->first_dts == AV_NOPTS_VALUE)
820 st->cur_dts= cur_dts;
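/* Fill in the timestamp related fields of a freshly demuxed packet: estimate
 * a missing duration from the stream/codec time base (video) or from the
 * audio frame size, compensate for B-frame presentation delay, interpolate
 * missing pts/dts values and keep st->cur_dts up to date. */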
823 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
824 AVCodecParserContext *pc, AVPacket *pkt)
826 int num, den, presentation_delayed, delay, i;
827 int64_t offset;
829 /* do we have a video B-frame ? */
830 delay= st->codec->has_b_frames;
831 presentation_delayed = 0;
832 /* XXX: need has_b_frame, but cannot get it if the codec is
833 not initialized */
834 if (delay &&
835 pc && pc->pict_type != FF_B_TYPE)
836 presentation_delayed = 1;
838 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
839 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
840 pkt->dts -= 1LL<<st->pts_wrap_bits;
843 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
844 // we take the conservative approach and discard both
845 // Note, if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
846 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
847 av_log(s, AV_LOG_ERROR, "invalid dts/pts combination\n");
848 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
851 if (pkt->duration == 0) {
852 compute_frame_duration(&num, &den, st, pc, pkt);
853 if (den && num) {
854 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
856 if(pkt->duration != 0 && s->packet_buffer)
857 update_initial_durations(s, st, pkt);
861 /* correct timestamps with byte offset if demuxers only have timestamps
862 on packet boundaries */
863 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
864 /* this will estimate bitrate based on this frame's duration and size */
865 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
866 if(pkt->pts != AV_NOPTS_VALUE)
867 pkt->pts += offset;
868 if(pkt->dts != AV_NOPTS_VALUE)
869 pkt->dts += offset;
872 /* This may be redundant, but it should not hurt. */
873 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
874 presentation_delayed = 1;
876 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
877 /* interpolate PTS and DTS if they are not present */
878 if(delay==0 || (delay==1 && pc)){
879 if (presentation_delayed) {
880 /* DTS = decompression timestamp */
881 /* PTS = presentation timestamp */
882 if (pkt->dts == AV_NOPTS_VALUE)
883 pkt->dts = st->last_IP_pts;
884 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
885 if (pkt->dts == AV_NOPTS_VALUE)
886 pkt->dts = st->cur_dts;
888 /* this is tricky: the dts must be incremented by the duration
889 of the frame we are displaying, i.e. the last I- or P-frame */
890 if (st->last_IP_duration == 0)
891 st->last_IP_duration = pkt->duration;
892 if(pkt->dts != AV_NOPTS_VALUE)
893 st->cur_dts = pkt->dts + st->last_IP_duration;
894 st->last_IP_duration = pkt->duration;
895 st->last_IP_pts= pkt->pts;
896 /* cannot compute PTS if not present (we can compute it only
897 by knowing the future) */
898 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
899 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
900 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
901 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
902 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
903 pkt->pts += pkt->duration;
904 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
908 /* presentation is not delayed: PTS and DTS are the same */
909 if(pkt->pts == AV_NOPTS_VALUE)
910 pkt->pts = pkt->dts;
911 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
912 if(pkt->pts == AV_NOPTS_VALUE)
913 pkt->pts = st->cur_dts;
914 pkt->dts = pkt->pts;
915 if(pkt->pts != AV_NOPTS_VALUE)
916 st->cur_dts = pkt->pts + pkt->duration;
920 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
921 st->pts_buffer[0]= pkt->pts;
922 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
923 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
924 if(pkt->dts == AV_NOPTS_VALUE)
925 pkt->dts= st->pts_buffer[0];
926 if(delay>1){
927 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
929 if(pkt->dts > st->cur_dts)
930 st->cur_dts = pkt->dts;
933 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
935 /* update flags */
936 if(is_intra_only(st->codec))
937 pkt->flags |= PKT_FLAG_KEY;
938 else if (pc) {
939 pkt->flags = 0;
940 /* keyframe computation */
941 if (pc->pict_type == FF_I_TYPE)
942 pkt->flags |= PKT_FLAG_KEY;
946 void av_destruct_packet_nofree(AVPacket *pkt)
948 pkt->data = NULL; pkt->size = 0;
951 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
953 AVStream *st;
954 int len, ret, i;
956 av_init_packet(pkt);
958 for(;;) {
959 /* select current input stream component */
960 st = s->cur_st;
961 if (st) {
962 if (!st->need_parsing || !st->parser) {
963 /* no parsing needed: we just output the packet as is */
964 /* raw data support */
965 *pkt = s->cur_pkt;
966 compute_pkt_fields(s, st, NULL, pkt);
967 s->cur_st = NULL;
968 break;
969 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
970 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
971 s->cur_ptr, s->cur_len,
972 s->cur_pkt.pts, s->cur_pkt.dts);
973 s->cur_pkt.pts = AV_NOPTS_VALUE;
974 s->cur_pkt.dts = AV_NOPTS_VALUE;
975 /* increment read pointer */
976 s->cur_ptr += len;
977 s->cur_len -= len;
979 /* return packet if any */
980 if (pkt->size) {
981 got_packet:
982 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
983 pkt->duration = 0;
984 pkt->stream_index = st->index;
985 pkt->pts = st->parser->pts;
986 pkt->dts = st->parser->dts;
987 pkt->destruct = av_destruct_packet_nofree;
988 compute_pkt_fields(s, st, st->parser, pkt);
990 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
991 ff_reduce_index(s, st->index);
992 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
993 0, 0, AVINDEX_KEYFRAME);
996 break;
998 } else {
999 /* free packet */
1000 av_free_packet(&s->cur_pkt);
1001 s->cur_st = NULL;
1003 } else {
1004 /* read next packet */
1005 ret = av_read_packet(s, &s->cur_pkt);
1006 if (ret < 0) {
1007 if (ret == AVERROR(EAGAIN))
1008 return ret;
1009 /* return the last frames, if any */
1010 for(i = 0; i < s->nb_streams; i++) {
1011 st = s->streams[i];
1012 if (st->parser && st->need_parsing) {
1013 av_parser_parse(st->parser, st->codec,
1014 &pkt->data, &pkt->size,
1015 NULL, 0,
1016 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1017 if (pkt->size)
1018 goto got_packet;
1021 /* no more packets: really terminate parsing */
1022 return ret;
1025 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
1026 s->cur_pkt.dts != AV_NOPTS_VALUE &&
1027 s->cur_pkt.pts < s->cur_pkt.dts){
1028 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1029 s->cur_pkt.stream_index,
1030 s->cur_pkt.pts,
1031 s->cur_pkt.dts,
1032 s->cur_pkt.size);
1033 // av_free_packet(&s->cur_pkt);
1034 // return -1;
1037 st = s->streams[s->cur_pkt.stream_index];
1038 if(s->debug & FF_FDEBUG_TS)
1039 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1040 s->cur_pkt.stream_index,
1041 s->cur_pkt.pts,
1042 s->cur_pkt.dts,
1043 s->cur_pkt.size,
1044 s->cur_pkt.flags);
1046 s->cur_st = st;
1047 s->cur_ptr = s->cur_pkt.data;
1048 s->cur_len = s->cur_pkt.size;
1049 if (st->need_parsing && !st->parser) {
1050 st->parser = av_parser_init(st->codec->codec_id);
1051 if (!st->parser) {
1052 /* no parser available: just output the raw packets */
1053 st->need_parsing = AVSTREAM_PARSE_NONE;
1054 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1055 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1057 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1058 st->parser->next_frame_offset=
1059 st->parser->cur_offset= s->cur_pkt.pos;
1064 if(s->debug & FF_FDEBUG_TS)
1065 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1066 pkt->stream_index,
1067 pkt->pts,
1068 pkt->dts,
1069 pkt->size,
1070 pkt->flags);
1072 return 0;
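/* Unlike av_read_packet(), av_read_frame() returns parsed frames. When
 * AVFMT_FLAG_GENPTS is set, packets are buffered and a missing pts is filled
 * in from the dts of a later packet of the same stream before the packet is
 * handed to the caller. */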
1075 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1077 AVPacketList *pktl;
1078 int eof=0;
1079 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1081 for(;;){
1082 pktl = s->packet_buffer;
1083 if (pktl) {
1084 AVPacket *next_pkt= &pktl->pkt;
1086 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1087 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1088 if( pktl->pkt.stream_index == next_pkt->stream_index
1089 && next_pkt->dts < pktl->pkt.dts
1090 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1091 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1092 next_pkt->pts= pktl->pkt.dts;
1094 pktl= pktl->next;
1096 pktl = s->packet_buffer;
1099 if( next_pkt->pts != AV_NOPTS_VALUE
1100 || next_pkt->dts == AV_NOPTS_VALUE
1101 || !genpts || eof){
1102 /* read packet from packet buffer, if there is data */
1103 *pkt = *next_pkt;
1104 s->packet_buffer = pktl->next;
1105 av_free(pktl);
1106 return 0;
1109 if(genpts){
1110 int ret= av_read_frame_internal(s, pkt);
1111 if(ret<0){
1112 if(pktl && ret != AVERROR(EAGAIN)){
1113 eof=1;
1114 continue;
1115 }else
1116 return ret;
1119 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1120 &s->packet_buffer_end)) < 0)
1121 return AVERROR(ENOMEM);
1122 }else{
1123 assert(!s->packet_buffer);
1124 return av_read_frame_internal(s, pkt);
1129 /* XXX: suppress the packet queue */
1130 static void flush_packet_queue(AVFormatContext *s)
1132 AVPacketList *pktl;
1134 for(;;) {
1135 pktl = s->packet_buffer;
1136 if (!pktl)
1137 break;
1138 s->packet_buffer = pktl->next;
1139 av_free_packet(&pktl->pkt);
1140 av_free(pktl);
1144 /*******************************************************/
1145 /* seek support */
1147 int av_find_default_stream_index(AVFormatContext *s)
1149 int first_audio_index = -1;
1150 int i;
1151 AVStream *st;
1153 if (s->nb_streams <= 0)
1154 return -1;
1155 for(i = 0; i < s->nb_streams; i++) {
1156 st = s->streams[i];
1157 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1158 return i;
1160 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1161 first_audio_index = i;
1163 return first_audio_index >= 0 ? first_audio_index : 0;
1167 * Flush the frame reader.
1169 static void av_read_frame_flush(AVFormatContext *s)
1171 AVStream *st;
1172 int i;
1174 flush_packet_queue(s);
1176 /* free previous packet */
1177 if (s->cur_st) {
1178 if (s->cur_st->parser)
1179 av_free_packet(&s->cur_pkt);
1180 s->cur_st = NULL;
1182 /* fail safe */
1183 s->cur_ptr = NULL;
1184 s->cur_len = 0;
1186 /* for each stream, reset read state */
1187 for(i = 0; i < s->nb_streams; i++) {
1188 st = s->streams[i];
1190 if (st->parser) {
1191 av_parser_close(st->parser);
1192 st->parser = NULL;
1194 st->last_IP_pts = AV_NOPTS_VALUE;
1195 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1199 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1200 int i;
1202 for(i = 0; i < s->nb_streams; i++) {
1203 AVStream *st = s->streams[i];
1205 st->cur_dts = av_rescale(timestamp,
1206 st->time_base.den * (int64_t)ref_st->time_base.num,
1207 st->time_base.num * (int64_t)ref_st->time_base.den);
1211 void ff_reduce_index(AVFormatContext *s, int stream_index)
1213 AVStream *st= s->streams[stream_index];
1214 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1216 if((unsigned)st->nb_index_entries >= max_entries){
1217 int i;
1218 for(i=0; 2*i<st->nb_index_entries; i++)
1219 st->index_entries[i]= st->index_entries[2*i];
1220 st->nb_index_entries= i;
1224 int av_add_index_entry(AVStream *st,
1225 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1227 AVIndexEntry *entries, *ie;
1228 int index;
1230 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1231 return -1;
1233 entries = av_fast_realloc(st->index_entries,
1234 &st->index_entries_allocated_size,
1235 (st->nb_index_entries + 1) *
1236 sizeof(AVIndexEntry));
1237 if(!entries)
1238 return -1;
1240 st->index_entries= entries;
1242 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1244 if(index<0){
1245 index= st->nb_index_entries++;
1246 ie= &entries[index];
1247 assert(index==0 || ie[-1].timestamp < timestamp);
1248 }else{
1249 ie= &entries[index];
1250 if(ie->timestamp != timestamp){
1251 if(ie->timestamp <= timestamp)
1252 return -1;
1253 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1254 st->nb_index_entries++;
1255 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1256 distance= ie->min_distance;
1259 ie->pos = pos;
1260 ie->timestamp = timestamp;
1261 ie->min_distance= distance;
1262 ie->size= size;
1263 ie->flags = flags;
1265 return index;
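/**
 * Binary search in the index. With AVSEEK_FLAG_BACKWARD the last entry with
 * timestamp <= wanted_timestamp is chosen, otherwise the first entry with
 * timestamp >= wanted_timestamp; unless AVSEEK_FLAG_ANY is set, the result is
 * then moved to the nearest keyframe entry in that direction.
 * @return the index of the entry, or -1 if none qualifies
 */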
1268 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1269 int flags)
1271 AVIndexEntry *entries= st->index_entries;
1272 int nb_entries= st->nb_index_entries;
1273 int a, b, m;
1274 int64_t timestamp;
1276 a = - 1;
1277 b = nb_entries;
1279 while (b - a > 1) {
1280 m = (a + b) >> 1;
1281 timestamp = entries[m].timestamp;
1282 if(timestamp >= wanted_timestamp)
1283 b = m;
1284 if(timestamp <= wanted_timestamp)
1285 a = m;
1287 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1289 if(!(flags & AVSEEK_FLAG_ANY)){
1290 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1291 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1295 if(m == nb_entries)
1296 return -1;
1297 return m;
1300 #define DEBUG_SEEK
1302 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1303 AVInputFormat *avif= s->iformat;
1304 int64_t pos_min, pos_max, pos, pos_limit;
1305 int64_t ts_min, ts_max, ts;
1306 int index;
1307 AVStream *st;
1309 if (stream_index < 0)
1310 return -1;
1312 #ifdef DEBUG_SEEK
1313 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1314 #endif
1316 ts_max=
1317 ts_min= AV_NOPTS_VALUE;
1318 pos_limit= -1; //gcc falsely says it may be uninitialized
1320 st= s->streams[stream_index];
1321 if(st->index_entries){
1322 AVIndexEntry *e;
1324 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1325 index= FFMAX(index, 0);
1326 e= &st->index_entries[index];
1328 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1329 pos_min= e->pos;
1330 ts_min= e->timestamp;
1331 #ifdef DEBUG_SEEK
1332 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1333 pos_min,ts_min);
1334 #endif
1335 }else{
1336 assert(index==0);
1339 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1340 assert(index < st->nb_index_entries);
1341 if(index >= 0){
1342 e= &st->index_entries[index];
1343 assert(e->timestamp >= target_ts);
1344 pos_max= e->pos;
1345 ts_max= e->timestamp;
1346 pos_limit= pos_max - e->min_distance;
1347 #ifdef DEBUG_SEEK
1348 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1349 pos_max,pos_limit, ts_max);
1350 #endif
1354 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1355 if(pos<0)
1356 return -1;
1358 /* do the seek */
1359 url_fseek(s->pb, pos, SEEK_SET);
1361 av_update_cur_dts(s, st, ts);
1363 return 0;
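/* Generic timestamp search used by av_seek_frame_binary(): starting from the
 * known (position, timestamp) bounds, it guesses a file position for the
 * target timestamp by linear interpolation, falls back to bisection when the
 * guess stops making progress and finally to a linear scan, narrowing the
 * interval with read_timestamp() until pos_min reaches pos_limit. */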
1366 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1367 int64_t pos, ts;
1368 int64_t start_pos, filesize;
1369 int no_change;
1371 #ifdef DEBUG_SEEK
1372 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1373 #endif
1375 if(ts_min == AV_NOPTS_VALUE){
1376 pos_min = s->data_offset;
1377 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1378 if (ts_min == AV_NOPTS_VALUE)
1379 return -1;
1382 if(ts_max == AV_NOPTS_VALUE){
1383 int step= 1024;
1384 filesize = url_fsize(s->pb);
1385 pos_max = filesize - 1;
1387 pos_max -= step;
1388 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1389 step += step;
1390 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1391 if (ts_max == AV_NOPTS_VALUE)
1392 return -1;
1394 for(;;){
1395 int64_t tmp_pos= pos_max + 1;
1396 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1397 if(tmp_ts == AV_NOPTS_VALUE)
1398 break;
1399 ts_max= tmp_ts;
1400 pos_max= tmp_pos;
1401 if(tmp_pos >= filesize)
1402 break;
1404 pos_limit= pos_max;
1407 if(ts_min > ts_max){
1408 return -1;
1409 }else if(ts_min == ts_max){
1410 pos_limit= pos_min;
1413 no_change=0;
1414 while (pos_min < pos_limit) {
1415 #ifdef DEBUG_SEEK
1416 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1417 pos_min, pos_max,
1418 ts_min, ts_max);
1419 #endif
1420 assert(pos_limit <= pos_max);
1422 if(no_change==0){
1423 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1424 // interpolate position (better than dichotomy)
1425 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1426 + pos_min - approximate_keyframe_distance;
1427 }else if(no_change==1){
1428 // bisection, if interpolation failed to change min or max pos last time
1429 pos = (pos_min + pos_limit)>>1;
1430 }else{
1431 /* linear search if bisection failed, can only happen if there
1432 are very few or no keyframes between min/max */
1433 pos=pos_min;
1435 if(pos <= pos_min)
1436 pos= pos_min + 1;
1437 else if(pos > pos_limit)
1438 pos= pos_limit;
1439 start_pos= pos;
1441 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1442 if(pos == pos_max)
1443 no_change++;
1444 else
1445 no_change=0;
1446 #ifdef DEBUG_SEEK
1447 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1448 #endif
1449 if(ts == AV_NOPTS_VALUE){
1450 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1451 return -1;
1453 assert(ts != AV_NOPTS_VALUE);
1454 if (target_ts <= ts) {
1455 pos_limit = start_pos - 1;
1456 pos_max = pos;
1457 ts_max = ts;
1459 if (target_ts >= ts) {
1460 pos_min = pos;
1461 ts_min = ts;
1465 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1466 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1467 #ifdef DEBUG_SEEK
1468 pos_min = pos;
1469 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1470 pos_min++;
1471 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1472 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1473 pos, ts_min, target_ts, ts_max);
1474 #endif
1475 *ts_ret= ts;
1476 return pos;
1479 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1480 int64_t pos_min, pos_max;
1481 #if 0
1482 AVStream *st;
1484 if (stream_index < 0)
1485 return -1;
1487 st= s->streams[stream_index];
1488 #endif
1490 pos_min = s->data_offset;
1491 pos_max = url_fsize(s->pb) - 1;
1493 if (pos < pos_min) pos= pos_min;
1494 else if(pos > pos_max) pos= pos_max;
1496 url_fseek(s->pb, pos, SEEK_SET);
1498 #if 0
1499 av_update_cur_dts(s, st, ts);
1500 #endif
1501 return 0;
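/* Index based fallback seek: if the target timestamp is not in the index (or
 * lies at or after the last entry), packets are read forward from the last
 * indexed position (or from the start of the file) so that the index grows,
 * then the search is retried and the file position is set from the matching
 * entry. */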
1504 static int av_seek_frame_generic(AVFormatContext *s,
1505 int stream_index, int64_t timestamp, int flags)
1507 int index, ret;
1508 AVStream *st;
1509 AVIndexEntry *ie;
1511 st = s->streams[stream_index];
1513 index = av_index_search_timestamp(st, timestamp, flags);
1515 if(index < 0 || index==st->nb_index_entries-1){
1516 int i;
1517 AVPacket pkt;
1519 if(st->nb_index_entries){
1520 assert(st->index_entries);
1521 ie= &st->index_entries[st->nb_index_entries-1];
1522 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1523 return ret;
1524 av_update_cur_dts(s, st, ie->timestamp);
1525 }else{
1526 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1527 return ret;
1529 for(i=0;; i++) {
1530 int ret = av_read_frame(s, &pkt);
1531 if(ret<0)
1532 break;
1533 av_free_packet(&pkt);
1534 if(stream_index == pkt.stream_index){
1535 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1536 break;
1539 index = av_index_search_timestamp(st, timestamp, flags);
1541 if (index < 0)
1542 return -1;
1544 av_read_frame_flush(s);
1545 if (s->iformat->read_seek){
1546 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1547 return 0;
1549 ie = &st->index_entries[index];
1550 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1551 return ret;
1552 av_update_cur_dts(s, st, ie->timestamp);
1554 return 0;
1557 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1559 int ret;
1560 AVStream *st;
1562 av_read_frame_flush(s);
1564 if(flags & AVSEEK_FLAG_BYTE)
1565 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1567 if(stream_index < 0){
1568 stream_index= av_find_default_stream_index(s);
1569 if(stream_index < 0)
1570 return -1;
1572 st= s->streams[stream_index];
1573 /* timestamp for default must be expressed in AV_TIME_BASE units */
1574 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1577 /* first, we try the format specific seek */
1578 if (s->iformat->read_seek)
1579 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1580 else
1581 ret = -1;
1582 if (ret >= 0) {
1583 return 0;
1586 if(s->iformat->read_timestamp)
1587 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1588 else
1589 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1592 /*******************************************************/
1595 * Check whether any stream of the file reports an accurate duration.
1597 * @return TRUE if at least one stream has a known duration
1599 static int av_has_duration(AVFormatContext *ic)
1601 int i;
1602 AVStream *st;
1604 for(i = 0;i < ic->nb_streams; i++) {
1605 st = ic->streams[i];
1606 if (st->duration != AV_NOPTS_VALUE)
1607 return 1;
1609 return 0;
1613 * Estimate the file's global timings from those of each stream.
1615 * Also computes the global bitrate if possible.
1617 static void av_update_stream_timings(AVFormatContext *ic)
1619 int64_t start_time, start_time1, end_time, end_time1;
1620 int64_t duration, duration1;
1621 int i;
1622 AVStream *st;
1624 start_time = INT64_MAX;
1625 end_time = INT64_MIN;
1626 duration = INT64_MIN;
1627 for(i = 0;i < ic->nb_streams; i++) {
1628 st = ic->streams[i];
1629 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1630 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1631 if (start_time1 < start_time)
1632 start_time = start_time1;
1633 if (st->duration != AV_NOPTS_VALUE) {
1634 end_time1 = start_time1
1635 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1636 if (end_time1 > end_time)
1637 end_time = end_time1;
1640 if (st->duration != AV_NOPTS_VALUE) {
1641 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1642 if (duration1 > duration)
1643 duration = duration1;
1646 if (start_time != INT64_MAX) {
1647 ic->start_time = start_time;
1648 if (end_time != INT64_MIN) {
1649 if (end_time - start_time > duration)
1650 duration = end_time - start_time;
1653 if (duration != INT64_MIN) {
1654 ic->duration = duration;
1655 if (ic->file_size > 0) {
1656 /* compute the bitrate */
1657 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1658 (double)ic->duration;
1663 static void fill_all_stream_timings(AVFormatContext *ic)
1665 int i;
1666 AVStream *st;
1668 av_update_stream_timings(ic);
1669 for(i = 0;i < ic->nb_streams; i++) {
1670 st = ic->streams[i];
1671 if (st->start_time == AV_NOPTS_VALUE) {
1672 if(ic->start_time != AV_NOPTS_VALUE)
1673 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1674 if(ic->duration != AV_NOPTS_VALUE)
1675 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1680 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1682 int64_t filesize, duration;
1683 int bit_rate, i;
1684 AVStream *st;
1686 /* if bit_rate is already set, we believe it */
1687 if (ic->bit_rate == 0) {
1688 bit_rate = 0;
1689 for(i=0;i<ic->nb_streams;i++) {
1690 st = ic->streams[i];
1691 bit_rate += st->codec->bit_rate;
1693 ic->bit_rate = bit_rate;
1696 /* if duration is already set, we believe it */
1697 if (ic->duration == AV_NOPTS_VALUE &&
1698 ic->bit_rate != 0 &&
1699 ic->file_size != 0) {
1700 filesize = ic->file_size;
1701 if (filesize > 0) {
1702 for(i = 0; i < ic->nb_streams; i++) {
1703 st = ic->streams[i];
1704 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1705 if (st->duration == AV_NOPTS_VALUE)
1706 st->duration = duration;
1712 #define DURATION_MAX_READ_SIZE 250000
1714 /* only usable for MPEG-PS streams */
1715 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1717 AVPacket pkt1, *pkt = &pkt1;
1718 AVStream *st;
1719 int read_size, i, ret;
1720 int64_t end_time;
1721 int64_t filesize, offset, duration;
1723 /* free previous packet */
1724 if (ic->cur_st && ic->cur_st->parser)
1725 av_free_packet(&ic->cur_pkt);
1726 ic->cur_st = NULL;
1728 /* flush packet queue */
1729 flush_packet_queue(ic);
1731 for(i=0;i<ic->nb_streams;i++) {
1732 st = ic->streams[i];
1733 if (st->parser) {
1734 av_parser_close(st->parser);
1735 st->parser= NULL;
1739 /* we read the first packets to get the first PTS (not fully
1740 accurate, but it is enough now) */
1741 url_fseek(ic->pb, 0, SEEK_SET);
1742 read_size = 0;
1743 for(;;) {
1744 if (read_size >= DURATION_MAX_READ_SIZE)
1745 break;
1746 /* if all info is available, we can stop */
1747 for(i = 0;i < ic->nb_streams; i++) {
1748 st = ic->streams[i];
1749 if (st->start_time == AV_NOPTS_VALUE)
1750 break;
1752 if (i == ic->nb_streams)
1753 break;
1755 ret = av_read_packet(ic, pkt);
1756 if (ret != 0)
1757 break;
1758 read_size += pkt->size;
1759 st = ic->streams[pkt->stream_index];
1760 if (pkt->pts != AV_NOPTS_VALUE) {
1761 if (st->start_time == AV_NOPTS_VALUE)
1762 st->start_time = pkt->pts;
1764 av_free_packet(pkt);
1767 /* estimate the end time (duration) */
1768 /* XXX: may need to support wrapping */
1769 filesize = ic->file_size;
1770 offset = filesize - DURATION_MAX_READ_SIZE;
1771 if (offset < 0)
1772 offset = 0;
1774 url_fseek(ic->pb, offset, SEEK_SET);
1775 read_size = 0;
1776 for(;;) {
1777 if (read_size >= DURATION_MAX_READ_SIZE)
1778 break;
1780 ret = av_read_packet(ic, pkt);
1781 if (ret != 0)
1782 break;
1783 read_size += pkt->size;
1784 st = ic->streams[pkt->stream_index];
1785 if (pkt->pts != AV_NOPTS_VALUE &&
1786 st->start_time != AV_NOPTS_VALUE) {
1787 end_time = pkt->pts;
1788 duration = end_time - st->start_time;
1789 if (duration > 0) {
1790 if (st->duration == AV_NOPTS_VALUE ||
1791 st->duration < duration)
1792 st->duration = duration;
1795 av_free_packet(pkt);
1798 fill_all_stream_timings(ic);
1800 url_fseek(ic->pb, old_offset, SEEK_SET);
1801 for(i=0; i<ic->nb_streams; i++){
1802 st= ic->streams[i];
1803 st->cur_dts= st->first_dts;
1804 st->last_IP_pts = AV_NOPTS_VALUE;
1808 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1810 int64_t file_size;
1812 /* get the file size, if possible */
1813 if (ic->iformat->flags & AVFMT_NOFILE) {
1814 file_size = 0;
1815 } else {
1816 file_size = url_fsize(ic->pb);
1817 if (file_size < 0)
1818 file_size = 0;
1820 ic->file_size = file_size;
1822 if ((!strcmp(ic->iformat->name, "mpeg") ||
1823 !strcmp(ic->iformat->name, "mpegts")) &&
1824 file_size && !url_is_streamed(ic->pb)) {
1825 /* get accurate estimate from the PTSes */
1826 av_estimate_timings_from_pts(ic, old_offset);
1827 } else if (av_has_duration(ic)) {
1828 /* at least one component has timings - we use them for all
1829 the components */
1830 fill_all_stream_timings(ic);
1831 } else {
1832 /* less precise: use bitrate info */
1833 av_estimate_timings_from_bit_rate(ic);
1835 av_update_stream_timings(ic);
1837 #if 0
1839 int i;
1840 AVStream *st;
1841 for(i = 0;i < ic->nb_streams; i++) {
1842 st = ic->streams[i];
1843 printf("%d: start_time: %0.3f duration: %0.3f\n",
1844 i, (double)st->start_time / AV_TIME_BASE,
1845 (double)st->duration / AV_TIME_BASE);
1847 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1848 (double)ic->start_time / AV_TIME_BASE,
1849 (double)ic->duration / AV_TIME_BASE,
1850 ic->bit_rate / 1000);
1852 #endif
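/* A stream is considered fully described once its codec id is known together
 * with the essential parameters for its type: sample rate, channel count and
 * sample format for audio (plus frame_size for Vorbis and AAC), width and
 * pixel format for video. */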
1855 static int has_codec_parameters(AVCodecContext *enc)
1857 int val;
1858 switch(enc->codec_type) {
1859 case CODEC_TYPE_AUDIO:
1860 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1861 if(!enc->frame_size &&
1862 (enc->codec_id == CODEC_ID_VORBIS ||
1863 enc->codec_id == CODEC_ID_AAC))
1864 return 0;
1865 break;
1866 case CODEC_TYPE_VIDEO:
1867 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1868 break;
1869 default:
1870 val = 1;
1871 break;
1873 return enc->codec_id != CODEC_ID_NONE && val != 0;
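/* Last resort used by av_find_stream_info(): open a decoder for the stream
 * and decode the given packet purely to obtain the codec parameters (e.g.
 * dimensions or sample format) that the demuxer could not provide. */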
1876 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1878 int16_t *samples;
1879 AVCodec *codec;
1880 int got_picture, data_size, ret=0;
1881 AVFrame picture;
1883 if(!st->codec->codec){
1884 codec = avcodec_find_decoder(st->codec->codec_id);
1885 if (!codec)
1886 return -1;
1887 ret = avcodec_open(st->codec, codec);
1888 if (ret < 0)
1889 return ret;
1892 if(!has_codec_parameters(st->codec)){
1893 switch(st->codec->codec_type) {
1894 case CODEC_TYPE_VIDEO:
1895 ret = avcodec_decode_video(st->codec, &picture,
1896 &got_picture, data, size);
1897 break;
1898 case CODEC_TYPE_AUDIO:
1899 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1900 samples = av_malloc(data_size);
1901 if (!samples)
1902 goto fail;
1903 ret = avcodec_decode_audio2(st->codec, samples,
1904 &data_size, data, size);
1905 av_free(samples);
1906 break;
1907 default:
1908 break;
1911 fail:
1912 return ret;
1915 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1917 while (tags->id != CODEC_ID_NONE) {
1918 if (tags->id == id)
1919 return tags->tag;
1920 tags++;
1922 return 0;
1925 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1927 int i;
1928 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1929 if(tag == tags[i].tag)
1930 return tags[i].id;
1932 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1933 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1934 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1935 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1936 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1937 return tags[i].id;
1939 return CODEC_ID_NONE;
1942 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1944 int i;
1945 for(i=0; tags && tags[i]; i++){
1946 int tag= codec_get_tag(tags[i], id);
1947 if(tag) return tag;
1949 return 0;
1952 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1954 int i;
1955 for(i=0; tags && tags[i]; i++){
1956 enum CodecID id= codec_get_id(tags[i], tag);
1957 if(id!=CODEC_ID_NONE) return id;
1959 return CODEC_ID_NONE;
1962 static void compute_chapters_end(AVFormatContext *s)
1964 unsigned int i;
1966 for (i=0; i+1<s->nb_chapters; i++)
1967 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1968 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1969 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1970 s->chapters[i]->end = s->chapters[i+1]->start;
1973 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1974 assert(s->start_time != AV_NOPTS_VALUE);
1975 assert(s->duration > 0);
1976 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1977 AV_TIME_BASE_Q,
1978 s->chapters[i]->time_base);
1982 /* absolute maximum size we read until we abort */
1983 #define MAX_READ_SIZE 5000000
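/* Candidate frame rates tried when guessing the real fps of a stream with an
 * unreliable time base: get_std_framerate(i) covers 1/12 fps steps up to just
 * under 60 fps for i < 60*12, plus the NTSC style rates 23.976, 29.97, 59.94,
 * 11.988 and 14.985 fps for the remaining entries (the returned values are
 * scaled by 1001*12, matching the divisor used in av_find_stream_info()). */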
1985 #define MAX_STD_TIMEBASES (60*12+5)
1986 static int get_std_framerate(int i){
1987 if(i<60*12) return i*1001;
1988 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1992 * Is the time base unreliable.
1993 * This is a heuristic to balance between quick acceptance of the values in
1994 * the headers vs. some extra checks.
1995 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1996 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1997 * And there are "variable" fps files this needs to detect as well.
1999 static int tb_unreliable(AVCodecContext *c){
2000 if( c->time_base.den >= 101L*c->time_base.num
2001 || c->time_base.den < 5L*c->time_base.num
2002 /* || c->codec_tag == ff_get_fourcc("DIVX")
2003 || c->codec_tag == ff_get_fourcc("XVID")*/
2004 || c->codec_id == CODEC_ID_MPEG2VIDEO)
2005 return 1;
2006 return 0;
2009 int av_find_stream_info(AVFormatContext *ic)
2011 int i, count, ret, read_size, j;
2012 AVStream *st;
2013 AVPacket pkt1, *pkt;
2014 int64_t last_dts[MAX_STREAMS];
2015 int duration_count[MAX_STREAMS]={0};
2016 double (*duration_error)[MAX_STD_TIMEBASES];
2017 int64_t old_offset = url_ftell(ic->pb);
2018 int64_t codec_info_duration[MAX_STREAMS]={0};
2019 int codec_info_nb_frames[MAX_STREAMS]={0};
2021 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2022 if (!duration_error) return AVERROR(ENOMEM);
2024 for(i=0;i<ic->nb_streams;i++) {
2025 st = ic->streams[i];
2026 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2027 /* if(!st->time_base.num)
2028 st->time_base= */
2029 if(!st->codec->time_base.num)
2030 st->codec->time_base= st->time_base;
2032 //only for the split stuff
2033 if (!st->parser) {
2034 st->parser = av_parser_init(st->codec->codec_id);
2035 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2036 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2041 for(i=0;i<MAX_STREAMS;i++){
2042 last_dts[i]= AV_NOPTS_VALUE;
2045 count = 0;
2046 read_size = 0;
2047 for(;;) {
2048 /* check if one codec still needs to be handled */
2049 for(i=0;i<ic->nb_streams;i++) {
2050 st = ic->streams[i];
2051 if (!has_codec_parameters(st->codec))
2052 break;
2053 /* variable fps and no guess at the real fps */
2054 if( tb_unreliable(st->codec)
2055 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2056 break;
2057 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2058 break;
2059 if(st->first_dts == AV_NOPTS_VALUE)
2060 break;
2062 if (i == ic->nb_streams) {
2063 /* NOTE: if the format has no header, then we need to read
2064 some packets to get most of the streams, so we cannot
2065 stop here */
2066 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2067 /* if we found the info for all the codecs, we can stop */
2068 ret = count;
2069 break;
2072 /* we did not get all the codec info, but we read too much data */
2073 if (read_size >= MAX_READ_SIZE) {
2074 ret = count;
2075 break;
2078 /* NOTE: a new stream can be added here if the file has no header
2079 (AVFMTCTX_NOHEADER) */
2080 ret = av_read_frame_internal(ic, &pkt1);
2081 if (ret < 0) {
2082 /* EOF or error */
2083 ret = -1; /* we could not get all the codec parameters before EOF */
2084 for(i=0;i<ic->nb_streams;i++) {
2085 st = ic->streams[i];
2086 if (!has_codec_parameters(st->codec)){
2087 char buf[256];
2088 avcodec_string(buf, sizeof(buf), st->codec, 0);
2089 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
2090 } else {
2091 ret = 0;
2094 break;
2097 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2098 if(av_dup_packet(pkt) < 0) {
2099 av_free(duration_error);
2100 return AVERROR(ENOMEM);
2103 read_size += pkt->size;
2105 st = ic->streams[pkt->stream_index];
2106 if(codec_info_nb_frames[st->index]>1)
2107 codec_info_duration[st->index] += pkt->duration;
2108 if (pkt->duration != 0)
2109 codec_info_nb_frames[st->index]++;
2112 int index= pkt->stream_index;
2113 int64_t last= last_dts[index];
2114 int64_t duration= pkt->dts - last;
2116 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2117 double dur= duration * av_q2d(st->time_base);
2119 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2120 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2121 if(duration_count[index] < 2)
2122 memset(duration_error[index], 0, sizeof(*duration_error));
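/* For every candidate standard frame rate, measure how far the observed
   inter-packet duration is from an integer number of frame periods at that
   rate and accumulate the squared error; the rate with the smallest
   accumulated error is later picked as r_frame_rate. */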
2123 for(i=1; i<MAX_STD_TIMEBASES; i++){
2124 int framerate= get_std_framerate(i);
2125 int ticks= lrintf(dur*framerate/(1001*12));
2126 double error= dur - ticks*1001*12/(double)framerate;
2127 duration_error[index][i] += error*error;
2129 duration_count[index]++;
2131 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2132 last_dts[pkt->stream_index]= pkt->dts;
2134 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2135 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2136 if(i){
2137 st->codec->extradata_size= i;
2138 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2139 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2140 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2144 /* If we still have no information, try to open the codec and
2145 decompress the frame. We try to avoid this in most cases, as
2146 it takes longer and uses more memory. For MPEG-4, we need to
2147 decompress for QuickTime. */
2148 if (!has_codec_parameters(st->codec) /*&&
2149 (st->codec->codec_id == CODEC_ID_FLV1 ||
2150 st->codec->codec_id == CODEC_ID_H264 ||
2151 st->codec->codec_id == CODEC_ID_H263 ||
2152 st->codec->codec_id == CODEC_ID_H261 ||
2153 st->codec->codec_id == CODEC_ID_VORBIS ||
2154 st->codec->codec_id == CODEC_ID_MJPEG ||
2155 st->codec->codec_id == CODEC_ID_PNG ||
2156 st->codec->codec_id == CODEC_ID_PAM ||
2157 st->codec->codec_id == CODEC_ID_PGM ||
2158 st->codec->codec_id == CODEC_ID_PGMYUV ||
2159 st->codec->codec_id == CODEC_ID_PBM ||
2160 st->codec->codec_id == CODEC_ID_PPM ||
2161 st->codec->codec_id == CODEC_ID_SHORTEN ||
2162 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2163 try_decode_frame(st, pkt->data, pkt->size);
2165 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2166 break;
2168 count++;
2171 // close codecs which were opened in try_decode_frame()
2172 for(i=0;i<ic->nb_streams;i++) {
2173 st = ic->streams[i];
2174 if(st->codec->codec)
2175 avcodec_close(st->codec);
2177 for(i=0;i<ic->nb_streams;i++) {
2178 st = ic->streams[i];
2179 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2180 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2181 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2183 if(duration_count[i]
2184 && tb_unreliable(st->codec) /*&&
2185 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2186 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2187 double best_error= 2*av_q2d(st->time_base);
2188 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2190 for(j=1; j<MAX_STD_TIMEBASES; j++){
2191 double error= duration_error[i][j] * get_std_framerate(j);
2192 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2193 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2194 if(error < best_error){
2195 best_error= error;
2196 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2201 if (!st->r_frame_rate.num){
2202 if( st->codec->time_base.den * (int64_t)st->time_base.num
2203 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2204 st->r_frame_rate.num = st->codec->time_base.den;
2205 st->r_frame_rate.den = st->codec->time_base.num;
2206 }else{
2207 st->r_frame_rate.num = st->time_base.den;
2208 st->r_frame_rate.den = st->time_base.num;
2211 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2212 if(!st->codec->bits_per_coded_sample)
2213 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2217 av_estimate_timings(ic, old_offset);
2219 compute_chapters_end(ic);
2221 #if 0
2222 /* correct DTS for B-frame streams with no timestamps */
2223 for(i=0;i<ic->nb_streams;i++) {
2224 st = ic->streams[i];
2225 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2226 if(b-frames){
2227 ppktl = &ic->packet_buffer;
2228 while(ppkt1){
2229 if(ppkt1->stream_index != i)
2230 continue;
2231 if(ppkt1->pkt->dts < 0)
2232 break;
2233 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2234 break;
2235 ppkt1->pkt->dts -= delta;
2236 ppkt1= ppkt1->next;
2238 if(ppkt1)
2239 continue;
2240 st->cur_dts -= delta;
2244 #endif
2246 av_free(duration_error);
2248 return ret;
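/* Illustrative usage sketch (not part of libavformat): a typical demuxing
 * sequence around av_find_stream_info(). The function name example_demux and
 * the error handling are placeholders; it assumes av_register_all() has been
 * called and uses only the public API of this library version. */
#if 0
static int example_demux(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;                     /* could not open/recognize the file */
    if (av_find_stream_info(ic) < 0) { /* fill in the codec parameters */
        av_close_input_file(ic);
        return -1;
    }
    dump_format(ic, 0, filename, 0);   /* print what was detected */
    while (av_read_frame(ic, &pkt) >= 0) {
        /* consume pkt.data/pkt.size for stream pkt.stream_index here */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
    return 0;
}
#endif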
2251 /*******************************************************/
2253 int av_read_play(AVFormatContext *s)
2255 if (s->iformat->read_play)
2256 return s->iformat->read_play(s);
2257 if (s->pb)
2258 return av_url_read_fpause(s->pb, 0);
2259 return AVERROR(ENOSYS);
2262 int av_read_pause(AVFormatContext *s)
2264 if (s->iformat->read_pause)
2265 return s->iformat->read_pause(s);
2266 if (s->pb)
2267 return av_url_read_fpause(s->pb, 1);
2268 return AVERROR(ENOSYS);
2271 void av_close_input_stream(AVFormatContext *s)
2273 int i;
2274 AVStream *st;
2276 /* free previous packet */
2277 if (s->cur_st && s->cur_st->parser)
2278 av_free_packet(&s->cur_pkt);
2280 if (s->iformat->read_close)
2281 s->iformat->read_close(s);
2282 for(i=0;i<s->nb_streams;i++) {
2283 /* free all data in a stream component */
2284 st = s->streams[i];
2285 if (st->parser) {
2286 av_parser_close(st->parser);
2288 av_metadata_free(&st->metadata);
2289 av_free(st->index_entries);
2290 av_free(st->codec->extradata);
2291 av_free(st->codec);
2292 av_free(st->filename);
2293 av_free(st->priv_data);
2294 av_free(st);
2296 for(i=s->nb_programs-1; i>=0; i--) {
2297 av_freep(&s->programs[i]->provider_name);
2298 av_freep(&s->programs[i]->name);
2299 av_metadata_free(&s->programs[i]->metadata);
2300 av_freep(&s->programs[i]->stream_index);
2301 av_freep(&s->programs[i]);
2303 av_freep(&s->programs);
2304 flush_packet_queue(s);
2305 av_freep(&s->priv_data);
2306 while(s->nb_chapters--) {
2307 av_free(s->chapters[s->nb_chapters]->title);
2308 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2309 av_free(s->chapters[s->nb_chapters]);
2311 av_freep(&s->chapters);
2312 av_metadata_free(&s->metadata);
2313 av_free(s);
2316 void av_close_input_file(AVFormatContext *s)
2318 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2319 av_close_input_stream(s);
2320 if (pb)
2321 url_fclose(pb);
2324 AVStream *av_new_stream(AVFormatContext *s, int id)
2326 AVStream *st;
2327 int i;
2329 if (s->nb_streams >= MAX_STREAMS)
2330 return NULL;
2332 st = av_mallocz(sizeof(AVStream));
2333 if (!st)
2334 return NULL;
2336 st->codec= avcodec_alloc_context();
2337 if (s->iformat) {
2338 /* no default bitrate if decoding */
2339 st->codec->bit_rate = 0;
2341 st->index = s->nb_streams;
2342 st->id = id;
2343 st->start_time = AV_NOPTS_VALUE;
2344 st->duration = AV_NOPTS_VALUE;
2345 /* We set the current DTS to 0 so that formats that have durations
2346 but no timestamps still get some timestamps; formats with some
2347 unknown timestamps have their first few packets buffered and the
2348 timestamps corrected before they are returned to the user */
2349 st->cur_dts = 0;
2350 st->first_dts = AV_NOPTS_VALUE;
2352 /* default pts setting is MPEG-like */
2353 av_set_pts_info(st, 33, 1, 90000);
2354 st->last_IP_pts = AV_NOPTS_VALUE;
2355 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2356 st->pts_buffer[i]= AV_NOPTS_VALUE;
2358 st->sample_aspect_ratio = (AVRational){0,1};
2360 s->streams[s->nb_streams++] = st;
2361 return st;
2364 AVProgram *av_new_program(AVFormatContext *ac, int id)
2366 AVProgram *program=NULL;
2367 int i;
2369 #ifdef DEBUG_SI
2370 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2371 #endif
2373 for(i=0; i<ac->nb_programs; i++)
2374 if(ac->programs[i]->id == id)
2375 program = ac->programs[i];
2377 if(!program){
2378 program = av_mallocz(sizeof(AVProgram));
2379 if (!program)
2380 return NULL;
2381 dynarray_add(&ac->programs, &ac->nb_programs, program);
2382 program->discard = AVDISCARD_NONE;
2384 program->id = id;
2386 return program;
2389 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2391 assert(!provider_name == !name);
2392 if(name){
2393 av_free(program->provider_name);
2394 av_free(program-> name);
2395 program->provider_name = av_strdup(provider_name);
2396 program-> name = av_strdup( name);
2400 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2402 AVChapter *chapter = NULL;
2403 int i;
2405 for(i=0; i<s->nb_chapters; i++)
2406 if(s->chapters[i]->id == id)
2407 chapter = s->chapters[i];
2409 if(!chapter){
2410 chapter= av_mallocz(sizeof(AVChapter));
2411 if(!chapter)
2412 return NULL;
2413 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2415 av_free(chapter->title);
2416 chapter->title = av_strdup(title);
2417 chapter->id = id;
2418 chapter->time_base= time_base;
2419 chapter->start = start;
2420 chapter->end = end;
2422 return chapter;
2425 /************************************************************/
2426 /* output media file */
2428 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2430 int ret;
2432 if (s->oformat->priv_data_size > 0) {
2433 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2434 if (!s->priv_data)
2435 return AVERROR(ENOMEM);
2436 } else
2437 s->priv_data = NULL;
2439 if (s->oformat->set_parameters) {
2440 ret = s->oformat->set_parameters(s, ap);
2441 if (ret < 0)
2442 return ret;
2444 return 0;
2447 int av_write_header(AVFormatContext *s)
2449 int ret, i;
2450 AVStream *st;
2452 // some sanity checks
2453 for(i=0;i<s->nb_streams;i++) {
2454 st = s->streams[i];
2456 switch (st->codec->codec_type) {
2457 case CODEC_TYPE_AUDIO:
2458 if(st->codec->sample_rate<=0){
2459 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2460 return -1;
2462 if(!st->codec->block_align)
2463 st->codec->block_align = st->codec->channels *
2464 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2465 break;
2466 case CODEC_TYPE_VIDEO:
2467 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2468 av_log(s, AV_LOG_ERROR, "time base not set\n");
2469 return -1;
2471 if(st->codec->width<=0 || st->codec->height<=0){
2472 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2473 return -1;
2475 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2476 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2477 return -1;
2479 break;
2482 if(s->oformat->codec_tag){
2483 if(st->codec->codec_tag){
2484 //FIXME
2485 //check that tag + id is in the table
2486 //if neither is in the table -> OK
2487 //if tag is in the table with another id -> FAIL
2488 //if id is in the table with another tag -> FAIL unless strict < ?
2489 }else
2490 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2494 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2495 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2496 if (!s->priv_data)
2497 return AVERROR(ENOMEM);
2500 #if LIBAVFORMAT_VERSION_MAJOR < 53
2501 ff_metadata_sync_compat(s);
2502 #endif
2504 if(s->oformat->write_header){
2505 ret = s->oformat->write_header(s);
2506 if (ret < 0)
2507 return ret;
2510 /* init PTS generation */
2511 for(i=0;i<s->nb_streams;i++) {
2512 int64_t den = AV_NOPTS_VALUE;
2513 st = s->streams[i];
2515 switch (st->codec->codec_type) {
2516 case CODEC_TYPE_AUDIO:
2517 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2518 break;
2519 case CODEC_TYPE_VIDEO:
2520 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2521 break;
2522 default:
2523 break;
2525 if (den != AV_NOPTS_VALUE) {
2526 if (den <= 0)
2527 return AVERROR_INVALIDDATA;
2528 av_frac_init(&st->pts, 0, 0, den);
2531 return 0;
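/**
 * Fill in missing pts/dts/duration of an outgoing packet and update the
 * per-stream fractional pts counter; fails if timestamps are not strictly
 * monotonically increasing or if pts < dts.
 */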
2534 //FIXME merge with compute_pkt_fields
2535 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2536 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2537 int num, den, frame_size, i;
2539 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2541 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2542 return -1;*/
2544 /* duration field */
2545 if (pkt->duration == 0) {
2546 compute_frame_duration(&num, &den, st, NULL, pkt);
2547 if (den && num) {
2548 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2552 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2553 pkt->pts= pkt->dts;
2555 //XXX/FIXME this is a temporary hack until all encoders output pts
2556 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2557 pkt->dts=
2558 // pkt->pts= st->cur_dts;
2559 pkt->pts= st->pts.val;
2562 //calculate dts from pts
2563 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2564 st->pts_buffer[0]= pkt->pts;
2565 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2566 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2567 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2568 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2570 pkt->dts= st->pts_buffer[0];
2573 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2574 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2575 return -1;
2577 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2578 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2579 return -1;
2582 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2583 st->cur_dts= pkt->dts;
2584 st->pts.val= pkt->dts;
2586 /* update pts */
2587 switch (st->codec->codec_type) {
2588 case CODEC_TYPE_AUDIO:
2589 frame_size = get_audio_frame_size(st->codec, pkt->size);
2591 /* HACK/FIXME: we skip the initial zero-sized packets as they most
2592 likely correspond to the encoder delay, but it would be better if
2593 we had the real timestamps from the encoder */
2594 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2595 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2597 break;
2598 case CODEC_TYPE_VIDEO:
2599 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2600 break;
2601 default:
2602 break;
2604 return 0;
2607 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2609 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2611 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2612 return ret;
2614 ret= s->oformat->write_packet(s, pkt);
2615 if(!ret)
2616 ret= url_ferror(s->pb);
2617 return ret;
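/**
 * Buffer the incoming packet in dts order (compared across streams in a
 * common time base) and return the head of the buffer once every stream has
 * at least one packet queued, or unconditionally when flushing.
 */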
2620 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2621 AVPacketList *pktl, **next_point, *this_pktl;
2622 int stream_count=0;
2623 int streams[MAX_STREAMS];
2625 if(pkt){
2626 AVStream *st= s->streams[ pkt->stream_index];
2628 // assert(pkt->destruct != av_destruct_packet); //FIXME
2630 this_pktl = av_mallocz(sizeof(AVPacketList));
2631 this_pktl->pkt= *pkt;
2632 if(pkt->destruct == av_destruct_packet)
2633 pkt->destruct= NULL; // not shared -> must keep original from being freed
2634 else
2635 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2637 next_point = &s->packet_buffer;
2638 while(*next_point){
2639 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2640 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2641 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2642 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2643 break;
2644 next_point= &(*next_point)->next;
2646 this_pktl->next= *next_point;
2647 *next_point= this_pktl;
2650 memset(streams, 0, sizeof(streams));
2651 pktl= s->packet_buffer;
2652 while(pktl){
2653 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2654 if(streams[ pktl->pkt.stream_index ] == 0)
2655 stream_count++;
2656 streams[ pktl->pkt.stream_index ]++;
2657 pktl= pktl->next;
2660 if(stream_count && (s->nb_streams == stream_count || flush)){
2661 pktl= s->packet_buffer;
2662 *out= pktl->pkt;
2664 s->packet_buffer= pktl->next;
2665 av_freep(&pktl);
2666 return 1;
2667 }else{
2668 av_init_packet(out);
2669 return 0;
2674 * Interleaves an AVPacket correctly so it can be muxed.
2675 * @param out the interleaved packet will be output here
2676 * @param in the input packet
2677 * @param flush 1 if no further packets are available as input and all
2678 * remaining packets should be output
2679 * @return 1 if a packet was output, 0 if no packet could be output,
2680 * < 0 if an error occurred
2682 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2683 if(s->oformat->interleave_packet)
2684 return s->oformat->interleave_packet(s, out, in, flush);
2685 else
2686 return av_interleave_packet_per_dts(s, out, in, flush);
2689 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2690 AVStream *st= s->streams[ pkt->stream_index];
2692 //FIXME/XXX/HACK drop zero sized packets
2693 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2694 return 0;
2696 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2697 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2698 return -1;
2700 if(pkt->dts == AV_NOPTS_VALUE)
2701 return -1;
2703 for(;;){
2704 AVPacket opkt;
2705 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2706 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2707 return ret;
2709 ret= s->oformat->write_packet(s, &opkt);
2711 av_free_packet(&opkt);
2712 pkt= NULL;
2714 if(ret<0)
2715 return ret;
2716 if(url_ferror(s->pb))
2717 return url_ferror(s->pb);
2721 int av_write_trailer(AVFormatContext *s)
2723 int ret, i;
2725 for(;;){
2726 AVPacket pkt;
2727 ret= av_interleave_packet(s, &pkt, NULL, 1);
2728 if(ret<0) //FIXME cleanup needed for ret<0 ?
2729 goto fail;
2730 if(!ret)
2731 break;
2733 ret= s->oformat->write_packet(s, &pkt);
2735 av_free_packet(&pkt);
2737 if(ret<0)
2738 goto fail;
2739 if(url_ferror(s->pb))
2740 goto fail;
2743 if(s->oformat->write_trailer)
2744 ret = s->oformat->write_trailer(s);
2745 fail:
2746 if(ret == 0)
2747 ret=url_ferror(s->pb);
2748 for(i=0;i<s->nb_streams;i++)
2749 av_freep(&s->streams[i]->priv_data);
2750 av_freep(&s->priv_data);
2751 return ret;
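/* Illustrative usage sketch (not part of libavformat): the minimal muxing
 * call sequence around av_write_header()/av_write_trailer(). The function
 * name example_mux is a placeholder, and avformat_alloc_context(),
 * guess_format() and url_fopen() are assumed to be available in this
 * library version; codec parameter setup and packet production are elided. */
#if 0
static int example_mux(const char *filename)
{
    AVFormatContext *s = avformat_alloc_context();
    AVStream *st;

    s->oformat = guess_format(NULL, filename, NULL); /* pick muxer by name */
    st = av_new_stream(s, 0);
    /* fill st->codec here (codec_id, time_base, dimensions or sample_rate) */
    if (av_set_parameters(s, NULL) < 0)
        return -1;
    if (url_fopen(&s->pb, filename, URL_WRONLY) < 0)
        return -1;
    if (av_write_header(s) < 0)
        return -1;
    /* for every encoded packet: av_interleaved_write_frame(s, &pkt); */
    av_write_trailer(s);
    url_fclose(s->pb);
    return 0;
}
#endif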
2754 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2756 int i, j;
2757 AVProgram *program=NULL;
2758 void *tmp;
2760 for(i=0; i<ac->nb_programs; i++){
2761 if(ac->programs[i]->id != progid)
2762 continue;
2763 program = ac->programs[i];
2764 for(j=0; j<program->nb_stream_indexes; j++)
2765 if(program->stream_index[j] == idx)
2766 return;
2768 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2769 if(!tmp)
2770 return;
2771 program->stream_index = tmp;
2772 program->stream_index[program->nb_stream_indexes++] = idx;
2773 return;
2777 /* "user interface" functions */
2778 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2780 char buf[256];
2781 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2782 AVStream *st = ic->streams[i];
2783 int g = ff_gcd(st->time_base.num, st->time_base.den);
2784 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2785 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2786 /* the PID is important information, so we display it */
2787 /* XXX: add a generic system */
2788 if (flags & AVFMT_SHOW_IDS)
2789 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2790 if (strlen(st->language) > 0)
2791 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2792 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2793 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2794 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2795 if(st->r_frame_rate.den && st->r_frame_rate.num)
2796 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2797 /* else if(st->time_base.den && st->time_base.num)
2798 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2799 else
2800 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2802 av_log(NULL, AV_LOG_INFO, "\n");
2805 void dump_format(AVFormatContext *ic,
2806 int index,
2807 const char *url,
2808 int is_output)
2810 int i;
2812 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2813 is_output ? "Output" : "Input",
2814 index,
2815 is_output ? ic->oformat->name : ic->iformat->name,
2816 is_output ? "to" : "from", url);
2817 if (!is_output) {
2818 av_log(NULL, AV_LOG_INFO, " Duration: ");
2819 if (ic->duration != AV_NOPTS_VALUE) {
2820 int hours, mins, secs, us;
2821 secs = ic->duration / AV_TIME_BASE;
2822 us = ic->duration % AV_TIME_BASE;
2823 mins = secs / 60;
2824 secs %= 60;
2825 hours = mins / 60;
2826 mins %= 60;
2827 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2828 (100 * us) / AV_TIME_BASE);
2829 } else {
2830 av_log(NULL, AV_LOG_INFO, "N/A");
2832 if (ic->start_time != AV_NOPTS_VALUE) {
2833 int secs, us;
2834 av_log(NULL, AV_LOG_INFO, ", start: ");
2835 secs = ic->start_time / AV_TIME_BASE;
2836 us = ic->start_time % AV_TIME_BASE;
2837 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2838 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2840 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2841 if (ic->bit_rate) {
2842 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2843 } else {
2844 av_log(NULL, AV_LOG_INFO, "N/A");
2846 av_log(NULL, AV_LOG_INFO, "\n");
2848 if(ic->nb_programs) {
2849 int j, k;
2850 for(j=0; j<ic->nb_programs; j++) {
2851 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2852 ic->programs[j]->name ? ic->programs[j]->name : "");
2853 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2854 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2856 } else
2857 for(i=0;i<ic->nb_streams;i++)
2858 dump_stream_format(ic, i, index, is_output);
2861 #if LIBAVFORMAT_VERSION_MAJOR < 53
2862 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2864 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2867 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2869 AVRational frame_rate;
2870 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2871 *frame_rate_num= frame_rate.num;
2872 *frame_rate_den= frame_rate.den;
2873 return ret;
2875 #endif
2877 int64_t av_gettime(void)
2879 struct timeval tv;
2880 gettimeofday(&tv,NULL);
2881 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2884 int64_t parse_date(const char *datestr, int duration)
2886 const char *p;
2887 int64_t t;
2888 struct tm dt;
2889 int i;
2890 static const char * const date_fmt[] = {
2891 "%Y-%m-%d",
2892 "%Y%m%d",
2894 static const char * const time_fmt[] = {
2895 "%H:%M:%S",
2896 "%H%M%S",
2898 const char *q;
2899 int is_utc, len;
2900 char lastch;
2901 int negative = 0;
2903 #undef time
2904 time_t now = time(0);
2906 len = strlen(datestr);
2907 if (len > 0)
2908 lastch = datestr[len - 1];
2909 else
2910 lastch = '\0';
2911 is_utc = (lastch == 'z' || lastch == 'Z');
2913 memset(&dt, 0, sizeof(dt));
2915 p = datestr;
2916 q = NULL;
2917 if (!duration) {
2918 /* parse the year-month-day part */
2919 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2920 q = small_strptime(p, date_fmt[i], &dt);
2921 if (q) {
2922 break;
2926 /* if the year-month-day part is missing, then take the
2927 * current date */
2928 if (!q) {
2929 if (is_utc) {
2930 dt = *gmtime(&now);
2931 } else {
2932 dt = *localtime(&now);
2934 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2935 } else {
2936 p = q;
2939 if (*p == 'T' || *p == 't' || *p == ' ')
2940 p++;
2942 /* parse the hour-minute-second part */
2943 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2944 q = small_strptime(p, time_fmt[i], &dt);
2945 if (q) {
2946 break;
2949 } else {
2950 /* parse datestr as a duration */
2951 if (p[0] == '-') {
2952 negative = 1;
2953 ++p;
2955 /* parse datestr as HH:MM:SS */
2956 q = small_strptime(p, time_fmt[0], &dt);
2957 if (!q) {
2958 /* parse datestr as S+ */
2959 dt.tm_sec = strtol(p, (char **)&q, 10);
2960 if (q == p)
2961 /* the parsing didn't succeed */
2962 return INT64_MIN;
2963 dt.tm_min = 0;
2964 dt.tm_hour = 0;
2968 /* Now we have all the fields that we can get */
2969 if (!q) {
2970 return INT64_MIN;
2973 if (duration) {
2974 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2975 } else {
2976 dt.tm_isdst = -1; /* unknown */
2977 if (is_utc) {
2978 t = mktimegm(&dt);
2979 } else {
2980 t = mktime(&dt);
2984 t *= 1000000;
2986 /* parse the optional fractional ".m..." part */
2987 if (*q == '.') {
2988 int val, n;
2989 q++;
2990 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2991 if (!isdigit(*q))
2992 break;
2993 val += n * (*q - '0');
2995 t += val;
2997 return negative ? -t : t;
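/* Accepted forms (as implemented above):
 *   dates:     [YYYY-MM-DD|YYYYMMDD][T| ]HH:MM:SS (or HHMMSS), optionally
 *              followed by a ".m..." fraction and a trailing 'Z' for UTC;
 *              a missing date part defaults to the current day.
 *   durations: "[-]HH:MM:SS[.m...]" or "[-]S+[.m...]"
 * The result is in microseconds; INT64_MIN signals a parse failure. */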
3000 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3002 const char *p;
3003 char tag[128], *q;
3005 p = info;
3006 if (*p == '?')
3007 p++;
3008 for(;;) {
3009 q = tag;
3010 while (*p != '\0' && *p != '=' && *p != '&') {
3011 if ((q - tag) < sizeof(tag) - 1)
3012 *q++ = *p;
3013 p++;
3015 *q = '\0';
3016 q = arg;
3017 if (*p == '=') {
3018 p++;
3019 while (*p != '&' && *p != '\0') {
3020 if ((q - arg) < arg_size - 1) {
3021 if (*p == '+')
3022 *q++ = ' ';
3023 else
3024 *q++ = *p;
3026 p++;
3028 *q = '\0';
3030 if (!strcmp(tag, tag1))
3031 return 1;
3032 if (*p != '&')
3033 break;
3034 p++;
3036 return 0;
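/* Example: with info = "?codec=mpeg4&title=two+words",
 * find_info_tag(arg, sizeof(arg), "title", info) returns 1 and copies
 * "two words" into arg ('+' is decoded to a space); an absent tag yields 0. */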
3039 int av_get_frame_filename(char *buf, int buf_size,
3040 const char *path, int number)
3042 const char *p;
3043 char *q, buf1[20], c;
3044 int nd, len, percentd_found;
3046 q = buf;
3047 p = path;
3048 percentd_found = 0;
3049 for(;;) {
3050 c = *p++;
3051 if (c == '\0')
3052 break;
3053 if (c == '%') {
3054 do {
3055 nd = 0;
3056 while (isdigit(*p)) {
3057 nd = nd * 10 + *p++ - '0';
3059 c = *p++;
3060 } while (isdigit(c));
3062 switch(c) {
3063 case '%':
3064 goto addchar;
3065 case 'd':
3066 if (percentd_found)
3067 goto fail;
3068 percentd_found = 1;
3069 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3070 len = strlen(buf1);
3071 if ((q - buf + len) > buf_size - 1)
3072 goto fail;
3073 memcpy(q, buf1, len);
3074 q += len;
3075 break;
3076 default:
3077 goto fail;
3079 } else {
3080 addchar:
3081 if ((q - buf) < buf_size - 1)
3082 *q++ = c;
3085 if (!percentd_found)
3086 goto fail;
3087 *q = '\0';
3088 return 0;
3089 fail:
3090 *q = '\0';
3091 return -1;
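/* Example: av_get_frame_filename(buf, sizeof(buf), "frame%04d.jpg", 7)
 * yields "frame0007.jpg"; a path containing no %d (or more than one)
 * makes the function fail and return -1. */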
3094 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3096 int len, i, j, c;
3097 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3099 for(i=0;i<size;i+=16) {
3100 len = size - i;
3101 if (len > 16)
3102 len = 16;
3103 PRINT("%08x ", i);
3104 for(j=0;j<16;j++) {
3105 if (j < len)
3106 PRINT(" %02x", buf[i+j]);
3107 else
3108 PRINT(" ");
3110 PRINT(" ");
3111 for(j=0;j<len;j++) {
3112 c = buf[i+j];
3113 if (c < ' ' || c > '~')
3114 c = '.';
3115 PRINT("%c", c);
3117 PRINT("\n");
3119 #undef PRINT
3122 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3124 hex_dump_internal(NULL, f, 0, buf, size);
3127 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3129 hex_dump_internal(avcl, NULL, level, buf, size);
3132 //FIXME needs to know the time_base
3133 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3135 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3136 PRINT("stream #%d:\n", pkt->stream_index);
3137 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3138 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3139 /* DTS is _always_ valid after av_read_frame() */
3140 PRINT(" dts=");
3141 if (pkt->dts == AV_NOPTS_VALUE)
3142 PRINT("N/A");
3143 else
3144 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3145 /* PTS may not be known if B-frames are present. */
3146 PRINT(" pts=");
3147 if (pkt->pts == AV_NOPTS_VALUE)
3148 PRINT("N/A");
3149 else
3150 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3151 PRINT("\n");
3152 PRINT(" size=%d\n", pkt->size);
3153 #undef PRINT
3154 if (dump_payload)
3155 av_hex_dump(f, pkt->data, pkt->size);
3158 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3160 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3163 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3165 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
3168 void url_split(char *proto, int proto_size,
3169 char *authorization, int authorization_size,
3170 char *hostname, int hostname_size,
3171 int *port_ptr,
3172 char *path, int path_size,
3173 const char *url)
3175 const char *p, *ls, *at, *col, *brk;
3177 if (port_ptr) *port_ptr = -1;
3178 if (proto_size > 0) proto[0] = 0;
3179 if (authorization_size > 0) authorization[0] = 0;
3180 if (hostname_size > 0) hostname[0] = 0;
3181 if (path_size > 0) path[0] = 0;
3183 /* parse protocol */
3184 if ((p = strchr(url, ':'))) {
3185 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3186 p++; /* skip ':' */
3187 if (*p == '/') p++;
3188 if (*p == '/') p++;
3189 } else {
3190 /* no protocol means plain filename */
3191 av_strlcpy(path, url, path_size);
3192 return;
3195 /* separate path from hostname */
3196 ls = strchr(p, '/');
3197 if(!ls)
3198 ls = strchr(p, '?');
3199 if(ls)
3200 av_strlcpy(path, ls, path_size);
3201 else
3202 ls = &p[strlen(p)]; // XXX
3204 /* the rest is hostname, use that to parse auth/port */
3205 if (ls != p) {
3206 /* authorization (user[:pass]@hostname) */
3207 if ((at = strchr(p, '@')) && at < ls) {
3208 av_strlcpy(authorization, p,
3209 FFMIN(authorization_size, at + 1 - p));
3210 p = at + 1; /* skip '@' */
3213 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3214 /* [host]:port */
3215 av_strlcpy(hostname, p + 1,
3216 FFMIN(hostname_size, brk - p));
3217 if (brk[1] == ':' && port_ptr)
3218 *port_ptr = atoi(brk + 2);
3219 } else if ((col = strchr(p, ':')) && col < ls) {
3220 av_strlcpy(hostname, p,
3221 FFMIN(col + 1 - p, hostname_size));
3222 if (port_ptr) *port_ptr = atoi(col + 1);
3223 } else
3224 av_strlcpy(hostname, p,
3225 FFMIN(ls + 1 - p, hostname_size));
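/* Example: url_split() on "http://user:pass@example.com:8080/a/b?x=1" yields
 * proto "http", authorization "user:pass", hostname "example.com",
 * port 8080 and path "/a/b?x=1" (the query string stays in the path). */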
3229 char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
3231 int i;
3232 static const char hex_table[16] = { '0', '1', '2', '3',
3233 '4', '5', '6', '7',
3234 '8', '9', 'A', 'B',
3235 'C', 'D', 'E', 'F' };
3237 for(i = 0; i < s; i++) {
3238 buff[i * 2] = hex_table[src[i] >> 4];
3239 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3242 return buff;
3245 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3246 int pts_num, int pts_den)
3248 unsigned int gcd= ff_gcd(pts_num, pts_den);
3249 s->pts_wrap_bits = pts_wrap_bits;
3250 s->time_base.num = pts_num/gcd;
3251 s->time_base.den = pts_den/gcd;
3253 if(gcd>1)
3254 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);