Only futz with frame durations at render-time when using VFR.
[HandBrake.git] / libhb / render.c
blob5ca6db9325a04ede98ba9cdf32392a086f28495c
1 /* $Id: render.c,v 1.17 2005/04/14 17:37:54 titer Exp $
3 This file is part of the HandBrake source code.
4 Homepage: <http://handbrake.m0k.org/>.
5 It may be used under the terms of the GNU General Public License. */
7 #include "hb.h"
9 #include "ffmpeg/avcodec.h"
10 #include "ffmpeg/swscale.h"
12 struct hb_work_private_s
14 hb_job_t * job;
16 struct SwsContext * context;
17 AVPicture pic_tmp_in;
18 AVPicture pic_tmp_crop;
19 AVPicture pic_tmp_out;
20 hb_buffer_t * buf_scale;
21 hb_fifo_t * subtitle_queue;
22 hb_fifo_t * delay_queue;
23 int dropped_frames;
24 int extended_frames;
25 uint64_t last_start[4];
26 uint64_t last_stop[4];
27 uint64_t lost_time[4];
28 uint64_t total_lost_time;
29 uint64_t total_gained_time;
30 int64_t chapter_time;
31 int chapter_val;
34 int renderInit( hb_work_object_t *, hb_job_t * );
35 int renderWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
36 void renderClose( hb_work_object_t * );
38 hb_work_object_t hb_render =
40 WORK_RENDER,
41 "Renderer",
42 renderInit,
43 renderWork,
44 renderClose
/*
 * getU() & getV()
 *
 * Utility functions that locate the chroma sample covering pixel (x, y)
 * inside a packed planar YUV 4:2:0 image. The full-resolution Y plane
 * comes first, followed by the U and V planes; each chroma plane is half
 * the width and height of Y, i.e. one chroma sample covers a 2x2 block
 * of luma samples.
 */
static uint8_t *getU(uint8_t *data, int width, int height, int x, int y)
{
    uint8_t *u_plane = data + width * height; /* U plane starts after Y */
    int      stride  = width / 2;             /* chroma row length      */

    return u_plane + ((y / 2) * stride) + (x / 2);
}
/* See getU(): returns the V-plane sample covering pixel (x, y).
   The V plane follows Y (width*height bytes) and U (width*height/4). */
static uint8_t *getV(uint8_t *data, int width, int height, int x, int y)
{
    uint8_t *v_plane = data + width * height + (width * height) / 4;

    return v_plane + ((y / 2) * (width / 2)) + (x / 2);
}
67 static void ApplySub( hb_job_t * job, hb_buffer_t * buf,
68 hb_buffer_t ** _sub )
70 hb_buffer_t * sub = *_sub;
71 hb_title_t * title = job->title;
72 int i, j, offset_top, offset_left, margin_top, margin_percent;
73 uint8_t * lum, * alpha, * out, * sub_chromaU, * sub_chromaV;
76 * Percent of height of picture that form a margin that subtitles
77 * should not be displayed within.
79 margin_percent = 2;
81 if( !sub )
83 return;
86 /*
87 * If necessary, move the subtitle so it is not in a cropped zone.
88 * When it won't fit, we center it so we lose as much on both ends.
89 * Otherwise we try to leave a 20px or 2% margin around it.
91 margin_top = ( ( title->height - job->crop[0] - job->crop[1] ) *
92 margin_percent ) / 100;
94 if( margin_top > 20 )
97 * A maximum margin of 20px regardless of height of the picture.
99 margin_top = 20;
102 if( sub->height > title->height - job->crop[0] - job->crop[1] -
103 ( margin_top * 2 ) )
106 * The subtitle won't fit in the cropped zone, so center
107 * it vertically so we fit in as much as we can.
109 offset_top = job->crop[0] + ( title->height - job->crop[0] -
110 job->crop[1] - sub->height ) / 2;
112 else if( sub->y < job->crop[0] + margin_top )
115 * The subtitle fits in the cropped zone, but is currently positioned
116 * within our top margin, so move it outside of our margin.
118 offset_top = job->crop[0] + margin_top;
120 else if( sub->y > title->height - job->crop[1] - margin_top - sub->height )
123 * The subtitle fits in the cropped zone, and is not within the top
124 * margin but is within the bottom margin, so move it to be above
125 * the margin.
127 offset_top = title->height - job->crop[1] - margin_top - sub->height;
129 else
132 * The subtitle is fine where it is.
134 offset_top = sub->y;
137 if( sub->width > title->width - job->crop[2] - job->crop[3] - 40 )
138 offset_left = job->crop[2] + ( title->width - job->crop[2] -
139 job->crop[3] - sub->width ) / 2;
140 else if( sub->x < job->crop[2] + 20 )
141 offset_left = job->crop[2] + 20;
142 else if( sub->x > title->width - job->crop[3] - 20 - sub->width )
143 offset_left = title->width - job->crop[3] - 20 - sub->width;
144 else
145 offset_left = sub->x;
147 lum = sub->data;
148 alpha = lum + sub->width * sub->height;
149 sub_chromaU = alpha + sub->width * sub->height;
150 sub_chromaV = sub_chromaU + sub->width * sub->height;
152 out = buf->data + offset_top * title->width + offset_left;
154 for( i = 0; i < sub->height; i++ )
156 if( offset_top + i >= 0 && offset_top + i < title->height )
158 for( j = 0; j < sub->width; j++ )
160 if( offset_left + j >= 0 && offset_left + j < title->width )
162 uint8_t *chromaU, *chromaV;
165 * Merge the luminance and alpha with the picture
167 out[j] = ( (uint16_t) out[j] * ( 16 - (uint16_t) alpha[j] ) +
168 (uint16_t) lum[j] * (uint16_t) alpha[j] ) >> 4;
170 * Set the chroma (colour) based on whether there is
171 * any alpha at all. Don't try to blend with the picture.
173 chromaU = getU(buf->data, title->width, title->height,
174 offset_left+j, offset_top+i);
176 chromaV = getV(buf->data, title->width, title->height,
177 offset_left+j, offset_top+i);
179 if( alpha[j] > 0 )
182 * Add the chroma from the sub-picture, as this is
183 * not a transparent element.
185 *chromaU = sub_chromaU[j];
186 *chromaV = sub_chromaV[j];
192 lum += sub->width;
193 alpha += sub->width;
194 sub_chromaU += sub->width;
195 sub_chromaV += sub->width;
196 out += title->width;
199 hb_buffer_close( _sub );
202 int renderWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
203 hb_buffer_t ** buf_out )
205 hb_work_private_t * pv = w->private_data;
206 hb_job_t * job = pv->job;
207 hb_title_t * title = job->title;
208 hb_buffer_t * in = *buf_in, * buf_tmp_in = *buf_in;
209 hb_buffer_t * ivtc_buffer = NULL;
211 if(!in->data)
213 /* If the input buffer is end of stream, send out an empty one
214 * to the next stage as well. Note that this will result in us
215 * losing the current contents of the delay queue.
217 *buf_out = hb_buffer_init(0);
218 return HB_WORK_OK;
222 * During the indepth_scan ditch the buffers here before applying filters or attempting to
223 * use the subtitles.
225 if( job->indepth_scan )
227 *buf_out = NULL;
228 return HB_WORK_OK;
231 /* Push subtitles onto queue just in case we need to delay a frame */
232 if( in->sub )
234 hb_fifo_push( pv->subtitle_queue, in->sub );
236 else
238 hb_fifo_push( pv->subtitle_queue, hb_buffer_init(0) );
241 /* If there's a chapter mark remember it in case we delay or drop its frame */
242 if( in->new_chap )
244 pv->chapter_time = in->start;
245 pv->chapter_val = in->new_chap;
246 in->new_chap = 0;
249 /* Setup render buffer */
250 hb_buffer_t * buf_render = hb_buffer_init( 3 * job->width * job->height / 2 );
252 /* Apply filters */
253 if( job->filters )
255 int filter_count = hb_list_count( job->filters );
256 int i;
258 for( i = 0; i < filter_count; i++ )
260 hb_filter_object_t * filter = hb_list_item( job->filters, i );
262 if( !filter )
264 continue;
267 hb_buffer_t * buf_tmp_out = NULL;
269 int result = filter->work( buf_tmp_in,
270 &buf_tmp_out,
271 PIX_FMT_YUV420P,
272 title->width,
273 title->height,
274 filter->private_data );
277 * FILTER_OK: set temp buffer to filter buffer, continue
278 * FILTER_DELAY: set temp buffer to NULL, abort
279 * FILTER_DROP: set temp buffer to NULL, pop subtitle, abort
280 * FILTER_FAILED: leave temp buffer alone, continue
282 if( result == FILTER_OK )
284 buf_tmp_in = buf_tmp_out;
286 else if( result == FILTER_DELAY )
288 buf_tmp_in = NULL;
289 break;
291 else if( result == FILTER_DROP )
293 if( job->vfr )
295 /* We need to compensate for the time lost by dropping this frame.
296 Spread its duration out in quarters, because usually dropped frames
297 maintain a 1-out-of-5 pattern and this spreads it out amongst the remaining ones.
298 Store these in the lost_time array, which has 4 slots in it.
299 Because not every frame duration divides evenly by 4, and we can't lose the
300 remainder, we have to go through an awkward process to preserve it in the 4th array index. */
301 uint64_t temp_duration = buf_tmp_out->stop - buf_tmp_out->start;
302 pv->lost_time[0] += (temp_duration / 4);
303 pv->lost_time[1] += (temp_duration / 4);
304 pv->lost_time[2] += (temp_duration / 4);
305 pv->lost_time[3] += ( temp_duration - (temp_duration / 4) - (temp_duration / 4) - (temp_duration / 4) );
307 pv->total_lost_time += temp_duration;
308 pv->dropped_frames++;
310 hb_fifo_get( pv->subtitle_queue );
311 buf_tmp_in = NULL;
313 else
315 buf_tmp_in = buf_tmp_out;
317 break;
322 if( buf_tmp_in )
324 /* Cache frame start and stop times, so we can renumber
325 time stamps if dropping frames for VFR. */
326 int i;
327 for( i = 3; i >= 1; i-- )
329 pv->last_start[i] = pv->last_start[i-1];
330 pv->last_stop[i] = pv->last_stop[i-1];
333 /* In order to make sure we have continuous time stamps, store
334 the current frame's duration as starting when the last one stopped. */
335 pv->last_start[0] = pv->last_stop[1];
336 pv->last_stop[0] = pv->last_start[0] + (in->stop - in->start);
339 /* Apply subtitles */
340 if( buf_tmp_in )
342 hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
343 if( subtitles )
345 ApplySub( job, buf_tmp_in, &subtitles );
349 /* Apply crop/scale if specified */
350 if( buf_tmp_in && pv->context )
352 avpicture_fill( &pv->pic_tmp_in, buf_tmp_in->data,
353 PIX_FMT_YUV420P,
354 title->width, title->height );
356 avpicture_fill( &pv->pic_tmp_out, buf_render->data,
357 PIX_FMT_YUV420P,
358 job->width, job->height );
360 // Crop; this alters the pointer to the data to point to the correct place for cropped frame
361 av_picture_crop( &pv->pic_tmp_crop, &pv->pic_tmp_in, PIX_FMT_YUV420P,
362 job->crop[0], job->crop[2] );
364 // Scale pic_crop into pic_render according to the context set up in renderInit
365 sws_scale(pv->context,
366 pv->pic_tmp_crop.data, pv->pic_tmp_crop.linesize,
367 0, title->height - (job->crop[0] + job->crop[1]),
368 pv->pic_tmp_out.data, pv->pic_tmp_out.linesize);
370 hb_buffer_copy_settings( buf_render, buf_tmp_in );
372 buf_tmp_in = buf_render;
375 /* Set output to render buffer */
376 (*buf_out) = buf_render;
378 if( buf_tmp_in == NULL )
380 /* Teardown and cleanup buffers if we are emitting NULL */
381 if( buf_in && *buf_in )
383 hb_buffer_close( buf_in );
384 *buf_in = NULL;
386 if( buf_out && *buf_out )
388 hb_buffer_close( buf_out );
389 *buf_out = NULL;
392 else if( buf_tmp_in != buf_render )
394 /* Copy temporary results and settings into render buffer */
395 memcpy( buf_render->data, buf_tmp_in->data, buf_render->size );
396 hb_buffer_copy_settings( buf_render, buf_tmp_in );
399 if (*buf_out)
401 hb_fifo_push( pv->delay_queue, *buf_out );
402 *buf_out = NULL;
406 * Keep the last three frames in our queue, this ensures that we have the last
407 * two always in there should we need to rewrite the durations on them.
410 if( job->vfr )
412 if( hb_fifo_size( pv->delay_queue ) >= 3 )
414 *buf_out = hb_fifo_get( pv->delay_queue );
417 else
419 *buf_out = hb_fifo_get( pv->delay_queue );
422 if( *buf_out && job->vfr)
424 /* The current frame exists. That means it hasn't been dropped by a filter.
425 Make it accessible as ivtc_buffer so we can edit its duration if needed. */
426 ivtc_buffer = *buf_out;
428 if( pv->lost_time[3] > 0 )
431 * A frame's been dropped earlier by VFR detelecine.
432 * Gotta make up the lost time. This will also
433 * slow down the video.
434 * The dropped frame's has to be accounted for, so
435 * divvy it up amongst the 4 frames left behind.
436 * This is what the delay_queue is for;
437 * telecined sequences start 2 frames before
438 * the dropped frame, so to slow down the right
439 * ones you need a 2 frame delay between
440 * reading input and writing output.
443 /* We want to extend the outputted frame's duration by the value
444 stored in the 4th slot of the lost_time array. Because we need
445 to adjust all the values in the array so they're contiguous,
446 extend the duration inside the array first, before applying
447 it to the current frame buffer. */
448 pv->last_stop[3] += pv->lost_time[3];
450 /* Log how much time has been added back in to the video. */
451 pv->total_gained_time += pv->lost_time[3];
453 /* We've pulled the 4th value from the lost_time array
454 and added it to the last_stop array's 4th slot. Now, rotate the
455 lost_time array so the 4th slot now holds the 3rd's value, and
456 so on down the line, and set the 0 index to a value of 0. */
457 int i;
458 for( i=2; i >= 0; i--)
460 pv->lost_time[i+1] = pv->lost_time[i];
462 pv->lost_time[0] = 0;
464 /* Log how many frames have had their durations extended. */
465 pv->extended_frames++;
468 /* We can't use the given time stamps. Previous frames
469 might already have been extended, throwing off the
470 raw values fed to render.c. Instead, their
471 stop and start times are stored in arrays.
472 The 4th cached frame will be the to use.
473 If it needed its duration extended to make up
474 lost time, it will have happened above. */
475 ivtc_buffer->start = pv->last_start[3];
476 ivtc_buffer->stop = pv->last_stop[3];
478 /* Set the 3rd cached frame to start when this one stops,
479 and so on down the line. If any of them need to be
480 extended as well to make up lost time, it'll be handled
481 on the next loop through the renderer. */
482 int i;
483 for (i = 2; i >= 0; i--)
485 int temp_duration = pv->last_stop[i] - pv->last_start[i];
486 pv->last_start[i] = pv->last_stop[i+1];
487 pv->last_stop[i] = pv->last_start[i] + temp_duration;
490 /* If we have a pending chapter mark and this frame is at
491 or after the time of the mark, mark this frame & clear
492 our pending mark. */
493 if( pv->chapter_time && pv->chapter_time <= ivtc_buffer->start )
495 ivtc_buffer->new_chap = pv->chapter_val;
496 pv->chapter_time = 0;
501 return HB_WORK_OK;
504 void renderClose( hb_work_object_t * w )
506 hb_work_private_t * pv = w->private_data;
508 hb_log("render: lost time: %lld (%i frames)", pv->total_lost_time, pv->dropped_frames);
509 hb_log("render: gained time: %lld (%i frames) (%lld not accounted for)", pv->total_gained_time, pv->extended_frames, pv->total_lost_time - pv->total_gained_time);
510 if (pv->dropped_frames)
511 hb_log("render: average dropped frame duration: %lld", (pv->total_lost_time / pv->dropped_frames) );
513 /* Cleanup subtitle queue */
514 if( pv->subtitle_queue )
516 hb_fifo_close( &pv->subtitle_queue );
519 if( pv->delay_queue )
521 hb_fifo_close( &pv->delay_queue );
524 /* Cleanup render work structure */
525 free( pv );
526 w->private_data = NULL;
529 int renderInit( hb_work_object_t * w, hb_job_t * job )
531 /* Allocate new private work object */
532 hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
533 pv->job = job;
534 w->private_data = pv;
536 /* Get title and title size */
537 hb_title_t * title = job->title;
539 /* If crop or scale is specified, setup rescale context */
540 if( job->crop[0] || job->crop[1] || job->crop[2] || job->crop[3] ||
541 job->width != title->width || job->height != title->height )
543 pv->context = sws_getContext(title->width - (job->crop[2] + job->crop[3]),
544 title->height - (job->crop[0] + job->crop[1]),
545 PIX_FMT_YUV420P,
546 job->width, job->height, PIX_FMT_YUV420P,
547 (uint16_t)(SWS_LANCZOS|SWS_ACCURATE_RND), NULL, NULL, NULL);
550 /* Setup FIFO queue for subtitle cache */
551 pv->subtitle_queue = hb_fifo_init( 8 );
552 pv->delay_queue = hb_fifo_init( 8 );
554 /* VFR IVTC needs a bunch of time-keeping variables to track
555 how many frames are dropped, how many are extended, what the
556 last 4 start and stop times were (so they can be modified),
557 how much time has been lost and gained overall, how much time
558 the latest 4 frames should be extended by, and where chapter
559 markers are (so they can be saved if their frames are dropped.) */
560 pv->dropped_frames = 0;
561 pv->extended_frames = 0;
562 pv->last_start[0] = 0;
563 pv->last_stop[0] = 0;
564 pv->total_lost_time = 0;
565 pv->total_gained_time = 0;
566 pv->lost_time[0] = 0; pv->lost_time[1] = 0; pv->lost_time[2] = 0; pv->lost_time[3] = 0;
567 pv->chapter_time = 0;
568 pv->chapter_val = 0;
570 /* Setup filters */
571 /* TODO: Move to work.c? */
572 if( job->filters )
574 int filter_count = hb_list_count( job->filters );
575 int i;
577 for( i = 0; i < filter_count; i++ )
579 hb_filter_object_t * filter = hb_list_item( job->filters, i );
581 if( !filter ) continue;
583 filter->private_data = filter->init( PIX_FMT_YUV420P,
584 title->width,
585 title->height,
586 filter->settings );
590 return 0;