3 Copyright (c) 2003-2015 HandBrake Team
4 This file is part of the HandBrake source code
5 Homepage: <http://handbrake.fr/>.
6 It may be used under the terms of the GNU General Public License v2.
For full terms see the COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
13 #include "libavutil/base64.h"
16 * Convert an hb_state_t to a jansson dict
17 * @param state - Pointer to hb_state_t to convert
19 hb_dict_t
* hb_state_to_dict( hb_state_t
* state
)
21 hb_dict_t
*dict
= NULL
;
27 dict
= json_pack_ex(&error
, 0, "{s:o}",
28 "State", hb_value_int(state
->state
));
30 case HB_STATE_SCANNING
:
31 case HB_STATE_SCANDONE
:
32 dict
= json_pack_ex(&error
, 0,
33 "{s:o, s{s:o, s:o, s:o, s:o, s:o}}",
34 "State", hb_value_int(state
->state
),
36 "Progress", hb_value_double(state
->param
.scanning
.progress
),
37 "Preview", hb_value_int(state
->param
.scanning
.preview_cur
),
38 "PreviewCount", hb_value_int(state
->param
.scanning
.preview_count
),
39 "Title", hb_value_int(state
->param
.scanning
.title_cur
),
40 "TitleCount", hb_value_int(state
->param
.scanning
.title_count
));
42 case HB_STATE_WORKING
:
44 case HB_STATE_SEARCHING
:
45 dict
= json_pack_ex(&error
, 0,
46 "{s:o, s{s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o}}",
47 "State", hb_value_int(state
->state
),
49 "Progress", hb_value_double(state
->param
.working
.progress
),
50 "PassID", hb_value_int(state
->param
.working
.pass_id
),
51 "Pass", hb_value_int(state
->param
.working
.pass
),
52 "PassCount", hb_value_int(state
->param
.working
.pass_count
),
53 "Rate", hb_value_double(state
->param
.working
.rate_cur
),
54 "RateAvg", hb_value_double(state
->param
.working
.rate_avg
),
55 "Hours", hb_value_int(state
->param
.working
.hours
),
56 "Minutes", hb_value_int(state
->param
.working
.minutes
),
57 "Seconds", hb_value_int(state
->param
.working
.seconds
),
58 "SequenceID", hb_value_int(state
->param
.working
.sequence_id
));
60 case HB_STATE_WORKDONE
:
61 dict
= json_pack_ex(&error
, 0,
63 "State", hb_value_int(state
->state
),
65 "Error", hb_value_int(state
->param
.workdone
.error
));
68 dict
= json_pack_ex(&error
, 0,
70 "State", hb_value_int(state
->state
),
72 "Progress", hb_value_double(state
->param
.muxing
.progress
));
75 hb_error("hb_state_to_json: unrecognized state %d", state
->state
);
80 hb_error("json pack failure: %s", error
.text
);
86 * Get the current state of an hb instance as a json string
87 * @param h - Pointer to an hb_handle_t hb instance
89 char* hb_get_state_json( hb_handle_t
* h
)
93 hb_get_state(h
, &state
);
94 hb_dict_t
*dict
= hb_state_to_dict(&state
);
96 char *json_state
= hb_value_get_json(dict
);
102 static hb_dict_t
* hb_title_to_dict_internal( hb_title_t
*title
)
111 dict
= json_pack_ex(&error
, 0,
113 // Type, Path, Name, Index, Playlist, AngleCount
114 "s:o, s:o, s:o, s:o, s:o, s:o,"
115 // Duration {Ticks, Hours, Minutes, Seconds}
116 "s:{s:o, s:o, s:o, s:o},"
117 // Geometry {Width, Height, PAR {Num, Den},
118 "s:{s:o, s:o, s:{s:o, s:o}},"
119 // Crop[Top, Bottom, Left, Right]}
121 // Color {Primary, Transfer, Matrix}
123 // FrameRate {Num, Den}
125 // InterlaceDetected, VideoCodec
130 "Type", hb_value_int(title
->type
),
131 "Path", hb_value_string(title
->path
),
132 "Name", hb_value_string(title
->name
),
133 "Index", hb_value_int(title
->index
),
134 "Playlist", hb_value_int(title
->playlist
),
135 "AngleCount", hb_value_int(title
->angle_count
),
137 "Ticks", hb_value_int(title
->duration
),
138 "Hours", hb_value_int(title
->hours
),
139 "Minutes", hb_value_int(title
->minutes
),
140 "Seconds", hb_value_int(title
->seconds
),
142 "Width", hb_value_int(title
->geometry
.width
),
143 "Height", hb_value_int(title
->geometry
.height
),
145 "Num", hb_value_int(title
->geometry
.par
.num
),
146 "Den", hb_value_int(title
->geometry
.par
.den
),
147 "Crop", hb_value_int(title
->crop
[0]),
148 hb_value_int(title
->crop
[1]),
149 hb_value_int(title
->crop
[2]),
150 hb_value_int(title
->crop
[3]),
152 "Primary", hb_value_int(title
->color_prim
),
153 "Transfer", hb_value_int(title
->color_transfer
),
154 "Matrix", hb_value_int(title
->color_matrix
),
156 "Num", hb_value_int(title
->vrate
.num
),
157 "Den", hb_value_int(title
->vrate
.den
),
158 "InterlaceDetected", hb_value_bool(title
->detected_interlacing
),
159 "VideoCodec", hb_value_string(title
->video_codec_name
),
164 hb_error("json pack failure: %s", error
.text
);
168 if (title
->container_name
!= NULL
)
170 hb_dict_set(dict
, "Container", hb_value_string(title
->container_name
));
174 hb_dict_t
*meta_dict
= hb_dict_get(dict
, "Metadata");
175 if (title
->metadata
->name
!= NULL
)
177 hb_dict_set(meta_dict
, "Name", hb_value_string(title
->metadata
->name
));
179 if (title
->metadata
->artist
!= NULL
)
181 hb_dict_set(meta_dict
, "Artist",
182 hb_value_string(title
->metadata
->artist
));
184 if (title
->metadata
->composer
!= NULL
)
186 hb_dict_set(meta_dict
, "Composer",
187 hb_value_string(title
->metadata
->composer
));
189 if (title
->metadata
->comment
!= NULL
)
191 hb_dict_set(meta_dict
, "Comment",
192 hb_value_string(title
->metadata
->comment
));
194 if (title
->metadata
->genre
!= NULL
)
196 hb_dict_set(meta_dict
, "Genre",
197 hb_value_string(title
->metadata
->genre
));
199 if (title
->metadata
->album
!= NULL
)
201 hb_dict_set(meta_dict
, "Album",
202 hb_value_string(title
->metadata
->album
));
204 if (title
->metadata
->album_artist
!= NULL
)
206 hb_dict_set(meta_dict
, "AlbumArtist",
207 hb_value_string(title
->metadata
->album_artist
));
209 if (title
->metadata
->description
!= NULL
)
211 hb_dict_set(meta_dict
, "Description",
212 hb_value_string(title
->metadata
->description
));
214 if (title
->metadata
->long_description
!= NULL
)
216 hb_dict_set(meta_dict
, "LongDescription",
217 hb_value_string(title
->metadata
->long_description
));
219 if (title
->metadata
->release_date
!= NULL
)
221 hb_dict_set(meta_dict
, "ReleaseDate",
222 hb_value_string(title
->metadata
->release_date
));
225 // process chapter list
226 hb_dict_t
* chapter_list
= hb_value_array_init();
227 for (ii
= 0; ii
< hb_list_count(title
->list_chapter
); ii
++)
229 hb_dict_t
*chapter_dict
;
231 hb_chapter_t
*chapter
= hb_list_item(title
->list_chapter
, ii
);
232 if (chapter
->title
!= NULL
)
233 name
= chapter
->title
;
235 chapter_dict
= json_pack_ex(&error
, 0,
236 "{s:o, s:{s:o, s:o, s:o, s:o}}",
237 "Name", hb_value_string(name
),
239 "Ticks", hb_value_int(chapter
->duration
),
240 "Hours", hb_value_int(chapter
->hours
),
241 "Minutes", hb_value_int(chapter
->minutes
),
242 "Seconds", hb_value_int(chapter
->seconds
)
244 if (chapter_dict
== NULL
)
246 hb_error("json pack failure: %s", error
.text
);
249 hb_value_array_append(chapter_list
, chapter_dict
);
251 hb_dict_set(dict
, "ChapterList", chapter_list
);
253 // process audio list
254 hb_dict_t
* audio_list
= hb_value_array_init();
255 for (ii
= 0; ii
< hb_list_count(title
->list_audio
); ii
++)
257 hb_dict_t
*audio_dict
;
258 hb_audio_t
*audio
= hb_list_item(title
->list_audio
, ii
);
260 audio_dict
= json_pack_ex(&error
, 0,
261 "{s:o, s:o, s:o, s:o, s:o, s:o, s:o}",
262 "Description", hb_value_string(audio
->config
.lang
.description
),
263 "Language", hb_value_string(audio
->config
.lang
.simple
),
264 "LanguageCode", hb_value_string(audio
->config
.lang
.iso639_2
),
265 "Codec", hb_value_int(audio
->config
.in
.codec
),
266 "SampleRate", hb_value_int(audio
->config
.in
.samplerate
),
267 "BitRate", hb_value_int(audio
->config
.in
.bitrate
),
268 "ChannelLayout", hb_value_int(audio
->config
.in
.channel_layout
));
269 if (audio_dict
== NULL
)
271 hb_error("json pack failure: %s", error
.text
);
274 hb_value_array_append(audio_list
, audio_dict
);
276 hb_dict_set(dict
, "AudioList", audio_list
);
278 // process subtitle list
279 hb_dict_t
* subtitle_list
= hb_value_array_init();
280 for (ii
= 0; ii
< hb_list_count(title
->list_subtitle
); ii
++)
282 hb_dict_t
*subtitle_dict
;
283 hb_subtitle_t
*subtitle
= hb_list_item(title
->list_subtitle
, ii
);
285 subtitle_dict
= json_pack_ex(&error
, 0,
286 "{s:o, s:o, s:o, s:o}",
287 "Format", hb_value_int(subtitle
->format
),
288 "Source", hb_value_int(subtitle
->source
),
289 "Language", hb_value_string(subtitle
->lang
),
290 "LanguageCode", hb_value_string(subtitle
->iso639_2
));
291 if (subtitle_dict
== NULL
)
293 hb_error("json pack failure: %s", error
.text
);
296 hb_value_array_append(subtitle_list
, subtitle_dict
);
298 hb_dict_set(dict
, "SubtitleList", subtitle_list
);
304 * Convert an hb_title_t to a jansson dict
305 * @param title - Pointer to the hb_title_t to convert
307 hb_dict_t
* hb_title_to_dict( hb_handle_t
*h
, int title_index
)
309 hb_title_t
*title
= hb_find_title_by_index(h
, title_index
);
310 return hb_title_to_dict_internal(title
);
314 * Convert an hb_title_set_t to a jansson dict
315 * @param title - Pointer to the hb_title_set_t to convert
317 hb_dict_t
* hb_title_set_to_dict( const hb_title_set_t
* title_set
)
323 dict
= json_pack_ex(&error
, 0,
325 "MainFeature", hb_value_int(title_set
->feature
),
327 // process title list
328 hb_dict_t
*title_list
= hb_dict_get(dict
, "TitleList");
329 for (ii
= 0; ii
< hb_list_count(title_set
->list_title
); ii
++)
331 hb_title_t
*title
= hb_list_item(title_set
->list_title
, ii
);
332 hb_dict_t
*title_dict
= hb_title_to_dict_internal(title
);
333 hb_value_array_append(title_list
, title_dict
);
340 * Convert an hb_title_t to a json string
341 * @param title - Pointer to hb_title_t to convert
343 char* hb_title_to_json( hb_handle_t
*h
, int title_index
)
345 hb_dict_t
*dict
= hb_title_to_dict(h
, title_index
);
349 char *json_title
= hb_value_get_json(dict
);
350 hb_value_free(&dict
);
356 * Get the current title set of an hb instance as a json string
357 * @param h - Pointer to hb_handle_t hb instance
359 char* hb_get_title_set_json( hb_handle_t
* h
)
361 hb_dict_t
*dict
= hb_title_set_to_dict(hb_get_title_set(h
));
363 char *json_title_set
= hb_value_get_json(dict
);
364 hb_value_free(&dict
);
366 return json_title_set
;
370 * Convert an hb_job_t to an hb_dict_t
371 * @param job - Pointer to the hb_job_t to convert
373 hb_dict_t
* hb_job_to_dict( const hb_job_t
* job
)
377 int subtitle_search_burn
;
380 if (job
== NULL
|| job
->title
== NULL
)
383 // Assumes that the UI has reduced geometry settings to only the
384 // necessary PAR value
386 subtitle_search_burn
= job
->select_subtitle_config
.dest
== RENDERSUB
;
388 dict
= json_pack_ex(&error
, 0,
392 // Destination {Mux, ChapterMarkers, ChapterList}
393 "s:{s:o, s:o, s:[]},"
394 // Source {Path, Title, Angle}
395 "s:{s:o, s:o, s:o,},"
398 // Video {Codec, QSV {Decode, AsyncDepth}}
399 "s:{s:o, s:o, s:o, s:{s:o, s:o}},"
400 // Audio {CopyMask, FallbackEncoder, AudioList []}
401 "s:{s:[], s:o, s:[]},"
402 // Subtitles {Search {Enable, Forced, Default, Burn}, SubtitleList []}
403 "s:{s:{s:o, s:o, s:o, s:o}, s:[]},"
406 // Filters {Grayscale, FilterList []}
409 "SequenceID", hb_value_int(job
->sequence_id
),
411 "Mux", hb_value_int(job
->mux
),
412 "ChapterMarkers", hb_value_bool(job
->chapter_markers
),
415 "Path", hb_value_string(job
->title
->path
),
416 "Title", hb_value_int(job
->title
->index
),
417 "Angle", hb_value_int(job
->angle
),
419 "Num", hb_value_int(job
->par
.num
),
420 "Den", hb_value_int(job
->par
.den
),
422 "Encoder", hb_value_int(job
->vcodec
),
423 "OpenCL", hb_value_bool(job
->use_opencl
),
424 "HWDecode", hb_value_bool(job
->use_hwd
),
426 "Decode", hb_value_bool(job
->qsv
.decode
),
427 "AsyncDepth", hb_value_int(job
->qsv
.async_depth
),
430 "FallbackEncoder", hb_value_int(job
->acodec_fallback
),
434 "Enable", hb_value_bool(job
->indepth_scan
),
435 "Forced", hb_value_bool(job
->select_subtitle_config
.force
),
436 "Default", hb_value_bool(job
->select_subtitle_config
.default_track
),
437 "Burn", hb_value_bool(subtitle_search_burn
),
441 "Grayscale", hb_value_bool(job
->grayscale
),
446 hb_error("json pack failure: %s", error
.text
);
449 hb_dict_t
*dest_dict
= hb_dict_get(dict
, "Destination");
450 if (job
->file
!= NULL
)
452 hb_dict_set(dest_dict
, "File", hb_value_string(job
->file
));
454 if (job
->mux
& HB_MUX_MASK_MP4
)
457 mp4_dict
= json_pack_ex(&error
, 0, "{s:o, s:o}",
458 "Mp4Optimize", hb_value_bool(job
->mp4_optimize
),
459 "IpodAtom", hb_value_bool(job
->ipod_atom
));
460 hb_dict_set(dest_dict
, "Mp4Options", mp4_dict
);
462 hb_dict_t
*source_dict
= hb_dict_get(dict
, "Source");
463 hb_dict_t
*range_dict
;
464 if (job
->start_at_preview
> 0)
466 range_dict
= json_pack_ex(&error
, 0, "{s:o, s:o, s:o, s:o}",
467 "Type", hb_value_string("preview"),
468 "Start", hb_value_int(job
->start_at_preview
),
469 "End", hb_value_int(job
->pts_to_stop
),
470 "SeekPoints", hb_value_int(job
->seek_points
));
472 else if (job
->pts_to_start
!= 0)
474 range_dict
= json_pack_ex(&error
, 0, "{s:o, s:o, s:o}",
475 "Type", hb_value_string("time"),
476 "Start", hb_value_int(job
->pts_to_start
),
477 "End", hb_value_int(job
->pts_to_stop
));
479 else if (job
->frame_to_start
!= 0)
481 range_dict
= json_pack_ex(&error
, 0, "{s:o, s:o, s:o}",
482 "Type", hb_value_string("frame"),
483 "Start", hb_value_int(job
->frame_to_start
),
484 "End", hb_value_int(job
->frame_to_stop
));
488 range_dict
= json_pack_ex(&error
, 0, "{s:o, s:o, s:o}",
489 "Type", hb_value_string("chapter"),
490 "Start", hb_value_int(job
->chapter_start
),
491 "End", hb_value_int(job
->chapter_end
));
493 hb_dict_set(source_dict
, "Range", range_dict
);
495 hb_dict_t
*video_dict
= hb_dict_get(dict
, "Video");
496 if (job
->color_matrix_code
> 0)
498 hb_dict_set(video_dict
, "ColorMatrixCode",
499 hb_value_int(job
->color_matrix_code
));
501 if (job
->vquality
>= 0)
503 hb_dict_set(video_dict
, "Quality", hb_value_double(job
->vquality
));
507 hb_dict_set(video_dict
, "Bitrate", hb_value_int(job
->vbitrate
));
508 hb_dict_set(video_dict
, "TwoPass", hb_value_bool(job
->twopass
));
509 hb_dict_set(video_dict
, "Turbo",
510 hb_value_bool(job
->fastfirstpass
));
512 if (job
->encoder_preset
!= NULL
)
514 hb_dict_set(video_dict
, "Preset",
515 hb_value_string(job
->encoder_preset
));
517 if (job
->encoder_tune
!= NULL
)
519 hb_dict_set(video_dict
, "Tune", hb_value_string(job
->encoder_tune
));
521 if (job
->encoder_profile
!= NULL
)
523 hb_dict_set(video_dict
, "Profile",
524 hb_value_string(job
->encoder_profile
));
526 if (job
->encoder_level
!= NULL
)
528 hb_dict_set(video_dict
, "Level", hb_value_string(job
->encoder_level
));
530 if (job
->encoder_options
!= NULL
)
532 hb_dict_set(video_dict
, "Options",
533 hb_value_string(job
->encoder_options
));
535 hb_dict_t
*meta_dict
= hb_dict_get(dict
, "Metadata");
536 if (job
->metadata
->name
!= NULL
)
538 hb_dict_set(meta_dict
, "Name", hb_value_string(job
->metadata
->name
));
540 if (job
->metadata
->artist
!= NULL
)
542 hb_dict_set(meta_dict
, "Artist",
543 hb_value_string(job
->metadata
->artist
));
545 if (job
->metadata
->composer
!= NULL
)
547 hb_dict_set(meta_dict
, "Composer",
548 hb_value_string(job
->metadata
->composer
));
550 if (job
->metadata
->comment
!= NULL
)
552 hb_dict_set(meta_dict
, "Comment",
553 hb_value_string(job
->metadata
->comment
));
555 if (job
->metadata
->genre
!= NULL
)
557 hb_dict_set(meta_dict
, "Genre", hb_value_string(job
->metadata
->genre
));
559 if (job
->metadata
->album
!= NULL
)
561 hb_dict_set(meta_dict
, "Album", hb_value_string(job
->metadata
->album
));
563 if (job
->metadata
->album_artist
!= NULL
)
565 hb_dict_set(meta_dict
, "AlbumArtist",
566 hb_value_string(job
->metadata
->album_artist
));
568 if (job
->metadata
->description
!= NULL
)
570 hb_dict_set(meta_dict
, "Description",
571 hb_value_string(job
->metadata
->description
));
573 if (job
->metadata
->long_description
!= NULL
)
575 hb_dict_set(meta_dict
, "LongDescription",
576 hb_value_string(job
->metadata
->long_description
));
578 if (job
->metadata
->release_date
!= NULL
)
580 hb_dict_set(meta_dict
, "ReleaseDate",
581 hb_value_string(job
->metadata
->release_date
));
584 // process chapter list
585 hb_dict_t
*chapter_list
= hb_dict_get(dest_dict
, "ChapterList");
586 for (ii
= 0; ii
< hb_list_count(job
->list_chapter
); ii
++)
588 hb_dict_t
*chapter_dict
;
590 hb_chapter_t
*chapter
= hb_list_item(job
->list_chapter
, ii
);
591 if (chapter
->title
!= NULL
)
592 title
= chapter
->title
;
594 chapter_dict
= json_pack_ex(&error
, 0, "{s:o}",
595 "Name", hb_value_string(title
));
596 hb_value_array_append(chapter_list
, chapter_dict
);
599 // process filter list
600 hb_dict_t
*filters_dict
= hb_dict_get(dict
, "Filters");
601 hb_value_array_t
*filter_list
= hb_dict_get(filters_dict
, "FilterList");
602 for (ii
= 0; ii
< hb_list_count(job
->list_filter
); ii
++)
604 hb_dict_t
*filter_dict
;
605 hb_filter_object_t
*filter
= hb_list_item(job
->list_filter
, ii
);
607 filter_dict
= json_pack_ex(&error
, 0, "{s:o}",
608 "ID", hb_value_int(filter
->id
));
609 if (filter
->settings
!= NULL
)
611 hb_dict_set(filter_dict
, "Settings",
612 hb_value_string(filter
->settings
));
615 hb_value_array_append(filter_list
, filter_dict
);
618 hb_dict_t
*audios_dict
= hb_dict_get(dict
, "Audio");
619 // Construct audio CopyMask
620 hb_value_array_t
*copy_mask
= hb_dict_get(audios_dict
, "CopyMask");
622 for (acodec
= 1; acodec
!= HB_ACODEC_PASS_FLAG
; acodec
<<= 1)
624 if (acodec
& job
->acodec_copy_mask
)
627 name
= hb_audio_encoder_get_name(acodec
| HB_ACODEC_PASS_FLAG
);
630 hb_value_t
*val
= hb_value_string(name
);
631 hb_value_array_append(copy_mask
, val
);
635 // process audio list
636 hb_dict_t
*audio_list
= hb_dict_get(audios_dict
, "AudioList");
637 for (ii
= 0; ii
< hb_list_count(job
->list_audio
); ii
++)
639 hb_dict_t
*audio_dict
;
640 hb_audio_t
*audio
= hb_list_item(job
->list_audio
, ii
);
642 audio_dict
= json_pack_ex(&error
, 0,
643 "{s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o, s:o}",
644 "Track", hb_value_int(audio
->config
.in
.track
),
645 "Encoder", hb_value_int(audio
->config
.out
.codec
),
646 "Gain", hb_value_double(audio
->config
.out
.gain
),
647 "DRC", hb_value_double(audio
->config
.out
.dynamic_range_compression
),
648 "Mixdown", hb_value_int(audio
->config
.out
.mixdown
),
649 "NormalizeMixLevel", hb_value_bool(audio
->config
.out
.normalize_mix_level
),
650 "DitherMethod", hb_value_int(audio
->config
.out
.dither_method
),
651 "Samplerate", hb_value_int(audio
->config
.out
.samplerate
),
652 "Bitrate", hb_value_int(audio
->config
.out
.bitrate
),
653 "Quality", hb_value_double(audio
->config
.out
.quality
),
654 "CompressionLevel", hb_value_double(audio
->config
.out
.compression_level
));
655 if (audio
->config
.out
.name
!= NULL
)
657 hb_dict_set(audio_dict
, "Name",
658 hb_value_string(audio
->config
.out
.name
));
661 hb_value_array_append(audio_list
, audio_dict
);
664 // process subtitle list
665 hb_dict_t
*subtitles_dict
= hb_dict_get(dict
, "Subtitle");
666 hb_dict_t
*subtitle_list
= hb_dict_get(subtitles_dict
, "SubtitleList");
667 for (ii
= 0; ii
< hb_list_count(job
->list_subtitle
); ii
++)
669 hb_dict_t
*subtitle_dict
;
670 hb_subtitle_t
*subtitle
= hb_list_item(job
->list_subtitle
, ii
);
672 if (subtitle
->source
== SRTSUB
)
674 subtitle_dict
= json_pack_ex(&error
, 0,
675 "{s:o, s:o, s:o, s:{s:o, s:o, s:o}}",
676 "Default", hb_value_bool(subtitle
->config
.default_track
),
677 "Burn", hb_value_bool(subtitle
->config
.dest
== RENDERSUB
),
678 "Offset", hb_value_int(subtitle
->config
.offset
),
680 "Filename", hb_value_string(subtitle
->config
.src_filename
),
681 "Language", hb_value_string(subtitle
->iso639_2
),
682 "Codeset", hb_value_string(subtitle
->config
.src_codeset
));
686 subtitle_dict
= json_pack_ex(&error
, 0,
687 "{s:o, s:o, s:o, s:o, s:o}",
688 "Track", hb_value_int(subtitle
->track
),
689 "Default", hb_value_bool(subtitle
->config
.default_track
),
690 "Forced", hb_value_bool(subtitle
->config
.force
),
691 "Burn", hb_value_bool(subtitle
->config
.dest
== RENDERSUB
),
692 "Offset", hb_value_int(subtitle
->config
.offset
));
694 hb_value_array_append(subtitle_list
, subtitle_dict
);
701 * Convert an hb_job_t to a json string
702 * @param job - Pointer to the hb_job_t to convert
704 char* hb_job_to_json( const hb_job_t
* job
)
706 hb_dict_t
*dict
= hb_job_to_dict(job
);
711 char *json_job
= hb_value_get_json(dict
);
712 hb_value_free(&dict
);
717 // These functions exist only to perform type checking when using
719 static double* unpack_f(double *f
) { return f
; }
720 static int* unpack_i(int *i
) { return i
; }
721 static json_int_t
* unpack_I(json_int_t
*i
) { return i
; }
722 static int * unpack_b(int *b
) { return b
; }
723 static char** unpack_s(char **s
) { return s
; }
724 static json_t
** unpack_o(json_t
** o
) { return o
; }
726 void hb_json_job_scan( hb_handle_t
* h
, const char * json_job
)
732 dict
= hb_value_json(json_job
);
734 int title_index
, use_hwd
= 0;
737 result
= json_unpack_ex(dict
, &error
, 0, "{s:{s:s, s:i}, s?{s?b}}",
739 "Path", unpack_s(&path
),
740 "Title", unpack_i(&title_index
),
742 "HWDecode", unpack_b(&use_hwd
)
746 hb_error("json unpack failure, failed to find title: %s", error
.text
);
747 hb_value_free(&dict
);
751 // If the job wants to use Hardware decode, it must also be
752 // enabled during scan. So enable it here.
753 hb_hwd_set_enable(h
, use_hwd
);
754 hb_scan(h
, path
, title_index
, 10, 0, 0);
756 // Wait for scan to complete
758 hb_get_state2(h
, &state
);
759 while (state
.state
== HB_STATE_SCANNING
)
762 hb_get_state2(h
, &state
);
764 hb_value_free(&dict
);
767 static int validate_audio_codec_mux(int codec
, int mux
, int track
)
769 const hb_encoder_t
*enc
= NULL
;
770 while ((enc
= hb_audio_encoder_get_next(enc
)) != NULL
)
772 if ((enc
->codec
== codec
) && (enc
->muxers
& mux
) == 0)
774 hb_error("track %d: incompatible encoder '%s' for muxer '%s'",
775 track
+ 1, enc
->short_name
,
776 hb_container_get_short_name(mux
));
784 * Convert a json string representation of a job to an hb_job_t
785 * @param h - Pointer to the hb_hanle_t hb instance which contains the
786 * title that the job refers to.
787 * @param json_job - Pointer to json string representation of a job
789 hb_job_t
* hb_dict_to_job( hb_handle_t
* h
, hb_dict_t
*dict
)
799 result
= json_unpack_ex(dict
, &error
, 0, "{s:{s:i}}",
800 "Source", "Title", unpack_i(&titleindex
));
803 hb_error("hb_dict_to_job: failed to find title: %s", error
.text
);
807 job
= hb_job_init_by_index(h
, titleindex
);
810 hb_error("hb_dict_to_job: Title %d doesn't exist", titleindex
);
814 hb_value_array_t
*chapter_list
= NULL
;
815 hb_value_array_t
*audio_list
= NULL
;
816 hb_value_array_t
*subtitle_list
= NULL
;
817 hb_value_array_t
*filter_list
= NULL
;
818 hb_value_t
*mux
= NULL
, *vcodec
= NULL
;
819 hb_value_t
*acodec_copy_mask
= NULL
, *acodec_fallback
= NULL
;
820 char *destfile
= NULL
;
821 char *range_type
= NULL
;
822 char *video_preset
= NULL
, *video_tune
= NULL
;
823 char *video_profile
= NULL
, *video_level
= NULL
;
824 char *video_options
= NULL
;
825 int subtitle_search_burn
= 0;
826 char *meta_name
= NULL
, *meta_artist
= NULL
, *meta_album_artist
= NULL
;
827 char *meta_release
= NULL
, *meta_comment
= NULL
, *meta_genre
= NULL
;
828 char *meta_composer
= NULL
, *meta_desc
= NULL
, *meta_long_desc
= NULL
;
829 json_int_t range_start
= -1, range_end
= -1, range_seek_points
= -1;
831 result
= json_unpack_ex(dict
, &error
, 0,
835 // Destination {File, Mux, ChapterMarkers, ChapterList,
836 // Mp4Options {Mp4Optimize, IpodAtom}}
837 "s:{s?s, s:o, s:b, s?o s?{s?b, s?b}},"
838 // Source {Angle, Range {Type, Start, End, SeekPoints}}
839 "s:{s?i, s?{s:s, s?I, s?I, s?I}},"
842 // Video {Codec, Quality, Bitrate, Preset, Tune, Profile, Level, Options
843 // TwoPass, Turbo, ColorMatrixCode,
844 // OpenCL, HWDecode, QSV {Decode, AsyncDepth}}
845 "s:{s:o, s?f, s?i, s?s, s?s, s?s, s?s, s?s,"
847 " s?b, s?b, s?{s?b, s?i}},"
848 // Audio {CopyMask, FallbackEncoder, AudioList}
850 // Subtitle {Search {Enable, Forced, Default, Burn}, SubtitleList}
851 "s?{s?{s:b, s?b, s?b, s?b}, s?o},"
852 // Metadata {Name, Artist, Composer, AlbumArtist, ReleaseDate,
853 // Comment, Genre, Description, LongDescription}
854 "s?{s?s, s?s, s?s, s?s, s?s, s?s, s?s, s?s, s?s},"
855 // Filters {FilterList}
858 "SequenceID", unpack_i(&job
->sequence_id
),
860 "File", unpack_s(&destfile
),
861 "Mux", unpack_o(&mux
),
862 "ChapterMarkers", unpack_b(&job
->chapter_markers
),
863 "ChapterList", unpack_o(&chapter_list
),
865 "Mp4Optimize", unpack_b(&job
->mp4_optimize
),
866 "IpodAtom", unpack_b(&job
->ipod_atom
),
868 "Angle", unpack_i(&job
->angle
),
870 "Type", unpack_s(&range_type
),
871 "Start", unpack_I(&range_start
),
872 "End", unpack_I(&range_end
),
873 "SeekPoints", unpack_I(&range_seek_points
),
875 "Num", unpack_i(&job
->par
.num
),
876 "Den", unpack_i(&job
->par
.den
),
878 "Encoder", unpack_o(&vcodec
),
879 "Quality", unpack_f(&job
->vquality
),
880 "Bitrate", unpack_i(&job
->vbitrate
),
881 "Preset", unpack_s(&video_preset
),
882 "Tune", unpack_s(&video_tune
),
883 "Profile", unpack_s(&video_profile
),
884 "Level", unpack_s(&video_level
),
885 "Options", unpack_s(&video_options
),
886 "TwoPass", unpack_b(&job
->twopass
),
887 "Turbo", unpack_b(&job
->fastfirstpass
),
888 "ColorMatrixCode", unpack_i(&job
->color_matrix_code
),
889 "OpenCL", unpack_b(&job
->use_opencl
),
890 "HWDecode", unpack_b(&job
->use_hwd
),
892 "Decode", unpack_b(&job
->qsv
.decode
),
893 "AsyncDepth", unpack_i(&job
->qsv
.async_depth
),
895 "CopyMask", unpack_o(&acodec_copy_mask
),
896 "FallbackEncoder", unpack_o(&acodec_fallback
),
897 "AudioList", unpack_o(&audio_list
),
900 "Enable", unpack_b(&job
->indepth_scan
),
901 "Forced", unpack_b(&job
->select_subtitle_config
.force
),
902 "Default", unpack_b(&job
->select_subtitle_config
.default_track
),
903 "Burn", unpack_b(&subtitle_search_burn
),
904 "SubtitleList", unpack_o(&subtitle_list
),
906 "Name", unpack_s(&meta_name
),
907 "Artist", unpack_s(&meta_artist
),
908 "Composer", unpack_s(&meta_composer
),
909 "AlbumArtist", unpack_s(&meta_album_artist
),
910 "ReleaseDate", unpack_s(&meta_release
),
911 "Comment", unpack_s(&meta_comment
),
912 "Genre", unpack_s(&meta_genre
),
913 "Description", unpack_s(&meta_desc
),
914 "LongDescription", unpack_s(&meta_long_desc
),
916 "Grayscale", unpack_b(&job
->grayscale
),
917 "FilterList", unpack_o(&filter_list
)
921 hb_error("hb_dict_to_job: failed to parse dict: %s", error
.text
);
926 if (hb_value_type(mux
) == HB_VALUE_TYPE_STRING
)
928 const char *s
= hb_value_get_string(mux
);
929 job
->mux
= hb_container_get_from_name(s
);
931 job
->mux
= hb_container_get_from_extension(s
);
935 job
->mux
= hb_value_get_int(mux
);
938 // Lookup video codec
939 if (hb_value_type(vcodec
) == HB_VALUE_TYPE_STRING
)
941 const char *s
= hb_value_get_string(vcodec
);
942 job
->vcodec
= hb_video_encoder_get_from_name(s
);
946 job
->vcodec
= hb_value_get_int(vcodec
);
949 if (range_type
!= NULL
)
951 if (!strcasecmp(range_type
, "preview"))
953 if (range_start
>= 0)
954 job
->start_at_preview
= range_start
;
956 job
->pts_to_stop
= range_end
;
957 if (range_seek_points
>= 0)
958 job
->seek_points
= range_seek_points
;
960 else if (!strcasecmp(range_type
, "chapter"))
962 if (range_start
>= 0)
963 job
->chapter_start
= range_start
;
965 job
->chapter_end
= range_end
;
967 else if (!strcasecmp(range_type
, "time"))
969 if (range_start
>= 0)
970 job
->pts_to_start
= range_start
;
972 job
->pts_to_stop
= range_end
;
974 else if (!strcasecmp(range_type
, "frame"))
976 if (range_start
>= 0)
977 job
->frame_to_start
= range_start
;
979 job
->frame_to_stop
= range_end
;
983 if (destfile
!= NULL
&& destfile
[0] != 0)
985 hb_job_set_file(job
, destfile
);
988 hb_job_set_encoder_preset(job
, video_preset
);
989 hb_job_set_encoder_tune(job
, video_tune
);
990 hb_job_set_encoder_profile(job
, video_profile
);
991 hb_job_set_encoder_level(job
, video_level
);
992 hb_job_set_encoder_options(job
, video_options
);
994 job
->select_subtitle_config
.dest
= subtitle_search_burn
?
995 RENDERSUB
: PASSTHRUSUB
;
996 if (meta_name
!= NULL
&& meta_name
[0] != 0)
998 hb_metadata_set_name(job
->metadata
, meta_name
);
1000 if (meta_artist
!= NULL
&& meta_artist
[0] != 0)
1002 hb_metadata_set_artist(job
->metadata
, meta_artist
);
1004 if (meta_composer
!= NULL
&& meta_composer
[0] != 0)
1006 hb_metadata_set_composer(job
->metadata
, meta_composer
);
1008 if (meta_album_artist
!= NULL
&& meta_album_artist
[0] != 0)
1010 hb_metadata_set_album_artist(job
->metadata
, meta_album_artist
);
1012 if (meta_release
!= NULL
&& meta_release
[0] != 0)
1014 hb_metadata_set_release_date(job
->metadata
, meta_release
);
1016 if (meta_comment
!= NULL
&& meta_comment
[0] != 0)
1018 hb_metadata_set_comment(job
->metadata
, meta_comment
);
1020 if (meta_genre
!= NULL
&& meta_genre
[0] != 0)
1022 hb_metadata_set_genre(job
->metadata
, meta_genre
);
1024 if (meta_desc
!= NULL
&& meta_desc
[0] != 0)
1026 hb_metadata_set_description(job
->metadata
, meta_desc
);
1028 if (meta_long_desc
!= NULL
&& meta_long_desc
[0] != 0)
1030 hb_metadata_set_long_description(job
->metadata
, meta_long_desc
);
1033 // process chapter list
1034 if (chapter_list
!= NULL
&&
1035 hb_value_type(chapter_list
) == HB_VALUE_TYPE_ARRAY
)
1038 hb_dict_t
*chapter_dict
;
1039 count
= hb_value_array_len(chapter_list
);
1040 for (ii
= 0; ii
< count
; ii
++)
1042 chapter_dict
= hb_value_array_get(chapter_list
, ii
);
1044 result
= json_unpack_ex(chapter_dict
, &error
, 0,
1045 "{s:s}", "Name", unpack_s(&name
));
1048 hb_error("hb_dict_to_job: failed to find chapter name: %s",
1052 if (name
!= NULL
&& name
[0] != 0)
1054 hb_chapter_t
*chapter
;
1055 chapter
= hb_list_item(job
->list_chapter
, ii
);
1056 if (chapter
!= NULL
)
1058 hb_chapter_set_title(chapter
, name
);
1064 // process filter list
1065 if (filter_list
!= NULL
&&
1066 hb_value_type(filter_list
) == HB_VALUE_TYPE_ARRAY
)
1069 hb_dict_t
*filter_dict
;
1070 count
= hb_value_array_len(filter_list
);
1071 for (ii
= 0; ii
< count
; ii
++)
1073 filter_dict
= hb_value_array_get(filter_list
, ii
);
1075 char *filter_settings
= NULL
;
1076 result
= json_unpack_ex(filter_dict
, &error
, 0, "{s:i, s?s}",
1077 "ID", unpack_i(&filter_id
),
1078 "Settings", unpack_s(&filter_settings
));
1081 hb_error("hb_dict_to_job: failed to find filter settings: %s",
1085 if (filter_id
>= HB_FILTER_FIRST
&& filter_id
<= HB_FILTER_LAST
)
1087 hb_filter_object_t
*filter
;
1088 filter
= hb_filter_init(filter_id
);
1089 hb_add_filter(job
, filter
, filter_settings
);
1094 // process audio list
1095 if (acodec_fallback
!= NULL
)
1097 if (hb_value_type(acodec_fallback
) == HB_VALUE_TYPE_STRING
)
1099 const char *s
= hb_value_get_string(acodec_fallback
);
1100 job
->acodec_fallback
= hb_audio_encoder_get_from_name(s
);
1104 job
->acodec_fallback
= hb_value_get_int(acodec_fallback
);
1107 if (acodec_copy_mask
!= NULL
)
1109 if (hb_value_type(acodec_copy_mask
) == HB_VALUE_TYPE_ARRAY
)
1112 count
= hb_value_array_len(acodec_copy_mask
);
1113 for (ii
= 0; ii
< count
; ii
++)
1115 hb_value_t
*value
= hb_value_array_get(acodec_copy_mask
, ii
);
1116 if (hb_value_type(value
) == HB_VALUE_TYPE_STRING
)
1118 const char *s
= hb_value_get_string(value
);
1119 job
->acodec_copy_mask
|= hb_audio_encoder_get_from_name(s
);
1123 job
->acodec_copy_mask
|= hb_value_get_int(value
);
1127 else if (hb_value_type(acodec_copy_mask
) == HB_VALUE_TYPE_STRING
)
1129 // Split the string at ','
1130 char *s
= strdup(hb_value_get_string(acodec_copy_mask
));
1132 while (cur
!= NULL
&& cur
[0] != 0)
1134 char *next
= strchr(cur
, ',');
1140 job
->acodec_copy_mask
|= hb_audio_encoder_get_from_name(cur
);
1147 job
->acodec_copy_mask
= hb_value_get_int(acodec_copy_mask
);
1150 if (audio_list
!= NULL
&& hb_value_type(audio_list
) == HB_VALUE_TYPE_ARRAY
)
1153 hb_dict_t
*audio_dict
;
1154 count
= hb_value_array_len(audio_list
);
1155 for (ii
= 0; ii
< count
; ii
++)
1157 audio_dict
= hb_value_array_get(audio_list
, ii
);
1158 hb_audio_config_t audio
;
1159 hb_value_t
*acodec
= NULL
, *samplerate
= NULL
, *mixdown
= NULL
;
1160 hb_value_t
*dither
= NULL
;
1162 hb_audio_config_init(&audio
);
1163 result
= json_unpack_ex(audio_dict
, &error
, 0,
1164 "{s:i, s?s, s?o, s?F, s?F, s?o, s?b, s?o, s?o, s?i, s?F, s?F}",
1165 "Track", unpack_i(&audio
.in
.track
),
1166 "Name", unpack_s(&audio
.out
.name
),
1167 "Encoder", unpack_o(&acodec
),
1168 "Gain", unpack_f(&audio
.out
.gain
),
1169 "DRC", unpack_f(&audio
.out
.dynamic_range_compression
),
1170 "Mixdown", unpack_o(&mixdown
),
1171 "NormalizeMixLevel", unpack_b(&audio
.out
.normalize_mix_level
),
1172 "DitherMethod", unpack_o(&dither
),
1173 "Samplerate", unpack_o(&samplerate
),
1174 "Bitrate", unpack_i(&audio
.out
.bitrate
),
1175 "Quality", unpack_f(&audio
.out
.quality
),
1176 "CompressionLevel", unpack_f(&audio
.out
.compression_level
));
1179 hb_error("hb_dict_to_job: failed to find audio settings: %s",
1185 if (hb_value_type(acodec
) == HB_VALUE_TYPE_STRING
)
1187 const char *s
= hb_value_get_string(acodec
);
1188 audio
.out
.codec
= hb_audio_encoder_get_from_name(s
);
1192 audio
.out
.codec
= hb_value_get_int(acodec
);
1195 if (mixdown
!= NULL
)
1197 if (hb_value_type(mixdown
) == HB_VALUE_TYPE_STRING
)
1199 const char *s
= hb_value_get_string(mixdown
);
1200 audio
.out
.mixdown
= hb_mixdown_get_from_name(s
);
1204 audio
.out
.mixdown
= hb_value_get_int(mixdown
);
1207 if (samplerate
!= NULL
)
1209 if (hb_value_type(samplerate
) == HB_VALUE_TYPE_STRING
)
1211 const char *s
= hb_value_get_string(samplerate
);
1212 audio
.out
.samplerate
= hb_audio_samplerate_get_from_name(s
);
1213 if (audio
.out
.samplerate
< 0)
1214 audio
.out
.samplerate
= 0;
1218 audio
.out
.samplerate
= hb_value_get_int(samplerate
);
1223 if (hb_value_type(dither
) == HB_VALUE_TYPE_STRING
)
1225 const char *s
= hb_value_get_string(dither
);
1226 audio
.out
.dither_method
= hb_audio_dither_get_from_name(s
);
1230 audio
.out
.dither_method
= hb_value_get_int(dither
);
1233 if (audio
.in
.track
>= 0)
1235 audio
.out
.track
= ii
;
1236 hb_audio_add(job
, &audio
);
1241 // Audio sanity checks
1242 int count
= hb_list_count(job
->list_audio
);
1244 for (ii
= 0; ii
< count
; ii
++)
1246 hb_audio_config_t
*acfg
;
1247 acfg
= hb_list_audio_config_item(job
->list_audio
, ii
);
1248 if (validate_audio_codec_mux(acfg
->out
.codec
, job
->mux
, ii
))
1254 // process subtitle list
1255 if (subtitle_list
!= NULL
&&
1256 hb_value_type(subtitle_list
) == HB_VALUE_TYPE_ARRAY
)
1259 hb_dict_t
*subtitle_dict
;
1260 count
= hb_value_array_len(subtitle_list
);
1261 for (ii
= 0; ii
< count
; ii
++)
1263 subtitle_dict
= hb_value_array_get(subtitle_list
, ii
);
1264 hb_subtitle_config_t sub_config
;
1267 char *srtfile
= NULL
;
1268 json_int_t offset
= 0;
1270 result
= json_unpack_ex(subtitle_dict
, &error
, 0,
1272 "Track", unpack_i(&track
),
1274 "Filename", unpack_s(&srtfile
));
1277 hb_error("json unpack failure: %s", error
.text
);
1281 // Embedded subtitle track
1282 if (track
>= 0 && srtfile
== NULL
)
1284 hb_subtitle_t
*subtitle
;
1285 subtitle
= hb_list_item(job
->title
->list_subtitle
, track
);
1286 if (subtitle
!= NULL
)
1288 sub_config
= subtitle
->config
;
1289 result
= json_unpack_ex(subtitle_dict
, &error
, 0,
1290 "{s?b, s?b, s?b, s?i}",
1291 "Default", unpack_i(&sub_config
.default_track
),
1292 "Forced", unpack_b(&sub_config
.force
),
1293 "Burn", unpack_b(&burn
),
1294 "Offset", unpack_I(&offset
));
1297 hb_error("json unpack failure: %s", error
.text
);
1301 sub_config
.offset
= offset
;
1302 sub_config
.dest
= burn
? RENDERSUB
: PASSTHRUSUB
;
1303 hb_subtitle_add(job
, &sub_config
, track
);
1306 else if (srtfile
!= NULL
)
1308 strncpy(sub_config
.src_filename
, srtfile
, 255);
1309 sub_config
.src_filename
[255] = 0;
1311 char *srtlang
= "und";
1312 char *srtcodeset
= "UTF-8";
1313 result
= json_unpack_ex(subtitle_dict
, &error
, 0,
1314 "{s?b, s?b, s?i, " // Common
1315 "s?{s?s, s?s, s?s}}", // SRT
1316 "Default", unpack_b(&sub_config
.default_track
),
1317 "Burn", unpack_b(&burn
),
1318 "Offset", unpack_I(&offset
),
1320 "Filename", unpack_s(&srtfile
),
1321 "Language", unpack_s(&srtlang
),
1322 "Codeset", unpack_s(&srtcodeset
));
1325 hb_error("json unpack failure: %s", error
.text
);
1329 sub_config
.offset
= offset
;
1330 sub_config
.dest
= burn
? RENDERSUB
: PASSTHRUSUB
;
1331 strncpy(sub_config
.src_codeset
, srtcodeset
, 39);
1332 sub_config
.src_codeset
[39] = 0;
1333 hb_srt_add(job
, &sub_config
, srtlang
);
1345 hb_job_t
* hb_json_to_job( hb_handle_t
* h
, const char * json_job
)
1347 hb_dict_t
*dict
= hb_value_json(json_job
);
1348 hb_job_t
*job
= hb_dict_to_job(h
, dict
);
1349 hb_value_free(&dict
);
1354 * Initialize an hb_job_t and return a json string representation of the job
1355 * @param h - Pointer to hb_handle_t instance that contains the
1356 * specified title_index
1357 * @param title_index - Index of hb_title_t to use for job initialization.
1358 * Index comes from title->index or "Index" key
1359 * in json representation of a title.
1361 char* hb_job_init_json(hb_handle_t
*h
, int title_index
)
1363 hb_job_t
*job
= hb_job_init_by_index(h
, title_index
);
1364 char *json_job
= hb_job_to_json(job
);
1370 * Add a json string job to the hb queue
1371 * @param h - Pointer to hb_handle_t instance that job is added to
1372 * @param json_job - json string representation of job to add
1374 int hb_add_json( hb_handle_t
* h
, const char * json_job
)
1378 job
.json
= json_job
;
1386 * Calculates destination width and height for anamorphic content
1388 * Returns geometry as json string {Width, Height, PAR {Num, Den}}
1389 * @param json_param - contains source and destination geometry params.
1390 * This encapsulates the values that are in
1391 * hb_geometry_t and hb_geometry_settings_t
1393 char* hb_set_anamorphic_size_json(const char * json_param
)
1398 hb_geometry_t geo_result
;
1400 hb_geometry_settings_t ui_geo
;
1402 // Clear dest geometry since some fields are optional.
1403 memset(&ui_geo
, 0, sizeof(ui_geo
));
1405 dict
= hb_value_json(json_param
);
1406 json_result
= json_unpack_ex(dict
, &error
, 0,
1409 // {Width, Height, PAR {Num, Den}}
1410 "s:{s:i, s:i, s:{s:i, s:i}},"
1413 // Geometry {Width, Height, PAR {Num, Den}},
1414 "s:{s:i, s:i, s:{s:i, s:i}},"
1415 // AnamorphicMode, Keep, ItuPAR, Modulus, MaxWidth, MaxHeight,
1416 "s:i, s?i, s?b, s:i, s:i, s:i,"
1417 // Crop [Top, Bottom, Left, Right]
1422 "Width", unpack_i(&src
.width
),
1423 "Height", unpack_i(&src
.height
),
1425 "Num", unpack_i(&src
.par
.num
),
1426 "Den", unpack_i(&src
.par
.den
),
1429 "Width", unpack_i(&ui_geo
.geometry
.width
),
1430 "Height", unpack_i(&ui_geo
.geometry
.height
),
1432 "Num", unpack_i(&ui_geo
.geometry
.par
.num
),
1433 "Den", unpack_i(&ui_geo
.geometry
.par
.den
),
1434 "AnamorphicMode", unpack_i(&ui_geo
.mode
),
1435 "Keep", unpack_i(&ui_geo
.keep
),
1436 "ItuPAR", unpack_b(&ui_geo
.itu_par
),
1437 "Modulus", unpack_i(&ui_geo
.modulus
),
1438 "MaxWidth", unpack_i(&ui_geo
.maxWidth
),
1439 "MaxHeight", unpack_i(&ui_geo
.maxHeight
),
1440 "Crop", unpack_i(&ui_geo
.crop
[0]),
1441 unpack_i(&ui_geo
.crop
[1]),
1442 unpack_i(&ui_geo
.crop
[2]),
1443 unpack_i(&ui_geo
.crop
[3])
1445 hb_value_free(&dict
);
1447 if (json_result
< 0)
1449 hb_error("json unpack failure: %s", error
.text
);
1453 hb_set_anamorphic_size2(&src
, &ui_geo
, &geo_result
);
1455 dict
= json_pack_ex(&error
, 0,
1456 "{s:o, s:o, s:{s:o, s:o}}",
1457 "Width", hb_value_int(geo_result
.width
),
1458 "Height", hb_value_int(geo_result
.height
),
1460 "Num", hb_value_int(geo_result
.par
.num
),
1461 "Den", hb_value_int(geo_result
.par
.den
));
1464 hb_error("hb_set_anamorphic_size_json: pack failure: %s", error
.text
);
1467 char *result
= hb_value_get_json(dict
);
1468 hb_value_free(&dict
);
1473 char* hb_get_preview_json(hb_handle_t
* h
, const char *json_param
)
1476 int ii
, title_idx
, preview_idx
, deinterlace
= 0;
1481 hb_geometry_settings_t settings
;
1483 // Clear dest geometry since some fields are optional.
1484 memset(&settings
, 0, sizeof(settings
));
1486 dict
= hb_value_json(json_param
);
1487 json_result
= json_unpack_ex(dict
, &error
, 0,
1489 // Title, Preview, Deinterlace
1493 // Geometry {Width, Height, PAR {Num, Den}},
1494 "s:{s:i, s:i, s:{s:i, s:i}},"
1495 // AnamorphicMode, Keep, ItuPAR, Modulus, MaxWidth, MaxHeight,
1496 "s:i, s?i, s?b, s:i, s:i, s:i,"
1497 // Crop [Top, Bottom, Left, Right]
1501 "Title", unpack_i(&title_idx
),
1502 "Preview", unpack_i(&preview_idx
),
1503 "Deinterlace", unpack_b(&deinterlace
),
1506 "Width", unpack_i(&settings
.geometry
.width
),
1507 "Height", unpack_i(&settings
.geometry
.height
),
1509 "Num", unpack_i(&settings
.geometry
.par
.num
),
1510 "Den", unpack_i(&settings
.geometry
.par
.den
),
1511 "AnamorphicMode", unpack_i(&settings
.mode
),
1512 "Keep", unpack_i(&settings
.keep
),
1513 "ItuPAR", unpack_b(&settings
.itu_par
),
1514 "Modulus", unpack_i(&settings
.modulus
),
1515 "MaxWidth", unpack_i(&settings
.maxWidth
),
1516 "MaxHeight", unpack_i(&settings
.maxHeight
),
1517 "Crop", unpack_i(&settings
.crop
[0]),
1518 unpack_i(&settings
.crop
[1]),
1519 unpack_i(&settings
.crop
[2]),
1520 unpack_i(&settings
.crop
[3])
1522 hb_value_free(&dict
);
1524 if (json_result
< 0)
1526 hb_error("preview params: json unpack failure: %s", error
.text
);
1530 image
= hb_get_preview2(h
, title_idx
, preview_idx
, &settings
, deinterlace
);
1536 dict
= json_pack_ex(&error
, 0,
1538 "Format", hb_value_int(image
->format
),
1539 "Width", hb_value_int(image
->width
),
1540 "Height", hb_value_int(image
->height
));
1543 hb_error("hb_get_preview_json: pack failure: %s", error
.text
);
1547 hb_value_array_t
* planes
= hb_value_array_init();
1548 for (ii
= 0; ii
< 4; ii
++)
1550 int base64size
= AV_BASE64_SIZE(image
->plane
[ii
].size
);
1551 if (image
->plane
[ii
].size
<= 0 || base64size
<= 0)
1554 char *plane_base64
= calloc(base64size
, 1);
1555 av_base64_encode(plane_base64
, base64size
,
1556 image
->plane
[ii
].data
, image
->plane
[ii
].size
);
1558 base64size
= strlen(plane_base64
);
1559 hb_dict_t
*plane_dict
;
1560 plane_dict
= json_pack_ex(&error
, 0,
1561 "{s:o, s:o, s:o, s:o, s:o, s:o}",
1562 "Width", hb_value_int(image
->plane
[ii
].width
),
1563 "Height", hb_value_int(image
->plane
[ii
].height
),
1564 "Stride", hb_value_int(image
->plane
[ii
].stride
),
1565 "HeightStride", hb_value_int(image
->plane
[ii
].height_stride
),
1566 "Size", hb_value_int(base64size
),
1567 "Data", hb_value_string(plane_base64
)
1569 if (plane_dict
== NULL
)
1571 hb_error("plane_dict: json pack failure: %s", error
.text
);
1574 hb_value_array_append(planes
, plane_dict
);
1576 hb_dict_set(dict
, "Planes", planes
);
1577 hb_image_close(&image
);
1579 char *result
= hb_value_get_json(dict
);
1580 hb_value_free(&dict
);
1585 char* hb_get_preview_params_json(int title_idx
, int preview_idx
,
1586 int deinterlace
, hb_geometry_settings_t
*settings
)
1591 dict
= json_pack_ex(&error
, 0,
1595 " s:{s:o, s:o, s:{s:o, s:o}},"
1596 " s:o, s:o, s:o, s:o, s:o, s:o"
1600 "Title", hb_value_int(title_idx
),
1601 "Preview", hb_value_int(preview_idx
),
1602 "Deinterlace", hb_value_bool(deinterlace
),
1605 "Width", hb_value_int(settings
->geometry
.width
),
1606 "Height", hb_value_int(settings
->geometry
.height
),
1608 "Num", hb_value_int(settings
->geometry
.par
.num
),
1609 "Den", hb_value_int(settings
->geometry
.par
.den
),
1610 "AnamorphicMode", hb_value_int(settings
->mode
),
1611 "Keep", hb_value_int(settings
->keep
),
1612 "ItuPAR", hb_value_bool(settings
->itu_par
),
1613 "Modulus", hb_value_int(settings
->modulus
),
1614 "MaxWidth", hb_value_int(settings
->maxWidth
),
1615 "MaxHeight", hb_value_int(settings
->maxHeight
),
1616 "Crop", hb_value_int(settings
->crop
[0]),
1617 hb_value_int(settings
->crop
[1]),
1618 hb_value_int(settings
->crop
[2]),
1619 hb_value_int(settings
->crop
[3])
1623 hb_error("hb_get_preview_params_json: pack failure: %s", error
.text
);
1627 char *result
= hb_value_get_json(dict
);
1628 hb_value_free(&dict
);
1633 hb_image_t
* hb_json_to_image(char *json_image
)
1638 int pix_fmt
, width
, height
;
1639 dict
= hb_value_json(json_image
);
1640 json_result
= json_unpack_ex(dict
, &error
, 0,
1642 // Format, Width, Height
1645 "Format", unpack_i(&pix_fmt
),
1646 "Width", unpack_i(&width
),
1647 "Height", unpack_b(&height
)
1649 if (json_result
< 0)
1651 hb_error("image: json unpack failure: %s", error
.text
);
1652 hb_value_free(&dict
);
1656 hb_image_t
*image
= hb_image_init(pix_fmt
, width
, height
);
1659 hb_value_free(&dict
);
1663 hb_value_array_t
* planes
= NULL
;
1664 json_result
= json_unpack_ex(dict
, &error
, 0,
1665 "{s:o}", "Planes", unpack_o(&planes
));
1666 if (json_result
< 0)
1668 hb_error("image::planes: json unpack failure: %s", error
.text
);
1669 hb_value_free(&dict
);
1672 if (hb_value_type(planes
) == HB_VALUE_TYPE_ARRAY
)
1675 hb_dict_t
*plane_dict
;
1676 count
= hb_value_array_len(planes
);
1677 for (ii
= 0; ii
< count
; ii
++)
1679 plane_dict
= hb_value_array_get(planes
, ii
);
1682 json_result
= json_unpack_ex(plane_dict
, &error
, 0,
1684 "Size", unpack_i(&size
),
1685 "Data", unpack_s(&data
));
1686 if (json_result
< 0)
1688 hb_error("image::plane::data: json unpack failure: %s", error
.text
);
1689 hb_value_free(&dict
);
1692 if (image
->plane
[ii
].size
> 0 && data
!= NULL
)
1694 av_base64_decode(image
->plane
[ii
].data
, data
,
1695 image
->plane
[ii
].size
);
1699 hb_value_free(&dict
);