/* nyanmp.git: npv/video/local/code.frag.c */
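/*
 * conventions (assumed from context, not stated in this fragment): STATIC
 * presumably expands to "static" in regular builds, "loop" to "while (1)",
 * and the vk_*() calls look like wrapper macros which store their result in
 * the local "r" that IF_FATALVVK checks right after.
 */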
STATIC void fatal(u8 *fmt, ...)
{
	va_list ap;

	npv_perr("video:");
	va_start(ap, fmt);
	npv_vfatal(fmt, ap);
	va_end(ap); /* unreachable */
}

STATIC void warning(u8 *fmt, ...)
{
	va_list ap;

	npv_perr("video:");
	va_start(ap, fmt);
	npv_vwarning(fmt, ap);
	va_end(ap);
}

STATIC void fatalff(u8 *fmt, ...)
{
	va_list ap;

	npv_perr("ffmpeg:");
	va_start(ap, fmt);
	npv_vfatal(fmt, ap);
	va_end(ap); /* unreachable */
}

STATIC void poutff(u8 *fmt, ...)
{
	va_list ap;

	npv_pout("ffmpeg:");
	va_start(ap, fmt);
	npv_vpout(fmt, ap);
	va_end(ap);
}

STATIC void fatalvk(u8 *fmt, ...)
{
	va_list ap;

	npv_perr("video:");
	va_start(ap, fmt);
	npv_vk_vfatal(fmt, ap);
	va_end(ap); /* unreachable */
}

STATIC void warningvk(u8 *fmt, ...)
{
	va_list ap;

	npv_perr("video:");
	va_start(ap, fmt);
	npv_vk_vwarning(fmt, ap);
	va_end(ap);
}
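/*
 * the -1 below are sentinel values: a viewport that was never computed can
 * never match the current swapchain extent, so the first blit_setup() call
 * always goes through blit_compute_offsets()
 */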
STATIC void init_once_local(void)
{
	u8 i;

	dec_l = 0;
	memset(&tmp_mem_rqmts_l, 0, sizeof(tmp_mem_rqmts_l));
	i = 0;
	loop {
		if (i == npv_vk_swpchn_imgs_n_max)
			break;
		blit_l[i].vp.width = -1;
		blit_l[i].vp.height = -1;
		blit_l[i].vp.top_left.x = -1;
		blit_l[i].vp.top_left.y = -1;
		blit_l[i].vp.top_left.z = -1;
		blit_l[i].vp.bottom_right.x = -1;
		blit_l[i].vp.bottom_right.y = -1;
		blit_l[i].vp.bottom_right.z = -1;
		++i;
	}
	receive_fr_l = avutil_video_fr_ref_alloc();
}
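/*
 * the scaler output image is created with linear tiling and transfer-src
 * usage: the cpu writes scaled pixels straight into its mapped memory and
 * the gpu only ever blits from it
 */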
STATIC void scaler_img_create(avutil_video_fr_ref_t *fr)
{
	struct vk_img_create_info_t info;
	s32 r;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_img_create_info;
	info.flags = vk_img_create_flag_2d_array_compatible_bit;
	info.img_type = vk_img_type_2d;
	info.texel_mem_blk_fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
	info.extent.width = (u32)fr->width;
	info.extent.height = (u32)fr->height;
	info.extent.depth = 1;
	info.mip_lvls_n = 1;
	info.samples_n = vk_samples_n_1_bit;
	info.array_layers_n = 1;
	info.img_tiling = vk_img_tiling_linear;
	info.usage = vk_img_usage_transfer_src_bit;
	info.initial_layout = vk_img_layout_undefined;
	vk_create_img(&info, &scaler_p.img.vk);
	IF_FATALVVK("%d:device:%p:unable to create scaler frame image\n", r, npv_vk_surf_p.dev.vk);
}
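/*
 * note the fully synchronous design of the one-shot barrier below: record on
 * cbs[0], submit, then wait for queue idle instead of using a fence; simple
 * and acceptable for an init-only path
 */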
STATIC void img_mem_barrier_run_once(struct vk_img_mem_barrier_t *b)
{
	s32 r;
	struct vk_cb_begin_info_t begin_info;
	struct vk_submit_info_t submit_info;

	memset(&begin_info, 0, sizeof(begin_info));
	begin_info.type = vk_struct_type_cb_begin_info;
	begin_info.flags = vk_cb_usage_one_time_submit_bit;
	/* we use the first cb, which will later be used for the swpchn */
	vk_begin_cb(npv_vk_surf_p.dev.cbs[0], &begin_info);
	IF_FATALVVK("%d:unable to begin recording the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]);
	/*--------------------------------------------------------------------*/
	vk_cmd_pl_barrier(npv_vk_surf_p.dev.cbs[0], b);
	/*--------------------------------------------------------------------*/
	vk_end_cb(npv_vk_surf_p.dev.cbs[0]);
	IF_FATALVVK("%d:unable to end recording of the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]);
	/*--------------------------------------------------------------------*/
	memset(&submit_info, 0, sizeof(submit_info));
	submit_info.type = vk_struct_type_submit_info;
	submit_info.cbs_n = 1;
	submit_info.cbs = &npv_vk_surf_p.dev.cbs[0];
	vk_q_submit(&submit_info);
	IF_FATALVVK("%d:queue:%p:unable to submit the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.q);
	/*--------------------------------------------------------------------*/
	vk_q_wait_idle();
	IF_FATALVVK("%d:queue:%p:unable to wait for idle or completion of the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.q);
	/*--------------------------------------------------------------------*/
	/*
	 * since the cb was recorded as one-time-submit, its state is now
	 * invalid; we need to reset it to the initial state
	 */
	vk_reset_cb(npv_vk_surf_p.dev.cbs[0]);
	IF_FATALVVK("%d:command buffer:%p:unable to reset the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]);
}
/* once in the general layout, the device sees the image */
STATIC void scaler_img_layout_to_general(void)
{
	struct vk_img_mem_barrier_t b;
	struct vk_img_subrsrc_range_t *r;

	memset(&b, 0, sizeof(b));
	b.type = vk_struct_type_img_mem_barrier;
	b.old_layout = vk_img_layout_undefined;
	b.new_layout = vk_img_layout_general;
	b.src_q_fam = vk_q_fam_ignored;
	b.dst_q_fam = vk_q_fam_ignored;
	b.img = scaler_p.img.vk;
	r = &b.subrsrc_range;
	r->aspect = vk_img_aspect_color_bit;
	r->lvls_n = 1;
	r->array_layers_n = 1;
	img_mem_barrier_run_once(&b);
}
STATIC void scaler_img_subrsrc_layout_get(void)
{
	struct vk_img_subrsrc_t s;

	memset(&s, 0, sizeof(s));
	/* one subrsrc = the unique color plane of mip lvl 0, array layer 0 */
	s.aspect = vk_img_aspect_color_bit;
	vk_get_img_subrsrc_layout(scaler_p.img.vk, &s, &scaler_p.img.layout);
}
STATIC void tmp_scaler_img_mem_rqmts_get(void)
{
	struct vk_img_mem_rqmts_info_t info;
	struct vk_mem_rqmts_t *rqmts;
	s32 r;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_img_mem_rqmts_info;
	info.img = scaler_p.img.vk;
	rqmts = &tmp_mem_rqmts_l;
	memset(rqmts, 0, sizeof(*rqmts));
	rqmts->type = vk_struct_type_mem_rqmts;
	vk_get_img_mem_rqmts(&info, rqmts);
	IF_FATALVVK("%d:device:%p:unable to get memory requirements for scaler image\n", r, npv_vk_surf_p.dev.vk);
}
#define WANTED_MEM_PROPS (vk_mem_prop_host_visible_bit \
					| vk_mem_prop_host_cached_bit)
#define IS_DEV_LOCAL(x) (((x)->prop_flags & vk_mem_prop_dev_local_bit) != 0)
STATIC bool match_mem_type(u8 mem_type_idx,
		struct vk_mem_rqmts_t *img_rqmts, bool ignore_gpu_is_discret)
{
	struct vk_mem_type_t *mem_type;

	/* first check this mem type is in our img rqmts */
	if (((1 << mem_type_idx) & img_rqmts->core.mem_type_bits) == 0)
		return false;
	mem_type = &npv_vk_surf_p.dev.phydev.mem_types[mem_type_idx];
	if (!ignore_gpu_is_discret)
		if (npv_vk_surf_p.dev.phydev.is_discret_gpu
						&& IS_DEV_LOCAL(mem_type))
			return false;
	if ((mem_type->prop_flags & WANTED_MEM_PROPS) == WANTED_MEM_PROPS)
		return true;
	return false;
}
#undef WANTED_MEM_PROPS
#undef IS_DEV_LOCAL
STATIC bool try_alloc_scaler_img_dev_mem(struct vk_mem_rqmts_t *img_rqmts,
							u8 mem_type_idx)
{
	struct vk_mem_alloc_info_t info;
	s32 r;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_mem_alloc_info;
	info.sz = img_rqmts->core.sz;
	info.mem_type_idx = mem_type_idx;
	vk_alloc_mem(&info, &scaler_p.img.dev_mem);
	if (r < 0) {
		warningvk("%d:device:%p:unable to allocate %lu bytes from physical dev %p memory type %u\n", r, npv_vk_surf_p.dev.vk, img_rqmts->core.sz, npv_vk_surf_p.dev.phydev.vk, mem_type_idx);
		return false;
	}
	return true;
}
/*
 * we are looking for host visible and host cached mem. on a discrete gpu we
 * would prefer non device-local mem, in order to avoid wasting video ram. if
 * we have a discrete gpu but could not find a mem type without device-local
 * mem, retry with only host visible and host cached mem.
 */
#define IGNORE_GPU_IS_DISCRET true
STATIC void scaler_img_dev_mem_alloc(void)
{
	struct vk_mem_rqmts_t *img_rqmts;
	u8 mem_type;

	img_rqmts = &tmp_mem_rqmts_l;
	mem_type = 0;
	loop {
		if (mem_type == npv_vk_surf_p.dev.phydev.mem_types_n)
			break;
		if (match_mem_type(mem_type, img_rqmts, !IGNORE_GPU_IS_DISCRET))
			if (try_alloc_scaler_img_dev_mem(img_rqmts, mem_type))
				return;
		++mem_type;
	}
	if (!npv_vk_surf_p.dev.phydev.is_discret_gpu)
		fatalvk("physical device:%p:scaler image:unable to find a proper memory type or to allocate memory\n", npv_vk_surf_p.dev.phydev.vk);
	/*
	 * look up again, but relax the match by dropping the discrete-gpu
	 * constraint
	 */
	mem_type = 0;
	loop {
		if (mem_type == npv_vk_surf_p.dev.phydev.mem_types_n)
			break;
		if (match_mem_type(mem_type, img_rqmts, IGNORE_GPU_IS_DISCRET)
			&& try_alloc_scaler_img_dev_mem(img_rqmts, mem_type))
			return;
		++mem_type;
	}
	fatalvk("physical device:%p:unable to find a proper memory type or to allocate memory\n", npv_vk_surf_p.dev.phydev.vk);
}
#undef IGNORE_GPU_IS_DISCRET
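/*
 * worked example (hypothetical discrete gpu): with mem_type_bits == 0x0007,
 * only memory types 0..2 are eligible. if type 0 is device local (vram),
 * type 1 is host visible + host cached, and type 2 only host visible, the
 * first pass skips type 0 (device local on a discrete gpu) and type 2
 * (missing host cached) and allocates from type 1; only if that allocation
 * fails does the second pass reconsider device-local types.
 */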
STATIC void scaler_img_dev_mem_bind(void)
{
	struct vk_bind_img_mem_info_t info;
	s32 r;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_bind_img_mem_info;
	info.img = scaler_p.img.vk;
	info.mem = scaler_p.img.dev_mem;
	/*
	 * TODO: switch to vkBindImageMemory2 if the extension is in vk 1.1,
	 * for consistency
	 */
	vk_bind_img_mem(&info);
	IF_FATALVVK("%d:device:%p:scaler image:unable to bind device memory to image\n", r, npv_vk_surf_p.dev.vk);
}
STATIC void scaler_img_dev_mem_map(void)
{
	s32 r;

	vk_map_mem(scaler_p.img.dev_mem, &scaler_p.img.data);
	IF_FATALVVK("%d:device:%p:scaler image:unable to map image memory\n", r, npv_vk_surf_p.dev.vk);
}
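/*
 * scaler image lifecycle, in call order (see start_scaling() below):
 * create -> layout to general -> get subresource layout -> get memory
 * requirements -> allocate -> bind -> map
 */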
STATIC void dec_a_grow(void)
{
	u16 new_idx;

	new_idx = dec_frs_p.n_max;
	dec_frs_p.a = realloc(dec_frs_p.a, sizeof(*dec_frs_p.a)
						* (dec_frs_p.n_max + 1));
	if (dec_frs_p.a == 0)
		fatal("unable to allocate memory for an additional pointer to a decoded frame reference\n");
	dec_frs_p.priv_a = realloc(dec_frs_p.priv_a,
			sizeof(*dec_frs_p.priv_a) * (dec_frs_p.n_max + 1));
	if (dec_frs_p.priv_a == 0)
		fatal("unable to allocate memory for an additional pointer to private data for decoded frames\n");

	dec_frs_p.a[new_idx] = avutil_video_fr_ref_alloc();
	if (dec_frs_p.a[new_idx] == 0)
		fatal("ffmpeg:unable to allocate a decoded frame reference\n");
	dec_frs_p.priv_a[new_idx] = calloc(1, sizeof(**dec_frs_p.priv_a));
	if (dec_frs_p.priv_a[new_idx] == 0)
		fatal("unable to allocate decoded frame private data\n");

	++dec_frs_p.n_max;
}
#define NO_FR 0
/*
 * extract a fr ref, shift the array, push the ref back at the end, and unref
 * its buffers
 */
STATIC void fr_drop(u16 fr)
{
	struct dec_fr_priv_t *priv_save;
	avutil_video_fr_ref_t *save;

	priv_save = dec_frs_p.priv_a[fr];
	if (!priv_save->was_qed_to_pe)
		warning("dropping undisplayed frame\n");
	save = dec_frs_p.a[fr];
	avutil_video_fr_unref(save);
	memset(priv_save, 0, sizeof(*priv_save));
	if (dec_frs_p.n > 1) {
		u16 e;

		e = dec_frs_p.n;
		memmove(&dec_frs_p.a[fr], &dec_frs_p.a[fr + 1],
					sizeof(*dec_frs_p.a) * (e - (fr + 1)));
		dec_frs_p.a[e - 1] = save;

		memmove(&dec_frs_p.priv_a[fr], &dec_frs_p.priv_a[fr + 1],
				sizeof(*dec_frs_p.priv_a) * (e - (fr + 1)));
		dec_frs_p.priv_a[e - 1] = priv_save;
	}
	dec_frs_p.n--;
}
#undef NO_FR
#define NO_FR 0
STATIC void frs_drop(s64 now)
{
	s64 low;
	s64 threshold;
	u16 fr;

	/* audio can be late by up to 0.25 s, and audio is 'now' */
	threshold = (250 * st_p.tb.den) / (st_p.tb.num * 1000);
	low = now - threshold;
	fr = 0;
	loop {
		s64 pts;
		struct dec_fr_priv_t *fr_priv;

		if (dec_frs_p.n == fr)
			break;

		pts = dec_frs_p.a[fr]->pts;
		fr_priv = dec_frs_p.priv_a[fr];

		/* keep the fr the scaler is related to */
		if ((dec_frs_p.a[fr] != scaler_p.img.fr) && (pts < low)) {
			if (dec_frs_p.a[fr] == last_fr_sent_to_pe_l)
				last_fr_sent_to_pe_l = NO_FR;
			fr_drop(fr); /* do not advance */
		} else
			++fr;
	}
}
#undef NO_FR
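/*
 * threshold arithmetic above, worked through: with a 90 kHz stream time base
 * (tb.num == 1, tb.den == 90000), threshold = (250 * 90000) / (1 * 1000)
 * = 22500 ticks, i.e. 0.25 s; frames older than "now - 0.25 s" get dropped.
 */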
#define NO_FR 0
/* select the decoded fr whose pts is closest to 'now' */
STATIC void select_fr(s64 now, avutil_video_fr_ref_t **selected_fr,
				struct dec_fr_priv_t **selected_fr_priv)
{
	u16 fr_idx;
	u16 selected_fr_idx;
	u64 selected_fr_delta;

	fr_idx = 0;
	*selected_fr = NO_FR;
	selected_fr_delta = S64_MAX;
	loop {
		u64 delta;

		if (fr_idx == dec_frs_p.n)
			break;
		delta = s64_abs(now - (s64)dec_frs_p.a[fr_idx]->pts);
		if (delta < selected_fr_delta) {
			*selected_fr = dec_frs_p.a[fr_idx];
			*selected_fr_priv = dec_frs_p.priv_a[fr_idx];
			selected_fr_idx = fr_idx;
			selected_fr_delta = delta;
		}
		++fr_idx;
	}
}
#undef NO_FR
STATIC void frs_reset(void)
{
	u16 fr;

	fr = 0;
	loop {
		if (fr == dec_frs_p.n)
			break;
		avutil_video_fr_unref(dec_frs_p.a[fr]);
		memset(dec_frs_p.priv_a[fr], 0, sizeof(**dec_frs_p.priv_a));
		++fr;
	}
	dec_frs_p.n = 0;
}
STATIC void scaler_img_destroy(void)
{
	vk_destroy_img(scaler_p.img.vk);
	scaler_p.img.vk = 0;

	vk_unmap_mem(scaler_p.img.dev_mem);
	scaler_p.img.data = 0;

	vk_free_mem(scaler_p.img.dev_mem);
	scaler_p.img.dev_mem = 0;
}
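/*
 * blit_compute_offsets() below letterboxes or pillarboxes: it fits the video
 * aspect ratio into the swapchain extent and centers it, leaving symmetric
 * black bars on one axis
 */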
STATIC void blit_compute_offsets(u8 swpchn_img,
					struct vk_extent_2d_t *new_vp)
{
	struct blit_vp_t *vp;
	s32 want_width;
	s32 want_height;

	vp = &blit_l[swpchn_img].vp;
	/*
	 * XXX: THE BOUNDS OF THE BLIT ARE NOT PIXEL OFFSETS! THOSE ARE
	 * INTEGER BOUNDS FOR TEXEL COORDS WHICH ARE TAKEN AT THE CENTER OF
	 * EACH PIXEL: NAMELY, LAST TEXEL INTEGER BOUND = LAST PIXEL OFFSET + 1.
	 */
	want_width = new_vp->height * aspect_ratio.width / aspect_ratio.height;
	want_height = new_vp->width * aspect_ratio.height / aspect_ratio.width;
	if (want_width < new_vp->width) {
		s32 gap;

		vp->top_left.y = 0;
		vp->bottom_right.y = new_vp->height;

		gap = new_vp->width - want_width;
		vp->top_left.x = gap / 2;
		vp->bottom_right.x = new_vp->width - gap / 2;
	} else if (want_height < new_vp->height) {
		s32 gap;

		vp->top_left.x = 0;
		vp->bottom_right.x = new_vp->width;

		gap = new_vp->height - want_height;
		vp->top_left.y = gap / 2;
		vp->bottom_right.y = new_vp->height - gap / 2;
	} else {
		vp->top_left.x = 0;
		vp->top_left.y = 0;
		vp->bottom_right.x = new_vp->width;
		vp->bottom_right.y = new_vp->height;
	}
	/* keep track in order to detect change */
	vp->width = new_vp->width;
	vp->height = new_vp->height;
}
STATIC void blit_setup(u8 swpchn_img, bool scaler_dims_changed)
{
	s32 r;
	struct vk_cb_begin_info_t begin_info;
	struct vk_img_mem_barrier_t b;
	struct vk_img_blit_t region;
	struct vk_extent_2d_t *current;
	union vk_clr_color_val_t clr_color_val;
	struct vk_img_subrsrc_range_t range;

	current = &npv_vk_surf_p.dev.phydev.surf_caps.core.current_extent;

	if (!scaler_dims_changed && blit_l[swpchn_img].vp.width
			== current->width && blit_l[swpchn_img].vp.height
			== current->height)
		return;

	blit_compute_offsets(swpchn_img, current);

	/* sync: the cb may be in the pending state */
	vk_reset_cb(npv_vk_surf_p.dev.cbs[swpchn_img]);
	IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable to reset\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]);
	/*--------------------------------------------------------------------*/
	memset(&begin_info, 0, sizeof(begin_info));
	begin_info.type = vk_struct_type_cb_begin_info;
	vk_begin_cb(npv_vk_surf_p.dev.cbs[swpchn_img], &begin_info);
	IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable to begin recording\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]);
	/*--------------------------------------------------------------------*/
	/* acquired img (undefined layout) to presentation layout */
	memset(&b, 0, sizeof(b));
	b.type = vk_struct_type_img_mem_barrier;
	b.old_layout = vk_img_layout_undefined;
	b.new_layout = vk_img_layout_present;
	b.src_q_fam = vk_q_fam_ignored;
	b.dst_q_fam = vk_q_fam_ignored;
	b.img = npv_vk_surf_p.dev.swpchn.imgs[swpchn_img];
	b.subrsrc_range.aspect = vk_img_aspect_color_bit;
	b.subrsrc_range.lvls_n = 1;
	b.subrsrc_range.array_layers_n = 1;
	vk_cmd_pl_barrier(npv_vk_surf_p.dev.cbs[swpchn_img], &b);
	/*--------------------------------------------------------------------*/
	/* clear the viewport with integer black pixels since we work in sRGB */
	memset(&clr_color_val, 0, sizeof(clr_color_val));
	memset(&range, 0, sizeof(range));
	range.aspect = vk_img_aspect_color_bit;
	range.lvls_n = 1;
	range.array_layers_n = 1;
	vk_cmd_clr_color_img(npv_vk_surf_p.dev.cbs[swpchn_img],
			npv_vk_surf_p.dev.swpchn.imgs[swpchn_img],
			vk_img_layout_present, &clr_color_val, 1,
			&range);
	/*--------------------------------------------------------------------*/
	/* blit from cpu img to pe img */
	memset(&region, 0, sizeof(region));
	region.src_subrsrc.aspect = vk_img_aspect_color_bit;
	region.src_subrsrc.array_layers_n = 1;
	/* scaler */
	region.src_offsets[1].x = scaler_p.ctx->cfg.width;
	region.src_offsets[1].y = scaler_p.ctx->cfg.height;
	region.src_offsets[1].z = 1; /* see vk specs */
	region.dst_subrsrc.aspect = vk_img_aspect_color_bit;
	region.dst_subrsrc.array_layers_n = 1;
	/* xcb viewport */
	memcpy(&region.dst_offsets[0], &blit_l[swpchn_img].vp.top_left,
					sizeof(region.dst_offsets[0]));
	region.dst_offsets[0].z = 0; /* see vk specs */
	memcpy(&region.dst_offsets[1], &blit_l[swpchn_img].vp.bottom_right,
					sizeof(region.dst_offsets[1]));
	region.dst_offsets[1].z = 1; /* see vk specs */
	vk_cmd_blit_img(npv_vk_surf_p.dev.cbs[swpchn_img], scaler_p.img.vk,
			npv_vk_surf_p.dev.swpchn.imgs[swpchn_img], &region);
	/*--------------------------------------------------------------------*/
	vk_end_cb(npv_vk_surf_p.dev.cbs[swpchn_img]);
	IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable to end recording\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]);
}
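/*
 * non-blocking acquire below: a timeout of 0 makes the caller get NOT_READY
 * instead of blocking, and an out-of-date/suboptimal swapchain is rebuilt
 * via npv_vk_swpchn_update() before retrying the acquire
 */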
#define READY 0
#define NOT_READY 1
STATIC u8 swpchn_next_img(u32 *swpchn_img)
{
	loop {
		struct vk_acquire_next_img_info_t info;
		s32 r;

		memset(&info, 0, sizeof(info));
		info.type = vk_struct_type_acquire_next_img_info;
		info.swpchn = npv_vk_surf_p.dev.swpchn.vk;
		info.timeout = 0;
		info.devs = 0x00000001; /* no device group, then 1 */
		info.sem = npv_vk_surf_p.dev.sems[npv_vk_sem_acquire_img_done];
		vk_acquire_next_img(&info, swpchn_img);
		if (r == vk_not_ready)
			return NOT_READY;
		else if (r == vk_out_of_date || r == vk_suboptimal) {
			npv_vk_swpchn_update();
			continue;
		} else if (r >= 0)
			return READY;
		npv_vk_fatal("%d:device:%p:unable to acquire next image from swapchain %p\n", r, npv_vk_surf_p.dev.vk, npv_vk_surf_p.dev.swpchn.vk);
	}
}
#undef READY
#undef NOT_READY
#define SENT 0
#define SWPCHN_UPDATED 1
STATIC u8 send_to_pe(u32 swpchn_img)
{
	struct vk_submit_info_t submit_info;
	struct vk_present_info_t present_info;
	u32 wait_dst_stage;
	s32 r;
	u32 idxs[1];

	/* run the command buffer, then queue the present */
	/*--------------------------------------------------------------------*/
	memset(&submit_info, 0, sizeof(submit_info));
	submit_info.type = vk_struct_type_submit_info;
	submit_info.wait_sems_n = 1;
	submit_info.wait_sems =
			&npv_vk_surf_p.dev.sems[npv_vk_sem_acquire_img_done];
	wait_dst_stage = vk_pl_stage_bottom_of_pipe_bit;
	submit_info.wait_dst_stages = &wait_dst_stage;
	submit_info.cbs_n = 1;
	submit_info.cbs = &npv_vk_surf_p.dev.cbs[swpchn_img];
	submit_info.signal_sems_n = 1;
	submit_info.signal_sems = &npv_vk_surf_p.dev.sems[npv_vk_sem_blit_done];
	vk_q_submit(&submit_info);
	IF_FATALVVK("%d:queue:%p:unable to submit the image pre-recorded command buffer\n", r, npv_vk_surf_p.dev.q);
	/*--------------------------------------------------------------------*/
	idxs[0] = swpchn_img;
	memset(&present_info, 0, sizeof(present_info));
	present_info.type = vk_struct_type_present_info;
	present_info.wait_sems_n = 1;
	present_info.wait_sems = &npv_vk_surf_p.dev.sems[npv_vk_sem_blit_done];
	present_info.swpchns_n = 1;
	present_info.swpchns = &npv_vk_surf_p.dev.swpchn.vk;
	present_info.idxs = idxs;
	present_info.results = 0;
	vk_q_present(&present_info);
	if (r == vk_out_of_date || r == vk_suboptimal) {
		npv_vk_swpchn_update();
		return SWPCHN_UPDATED;
	}
	IF_FATALVVK("%d:queue:%p:unable to submit the image %u to the presentation engine\n", r, npv_vk_surf_p.dev.q, swpchn_img);
	return SENT;
}
#undef SENT
#undef SWPCHN_UPDATED
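/*
 * start_scaling() rebuilds the whole scaler image path only when the decoded
 * frame dimensions change; otherwise it just rewires the sws slices/strides
 * and runs the threaded scaler on the current frame
 */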
STATIC void start_scaling(avutil_video_fr_ref_t *fr,
		struct dec_fr_priv_t *fr_priv, bool *scaler_dims_changed)
{
	u32 scaled_line_bytes_n;

	if (scaler_p.ctx->cfg.width != fr->width
				|| scaler_p.ctx->cfg.height != fr->height) {
		if (scaler_p.img.vk != 0)
			scaler_img_destroy();
		scaler_img_create(fr);
		scaler_img_layout_to_general();
		scaler_img_subrsrc_layout_get();
		tmp_scaler_img_mem_rqmts_get();
		scaler_img_dev_mem_alloc();
		scaler_img_dev_mem_bind();
		scaler_img_dev_mem_map();

		*scaler_dims_changed = true;
		scaler_p.ctx->cfg.width = fr->width;
		scaler_p.ctx->cfg.height = fr->height;
	} else
		*scaler_dims_changed = false;
	scaler_p.ctx->cfg.src_fmt = fr->fmt;
	scaler_p.ctx->cfg.dst_fmt = AVUTIL_PIX_FMT_RGB32;
	scaler_p.ctx->cfg.flags = SWS_POINT; /* | SWS_PRINT_INFO */

	scaled_line_bytes_n = (u32)scaler_p.img.layout.row_pitch;
	scaler_p.ctx->scale.src_slices = fr->data;
	scaler_p.ctx->scale.src_strides = fr->linesize;
	scaler_p.ctx->scale.dst_slice = scaler_p.img.data;
	scaler_p.ctx->scale.dst_stride = scaled_line_bytes_n;
	npv_thdsws_run(scaler_p.ctx);
	scaler_p.img.fr = fr;
}
STATIC void timer_ack(void)
{
	int r;
	uint64_t exps_n;

	exps_n = 0;
	r = read(timer_fd_p, &exps_n, sizeof(exps_n));
	if (r == -1)
		fatal("unable to read the number of timer expirations\n");
}
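/*
 * timer_fd_p is presumably a timerfd: a successful read() drains an 8-byte
 * expiration counter, which is why exps_n is a uint64_t
 */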