/*
 * One-shot local initialisation for this translation unit.
 * Zeroes the cached image memory requirements, seeds every
 * per-swapchain-image blit viewport field with -1 (sentinel meaning
 * "no previous dimensions recorded" -- TODO confirm against
 * blit_compute_offsets/blit_setup), then allocates the frame
 * reference used to receive decoded frames.
 * NOTE(review): the enclosing braces, the loop header for i and a few
 * other lines are not visible in this chunk of the file.
 */
static void init_once_local(void)
	memset(&tmp_mem_rqmts_l, 0, sizeof(tmp_mem_rqmts_l));
	/* NOTE(review): loop exit test -- the loop over i up to
	 * npv_vk_swpchn_imgs_n_max is not visible here */
	if (i == npv_vk_swpchn_imgs_n_max)
	blit_l[i].vp.width = -1;
	blit_l[i].vp.height = -1;
	blit_l[i].vp.top_left.x = -1;
	blit_l[i].vp.top_left.y = -1;
	blit_l[i].vp.top_left.z = -1;
	blit_l[i].vp.bottom_right.x = -1;
	blit_l[i].vp.bottom_right.y = -1;
	blit_l[i].vp.bottom_right.z = -1;
	/* frame reference reused to receive decoded frames */
	receive_fr = avutil_video_fr_ref_alloc();
/*
 * Creates the Vulkan image backing the software scaler output for a
 * decoded frame fr: 2D, B8G8R8A8 sRGB, linear tiling (CPU-writable
 * row-major layout), single sample, transfer-source usage, starting
 * in the undefined layout. The handle is stored in scaler_p.img.vk.
 * NOTE(review): braces and a few declaration lines (e.g. mip level
 * count) are not visible in this chunk.
 */
static void scaler_img_create(avutil_video_fr_ref_t *fr)
	struct vk_img_create_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_img_create_info;
	info.flags = vk_img_create_flag_2d_array_compatible_bit;
	info.img_type = vk_img_type_2d;
	info.texel_mem_blk_fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
	/* image extent follows the decoded frame dimensions */
	info.extent.width = (u32)fr->width;
	info.extent.height = (u32)fr->height;
	info.extent.depth = 1;
	info.samples_n = vk_samples_n_1_bit;
	info.array_layers_n = 1;
	/* linear tiling so the mapped memory can be written row by row */
	info.img_tiling = vk_img_tiling_linear;
	info.usage = vk_img_usage_transfer_src_bit;
	info.initial_layout = vk_img_layout_undefined;
	vk_create_img(&info, &scaler_p.img.vk);
	IF_FATALVVK("%d:device:%p:unable to create scaler frame image\n", r, npv_vk_surf_p.dev.vk);
/*
 * Records and synchronously submits a one-time command buffer that
 * executes the image memory barrier b (used for one-shot layout
 * transitions). Borrows command buffer cbs[0], waits for completion
 * (the wait call itself is on a line not visible in this chunk), then
 * resets the command buffer back to its initial state.
 */
static void img_mem_barrier_run_once(struct vk_img_mem_barrier_t *b)
	struct vk_cb_begin_info_t begin_info;
	struct vk_submit_info_t submit_info;
	memset(&begin_info, 0, sizeof(begin_info));
	begin_info.type = vk_struct_type_cb_begin_info;
	/* recorded once, submitted once */
	begin_info.flags = vk_cb_usage_one_time_submit_bit;
	/* we use the first cb which will be used for the swpchn */
	vk_begin_cb(npv_vk_surf_p.dev.cbs[0], &begin_info);
	IF_FATALVVK("%d:unable to begin recording the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]);
	/*--------------------------------------------------------------------*/
	vk_cmd_pl_barrier(npv_vk_surf_p.dev.cbs[0], b);
	/*--------------------------------------------------------------------*/
	vk_end_cb(npv_vk_surf_p.dev.cbs[0]);
	IF_FATALVVK("%d:unable to end recording of the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]);
	/*--------------------------------------------------------------------*/
	memset(&submit_info, 0, sizeof(submit_info));
	submit_info.type = vk_struct_type_submit_info;
	submit_info.cbs_n = 1;
	submit_info.cbs = &npv_vk_surf_p.dev.cbs[0];
	vk_q_submit(&submit_info);
	IF_FATALVVK("%d:queue:%p:unable to submit the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.q);
	/*--------------------------------------------------------------------*/
	/* NOTE(review): the queue wait-idle call is on a line not visible
	 * in this chunk; only its error check remains */
	IF_FATALVVK("%d:queue:%p:unable to wait for idle or completion of initial layout transition command buffer\n", r, npv_vk_surf_p.dev.q);
	/*--------------------------------------------------------------------*/
	/*
	 * since it is tagged to run once its state_p is invalid, we need to
	 * reset it to the initial state_p
	 */
	vk_reset_cb(npv_vk_surf_p.dev.cbs[0]);
	IF_FATALVVK("%d:command buffer:%p:unable to reset the initial layout transition command buffer\n", r, npv_vk_surf_p.dev.cbs[0]);
/* once in general layout, the dev sees the img */
/*
 * Transitions the scaler image from undefined to the general layout
 * via a one-shot barrier submission (img_mem_barrier_run_once).
 * NOTE(review): the line initialising r (presumably
 * r = &b.subrsrc_range) and the mip-level count assignment are not
 * visible in this chunk.
 */
static void scaler_img_layout_to_general(void)
	struct vk_img_mem_barrier_t b;
	struct vk_img_subrsrc_range_t *r;
	memset(&b, 0, sizeof(b));
	b.type = vk_struct_type_img_mem_barrier;
	b.old_layout = vk_img_layout_undefined;
	b.new_layout = vk_img_layout_general;
	/* no queue family ownership transfer */
	b.src_q_fam = vk_q_fam_ignored;
	b.dst_q_fam = vk_q_fam_ignored;
	b.img = scaler_p.img.vk;
	r->aspect = vk_img_aspect_color_bit;
	r->array_layers_n = 1;
	img_mem_barrier_run_once(&b);
/*
 * Queries the driver for the row-pitch/offset layout of the scaler
 * image's single color subresource and caches it in
 * scaler_p.img.layout (used later to compute the scaler destination
 * stride).
 */
static void scaler_img_subrsrc_layout_get(void)
	struct vk_img_subrsrc_t s;
	memset(&s, 0, sizeof(s));
	/* 1 subrsrc = uniq color plane of mip lvl 0 and array 0 */
	s.aspect = vk_img_aspect_color_bit;
	vk_get_img_subrsrc_layout(scaler_p.img.vk, &s, &scaler_p.img.layout);
/*
 * Fetches the memory requirements of the scaler image into the
 * file-scope scratch tmp_mem_rqmts_l (consumed right after by
 * scaler_img_dev_mem_alloc).
 */
static void tmp_scaler_img_mem_rqmts_get(void)
	struct vk_img_mem_rqmts_info_t info;
	struct vk_mem_rqmts_t *rqmts;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_img_mem_rqmts_info;
	info.img = scaler_p.img.vk;
	rqmts = &tmp_mem_rqmts_l;
	memset(rqmts, 0, sizeof(*rqmts));
	rqmts->type = vk_struct_type_mem_rqmts;
	vk_get_img_mem_rqmts(&info, rqmts);
	IF_FATALVVK("%d:device:%p:unable to get memory requirements for scaler image\n", r, npv_vk_surf_p.dev.vk);
/* properties required for the CPU-written scaler image memory */
#define WANTED_MEM_PROPS (vk_mem_prop_host_visible_bit \
	| vk_mem_prop_host_cached_bit)
#define IS_DEV_LOCAL(x) (((x)->prop_flags & vk_mem_prop_dev_local_bit) != 0)
/*
 * Returns whether physical-device memory type mem_type_idx is usable
 * for the scaler image: it must be listed in img_rqmts and offer
 * host-visible + host-cached properties; unless
 * ignore_gpu_is_discret, device-local types are rejected on discrete
 * GPUs (to avoid consuming VRAM).
 * NOTE(review): the return statements and braces are on lines not
 * visible in this chunk.
 */
static bool match_mem_type(u8 mem_type_idx,
	struct vk_mem_rqmts_t *img_rqmts, bool ignore_gpu_is_discret)
	struct vk_mem_type_t *mem_type;
	/* first check this mem type is in our img rqmts */
	if (((1 << mem_type_idx) & img_rqmts->core.mem_type_bits) == 0)
	mem_type = &npv_vk_surf_p.dev.phydev.mem_types[mem_type_idx];
	if (!ignore_gpu_is_discret)
		/* on a discrete GPU, skip device-local heaps */
		if (npv_vk_surf_p.dev.phydev.is_discret_gpu
			&& IS_DEV_LOCAL(mem_type))
	if ((mem_type->prop_flags & WANTED_MEM_PROPS) == WANTED_MEM_PROPS)
#undef WANTED_MEM_PROPS
/*
 * Attempts to allocate img_rqmts->core.sz bytes of device memory from
 * memory type mem_type_idx for the scaler image, storing the handle
 * in scaler_p.img.dev_mem. On failure only warns (caller retries with
 * other memory types). Returns success -- TODO confirm: the return
 * statements and the second parameter line (mem_type_idx) are not
 * visible in this chunk.
 */
static bool try_alloc_scaler_img_dev_mem(struct vk_mem_rqmts_t *img_rqmts,
	struct vk_mem_alloc_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_mem_alloc_info;
	info.sz = img_rqmts->core.sz;
	info.mem_type_idx = mem_type_idx;
	vk_alloc_mem(&info, &scaler_p.img.dev_mem);
	/* non-fatal: the caller will try the next candidate memory type */
	WARNINGVVK("%d:device:%p:unable to allocate %lu bytes from physical dev %p memory type %u\n", r, npv_vk_surf_p.dev.vk, img_rqmts->core.sz, npv_vk_surf_p.dev.phydev.vk, mem_type_idx);
	//POUTVVK("device:%p:physical device:%p:scaler queue image:%u:%lu bytes allocated from memory type %u\n", npv_vk_surf_p.dev.vk, npv_vk_surf_p.dev.phydev.vk, i, img_rqmts->core.sz, mem_type_idx);
/*
 * we are looking for host visible and host cached mem. on discret gpu we would
 * like non dev local mem that in order to avoid wasting video ram. if we have
 * a discret gpu but could not find a mem type without dev local mem, let's
 * retry with only host visible and host cached mem.
 */
#define IGNORE_GPU_IS_DISCRET true
/*
 * Two-pass allocation of device memory for the scaler image, using
 * the requirements cached in tmp_mem_rqmts_l. Pass 1: strict match
 * (non device-local on discrete GPUs). Pass 2: relaxed match.
 * Fatal if both passes fail.
 * NOTE(review): the loop headers iterating mem_type over the memory
 * types, braces and the early returns are not visible in this chunk;
 * only the loop exit tests remain.
 */
static void scaler_img_dev_mem_alloc(void)
	struct vk_mem_rqmts_t *img_rqmts;
	img_rqmts = &tmp_mem_rqmts_l;
	/* pass 1: strict -- loop exit test */
	if (mem_type == npv_vk_surf_p.dev.phydev.mem_types_n)
	if (match_mem_type(mem_type, img_rqmts, !IGNORE_GPU_IS_DISCRET))
		if (try_alloc_scaler_img_dev_mem(img_rqmts, mem_type))
	/* on an integrated GPU there is nothing to relax: give up */
	if (!npv_vk_surf_p.dev.phydev.is_discret_gpu)
		FATALVVK("physical device:%p:scaler image:unable to find proper memory type or to allocate memory\n", npv_vk_surf_p.dev.phydev.vk);
	/*
	 * lookup again, but relax the match based on discret gpu constraint for
	 */
	/* pass 2: relaxed -- loop exit test */
	if (mem_type == npv_vk_surf_p.dev.phydev.mem_types_n)
	if (match_mem_type(mem_type, img_rqmts, IGNORE_GPU_IS_DISCRET)
		&& try_alloc_scaler_img_dev_mem(img_rqmts, mem_type))
	FATALVVK("physical device:%p:unable to find proper memory type or to allocate memory\n", npv_vk_surf_p.dev.phydev.vk);
#undef IGNORE_GPU_IS_DISCRET
/*
 * Binds the allocated device memory (scaler_p.img.dev_mem) to the
 * scaler image at offset 0.
 */
static void scaler_img_dev_mem_bind(void)
	struct vk_bind_img_mem_info_t info;
	memset(&info, 0, sizeof(info) * 1);
	info.type = vk_struct_type_bind_img_mem_info;
	info.img = scaler_p.img.vk;
	info.mem = scaler_p.img.dev_mem;
	/*
	 * TODO: switch to vkBindImageMemory2 if extension in vk 1.1 for
	 */
	vk_bind_img_mem(&info);
	IF_FATALVVK("%d:device:%p:scaler image:unable to bind device memory to image\n", r, npv_vk_surf_p.dev.vk);
/*
 * Maps the scaler image device memory into the process address space;
 * the CPU pointer is stored in scaler_p.img.data for the software
 * scaler to write into.
 */
static void scaler_img_dev_mem_map(void)
	vk_map_mem(scaler_p.img.dev_mem, &scaler_p.img.data);
	IF_FATALVVK("%d:device:%p:scaler image:unable to map image memory\n", r, npv_vk_surf_p.dev.vk);
/*
 * Grows the decoded-frames arrays (dec_frs_p.a and dec_frs_p.priv_a)
 * by one slot each and populates the new slot with a freshly
 * allocated frame reference and zeroed private data. All failures are
 * fatal.
 * NOTE(review): the p = realloc(p, ...) pattern would leak on failure
 * in general, but is acceptable here because FATALV aborts; the
 * n_max increment is on a line not visible in this chunk.
 */
static void dec_a_grow(void)
	new_idx = dec_frs_p.n_max;
	dec_frs_p.a = realloc(dec_frs_p.a, sizeof(*dec_frs_p.a)
		* (dec_frs_p.n_max + 1));
	if (dec_frs_p.a == 0)
		FATALV("unable to allocate memory for an additional pointer on a decoded frame reference\n");
	dec_frs_p.priv_a = realloc(dec_frs_p.priv_a,
		sizeof(*dec_frs_p.priv_a) * (dec_frs_p.n_max + 1));
	if (dec_frs_p.priv_a == 0)
		FATALV("unable to allocate memory for an additional pointer on private data for decoded frames\n");
	dec_frs_p.a[new_idx] = avutil_video_fr_ref_alloc();
	if (dec_frs_p.a[new_idx] == 0)
		FATALV("ffmpeg:unable to allocate a decoded frame reference\n");
	dec_frs_p.priv_a[new_idx] = calloc(1, sizeof(**dec_frs_p.priv_a));
	if (dec_frs_p.priv_a[new_idx] == 0)
		FATALV("unable to allocate decoded frame private data\n");
/* extract a fr ref, shift the a, push it back at the e, and unref its bufs */
/*
 * Drops frame at index fr: unreferences its buffers, zeroes its
 * private data, then compacts both arrays by shifting the tail left
 * and recycling the dropped slots at position e - 1 (e is presumably
 * dec_frs_p.n -- its computation is on a line not visible in this
 * chunk, as is the decrement of n).
 */
static void fr_drop(u16 fr)
	struct dec_fr_priv_t *priv_save;
	avutil_video_fr_ref_t *save;
	priv_save = dec_frs_p.priv_a[fr];
	if (!priv_save->was_qed_to_pe)
		WARNINGV("dropping undisplayed frame\n");
	save = dec_frs_p.a[fr];
	/* release the frame buffers but keep the (recycled) reference */
	avutil_video_fr_unref(save);
	memset(priv_save, 0, sizeof(*priv_save));
	if (dec_frs_p.n > 1) {
		memmove(&dec_frs_p.a[fr], &dec_frs_p.a[fr + 1],
			sizeof(*dec_frs_p.a) * (e - (fr + 1)));
		dec_frs_p.a[e - 1] = save;
		memmove(&dec_frs_p.priv_a[fr], &dec_frs_p.priv_a[fr + 1],
			sizeof(*dec_frs_p.priv_a) * (e - (fr + 1)));
		dec_frs_p.priv_a[e - 1] = priv_save;
/*
 * Drops decoded frames whose pts is older than now minus a 0.25 s
 * threshold (expressed in stream time-base units), except the frame
 * currently attached to the scaler. Clears last_fr_sent_to_pe if the
 * dropped frame was the last one sent to the presentation engine.
 * NOTE(review): the loop header over fr and several declarations are
 * on lines not visible in this chunk; fr_drop compacts the array,
 * hence the "do not advance" comment on the index.
 */
static void frs_drop(s64 now)
	/* audio can be late up to 0.25s, and audio is 'now' */
	threshold = (250 * st_p.tb.den) / (st_p.tb.num * 1000);
	low = now - threshold;
	struct dec_fr_priv_t *fr_priv;
	/* loop exit test */
	if (dec_frs_p.n == fr)
	pts = dec_frs_p.a[fr]->pts;
	fr_priv = dec_frs_p.priv_a[fr];
	/* keep the fr the scaler is related to */
	if ((dec_frs_p.a[fr] != scaler_p.img.fr) && (pts < low)) {
		if (dec_frs_p.a[fr] == last_fr_sent_to_pe)
			last_fr_sent_to_pe = NO_FR;
		fr_drop(fr); /* do not advance */
/*
 * Selects the decoded frame whose pts is closest to now (minimum
 * absolute delta) and returns it with its private data through the
 * output parameters. *selected_fr is NO_FR when no frame qualifies.
 * NOTE(review): the loop header over fr_idx, braces and some
 * declarations are on lines not visible in this chunk; also
 * selected_fr_delta is declared u64 but initialised with S64_MAX and
 * compared against an s64_abs result -- consistent, though the
 * signedness mix is worth confirming.
 */
static void select_fr(s64 now, avutil_video_fr_ref_t **selected_fr,
	struct dec_fr_priv_t **selected_fr_priv)
	u64 selected_fr_delta;
	*selected_fr = NO_FR;
	selected_fr_delta = S64_MAX;
	/* loop exit test */
	if (fr_idx == dec_frs_p.n)
	delta = s64_abs(now - (s64)dec_frs_p.a[fr_idx]->pts);
	if (delta < selected_fr_delta) {
		*selected_fr = dec_frs_p.a[fr_idx];
		*selected_fr_priv = dec_frs_p.priv_a[fr_idx];
		selected_fr_idx = fr_idx;
		selected_fr_delta = delta;
/*
 * Unreferences every decoded frame and zeroes its private data
 * (references themselves are kept for reuse).
 * NOTE(review): the loop header over fr and the reset of dec_frs_p.n
 * are on lines not visible in this chunk; only the loop exit test
 * remains.
 */
static void frs_reset(void)
	if (fr == dec_frs_p.n)
	avutil_video_fr_unref(dec_frs_p.a[fr]);
	memset(dec_frs_p.priv_a[fr], 0, sizeof(**dec_frs_p.priv_a));
/*
 * Tears down the scaler image: destroys the Vulkan image, unmaps and
 * frees its device memory, and clears the cached CPU pointer and
 * memory handle (the vk handle reset, if any, is on a line not
 * visible in this chunk).
 */
static void scaler_img_destroy(void)
	vk_destroy_img(scaler_p.img.vk);
	vk_unmap_mem(scaler_p.img.dev_mem);
	scaler_p.img.data = 0;
	vk_free_mem(scaler_p.img.dev_mem);
	scaler_p.img.dev_mem = 0;
/*
 * Computes the letter-/pillar-boxed destination rectangle of the blit
 * for swapchain image swpchn_img inside the new viewport new_vp,
 * preserving the source aspect ratio (file-scope "aspect" --
 * presumably the video aspect; its declaration is not visible here).
 * Wider-than-source viewports get horizontal bars (pillarbox),
 * taller ones vertical bars (letterbox); the exact-fit branch header
 * ("} else {") is on a line not visible in this chunk. The new
 * dimensions are recorded in vp to detect later changes.
 */
static void blit_compute_offsets(u8 swpchn_img,
	struct vk_extent_2d_t *new_vp)
	struct blit_vp_t *vp;
	vp = &blit_l[swpchn_img].vp;
	/*
	 * XXX: THE BOUNDS OF THE BLIT ARE NOT PIXEL OFFSETS! THOSE ARE
	 * INTEGER BOUNDS FOR TEXELS COORDS WHICH ARE TAKEN AT THE CENTER OF
	 * EACH PIXEL: NAMELY LAST TEXEL INTEGER BOUND = LAST PIXEL OFFSET + 1.
	 */
	want_width = new_vp->height * aspect.width / aspect.height;
	want_height = new_vp->width * aspect.height / aspect.width;
	if (want_width < new_vp->width) {
		/* pillarbox: full height, centered width */
		vp->bottom_right.y = new_vp->height;
		gap = new_vp->width - want_width;
		vp->top_left.x = gap / 2;
		vp->bottom_right.x = new_vp->width - gap / 2;
	} else if (want_height < new_vp->height) {
		/* letterbox: full width, centered height */
		vp->bottom_right.x = new_vp->width;
		gap = new_vp->height - want_height;
		vp->top_left.y = gap / 2;
		vp->bottom_right.y = new_vp->height - gap / 2;
	/* exact fit (else branch -- header line not visible here) */
	vp->bottom_right.x = new_vp->width;
	vp->bottom_right.y = new_vp->height;
	/* keep track in order to detect change */
	vp->width = new_vp->width;
	vp->height = new_vp->height;
/*
 * (Re)records the per-swapchain-image command buffer that presents
 * one frame: transitions the acquired image to the presentation
 * layout, clears it to integer black (sRGB), then blits the scaler
 * image into the letterboxed viewport. Skips re-recording when
 * neither the scaler dimensions nor the surface extent changed (the
 * early-return tail of that test is on lines not visible in this
 * chunk, as are the mip-level count for the clear range and the
 * &range argument of the clear call).
 * NOTE(review): "&region" below restores text that reached us
 * HTML-mangled as "(R)ion" in this chunk.
 */
static void blit_setup(u8 swpchn_img, bool scaler_dims_changed)
	struct vk_cb_begin_info_t begin_info;
	struct vk_img_mem_barrier_t b;
	struct vk_img_blit_t region;
	struct vk_extent_2d_t *current;
	union vk_clr_color_val_t clr_color_val;
	struct vk_img_subrsrc_range_t range;
	current = &npv_vk_surf_p.dev.phydev.surf_caps.core.current_extent;
	/* nothing changed: keep the previously recorded command buffer */
	if (!scaler_dims_changed && blit_l[swpchn_img].vp.width
		== current->width && blit_l[swpchn_img].vp.height
	blit_compute_offsets(swpchn_img, current);
	/* sync: may be in pending state? */
	vk_reset_cb(npv_vk_surf_p.dev.cbs[swpchn_img]);
	IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable reset\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]);
	/*--------------------------------------------------------------------*/
	memset(&begin_info, 0, sizeof(begin_info));
	begin_info.type = vk_struct_type_cb_begin_info;
	vk_begin_cb(npv_vk_surf_p.dev.cbs[swpchn_img], &begin_info);
	IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable to begin recording\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]);
	/*--------------------------------------------------------------------*/
	/* acquired img (undefined layout) to presentation layout */
	memset(&b, 0, sizeof(b));
	b.type = vk_struct_type_img_mem_barrier;
	b.old_layout = vk_img_layout_undefined;
	b.new_layout = vk_img_layout_present;
	b.src_q_fam = vk_q_fam_ignored;
	b.dst_q_fam = vk_q_fam_ignored;
	b.img = npv_vk_surf_p.dev.swpchn.imgs[swpchn_img];
	b.subrsrc_range.aspect = vk_img_aspect_color_bit;
	b.subrsrc_range.lvls_n = 1;
	b.subrsrc_range.array_layers_n = 1;
	vk_cmd_pl_barrier(npv_vk_surf_p.dev.cbs[swpchn_img], &b);
	/*--------------------------------------------------------------------*/
	/* clear the viewport with integer black pixels since we work in sRGB */
	memset(&clr_color_val, 0, sizeof(clr_color_val));
	memset(&range, 0, sizeof(range));
	range.aspect = vk_img_aspect_color_bit;
	range.array_layers_n = 1;
	vk_cmd_clr_color_img(npv_vk_surf_p.dev.cbs[swpchn_img],
		npv_vk_surf_p.dev.swpchn.imgs[swpchn_img],
		vk_img_layout_present, &clr_color_val, 1,
	/*--------------------------------------------------------------------*/
	/* blit from cpu img to pe img */
	memset(&region, 0, sizeof(region));
	region.src_subrsrc.aspect = vk_img_aspect_color_bit;
	region.src_subrsrc.array_layers_n = 1;
	/* src rectangle: the whole scaler image */
	region.src_offsets[1].x = scaler_p.ctx->cfg.width;
	region.src_offsets[1].y = scaler_p.ctx->cfg.height;
	region.src_offsets[1].z = 1; /* see vk specs */
	region.dst_subrsrc.aspect = vk_img_aspect_color_bit;
	region.dst_subrsrc.array_layers_n = 1;
	/* dst rectangle: the precomputed letterboxed viewport */
	memcpy(&region.dst_offsets[0], &blit_l[swpchn_img].vp.top_left,
		sizeof(region.dst_offsets[0]));
	region.dst_offsets[0].z = 0; /* see vk specs */
	memcpy(&region.dst_offsets[1], &blit_l[swpchn_img].vp.bottom_right,
		sizeof(region.dst_offsets[1]));
	region.dst_offsets[1].z = 1; /* see vk specs */
	vk_cmd_blit_img(npv_vk_surf_p.dev.cbs[swpchn_img], scaler_p.img.vk,
		npv_vk_surf_p.dev.swpchn.imgs[swpchn_img], &region);
	/*--------------------------------------------------------------------*/
	vk_end_cb(npv_vk_surf_p.dev.cbs[swpchn_img]);
	IF_FATALVVK("%d:swapchain img:%u:command buffer:%p:unable to end recording\n", r, swpchn_img, npv_vk_surf_p.dev.cbs[swpchn_img]);
/*
 * Acquires the next presentable image index from the swapchain into
 * *swpchn_img, signalling the acquire semaphore. Handles vk_not_ready
 * (retry -- the "loop" token suggests a project looping construct)
 * and out-of-date/suboptimal (rebuilds the swapchain); any other
 * error is fatal. Return value semantics (u8) not fully visible in
 * this chunk -- presumably a swapchain-updated flag like send_to_pe's.
 */
static u8 swpchn_next_img(u32 *swpchn_img) { loop
	struct vk_acquire_next_img_info_t info;
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_acquire_next_img_info;
	info.swpchn = npv_vk_surf_p.dev.swpchn.vk;
	info.devs = 0x00000001; /* no device group then 1 */
	info.sem = npv_vk_surf_p.dev.sems[npv_vk_sem_acquire_img_done];
	vk_acquire_next_img(&info, swpchn_img);
	if (r == vk_not_ready)
	else if (r == vk_out_of_date || r == vk_suboptimal) {
		npv_vk_swpchn_update();
	FATALVK("%d:device:%p:unable to acquire next image from swapchain %p\n", r, npv_vk_surf_p.dev.vk, npv_vk_surf_p.dev.swpchn.vk);
#define SWPCHN_UPDATED 1
/*
 * Submits the pre-recorded blit command buffer for swpchn_img (waits
 * on the acquire semaphore, signals the blit-done semaphore) and
 * queues the image for presentation. Returns SWPCHN_UPDATED after
 * rebuilding an out-of-date/suboptimal swapchain; the normal-path
 * return is on a line not visible in this chunk.
 */
static u8 send_to_pe(u32 swpchn_img)
	struct vk_submit_info_t submit_info;
	struct vk_present_info_t present_info;
	/* run the command buffer and do present queue */
	/*--------------------------------------------------------------------*/
	memset(&submit_info, 0, sizeof(submit_info));
	submit_info.type = vk_struct_type_submit_info;
	submit_info.wait_sems_n = 1;
	submit_info.wait_sems =
		&npv_vk_surf_p.dev.sems[npv_vk_sem_acquire_img_done];
	/* wait as late as possible: only before the final pipeline stage */
	wait_dst_stage = vk_pl_stage_bottom_of_pipe_bit;
	submit_info.wait_dst_stages = &wait_dst_stage;
	submit_info.cbs_n = 1;
	submit_info.cbs = &npv_vk_surf_p.dev.cbs[swpchn_img];
	submit_info.signal_sems_n = 1;
	submit_info.signal_sems = &npv_vk_surf_p.dev.sems[npv_vk_sem_blit_done];
	vk_q_submit(&submit_info);
	IF_FATALVVK("%d:queue:%p:unable to submit the image pre-recorded command buffer\n", r, npv_vk_surf_p.dev.q);
	/*--------------------------------------------------------------------*/
	idxs[0] = swpchn_img;
	memset(&present_info, 0, sizeof(present_info));
	present_info.type = vk_struct_type_present_info;
	present_info.wait_sems_n = 1;
	/* present only once the blit finished */
	present_info.wait_sems = &npv_vk_surf_p.dev.sems[npv_vk_sem_blit_done];
	present_info.swpchns_n = 1;
	present_info.swpchns = &npv_vk_surf_p.dev.swpchn.vk;
	present_info.idxs = idxs;
	present_info.results = 0;
	vk_q_present(&present_info);
	if (r == vk_out_of_date || r == vk_suboptimal) {
		npv_vk_swpchn_update();
		return SWPCHN_UPDATED;
	IF_FATALVVK("%d:queue:%p:unable to submit the image %u to the presentation engine\n", r, npv_vk_surf_p.dev.q, swpchn_img);
#undef SWPCHN_UPDATED
/*
 * Runs the software scaler on decoded frame fr into the CPU-mapped
 * Vulkan scaler image. If the frame dimensions differ from the
 * current scaler configuration, the whole scaler image is rebuilt
 * (destroy, create, layout transition, layout query, requirements,
 * alloc, bind, map) and *scaler_dims_changed is set so the caller
 * re-records the blit command buffers; otherwise it is cleared (the
 * "} else" line between the two assignments is not visible in this
 * chunk). Finally records fr as the frame the scaler output
 * represents.
 */
static void start_scaling(avutil_video_fr_ref_t *fr,
	struct dec_fr_priv_t *fr_priv, bool *scaler_dims_changed)
	u32 scaled_line_bytes_n;
	if (scaler_p.ctx->cfg.width != fr->width
		|| scaler_p.ctx->cfg.height != fr->height) {
		/* dimensions changed: rebuild the scaler image end-to-end */
		if (scaler_p.img.vk != 0)
			scaler_img_destroy();
		scaler_img_create(fr);
		scaler_img_layout_to_general();
		scaler_img_subrsrc_layout_get();
		tmp_scaler_img_mem_rqmts_get();
		scaler_img_dev_mem_alloc();
		scaler_img_dev_mem_bind();
		scaler_img_dev_mem_map();
		*scaler_dims_changed = true;
		scaler_p.ctx->cfg.width = fr->width;
		scaler_p.ctx->cfg.height = fr->height;
	*scaler_dims_changed = false;
	scaler_p.ctx->cfg.src_fmt = fr->fmt;
	scaler_p.ctx->cfg.dst_fmt = AVUTIL_PIX_FMT_RGB32;
	scaler_p.ctx->cfg.flags = SWS_POINT; /* | SWS_PRINT_INFO */
	/* dst stride comes from the queried Vulkan image row pitch */
	scaled_line_bytes_n = (u32)scaler_p.img.layout.row_pitch;
	scaler_p.ctx->scale.src_slices = fr->data;
	scaler_p.ctx->scale.src_strides = fr->linesize;
	scaler_p.ctx->scale.dst_slice = scaler_p.img.data;
	scaler_p.ctx->scale.dst_stride = scaled_line_bytes_n;
	thdsws_run(scaler_p.ctx);
	/* remember which frame the scaler image now holds */
	scaler_p.img.fr = fr;
/*
 * Acknowledges the timerfd by reading the expiration counter;
 * fatal when the read fails.
 * NOTE(review): the error test between the read and FATALV
 * (presumably r == -1 or a short-read check) and the function tail
 * are not visible in this chunk.
 */
static void timer_ack(void)
	r = read(timer_fd_p, &exps_n, sizeof(exps_n));
	FATALV("unable to read the number of timer expirations\n");