#define INSTANCE_STATIC_SYM(x, y) \
	dl_##y = vk_get_instance_proc_addr(0, #x); \
	if (dl_##y == 0) \
		FATALVK("unable to find vulkan " #x "\n");
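/*
 * for reference, the first invocation below expands to:
 *
 *	dl_vk_enumerate_instance_version =
 *		vk_get_instance_proc_addr(0, "vkEnumerateInstanceVersion");
 *	if (dl_vk_enumerate_instance_version == 0)
 *		FATALVK("unable to find vulkan vkEnumerateInstanceVersion\n");
 */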
static void instance_static_syms(void)
{
	INSTANCE_STATIC_SYM(vkEnumerateInstanceVersion,
					vk_enumerate_instance_version);
	INSTANCE_STATIC_SYM(vkEnumerateInstanceExtensionProperties,
					vk_enumerate_instance_ext_props);
	INSTANCE_STATIC_SYM(vkEnumerateInstanceLayerProperties,
					vk_enumerate_instance_layer_props);
	INSTANCE_STATIC_SYM(vkCreateInstance, vk_create_instance);
}
#undef INSTANCE_STATIC_SYM
/*----------------------------------------------------------------------------*/
#define INSTANCE_SYM(x, y) \
	dl_##y = vk_get_instance_proc_addr(instance_l, #x); \
	if (dl_##y == 0) \
		FATALVK("unable to find vulkan " #x "\n");
static void instance_syms(void)
{
	INSTANCE_SYM(vkEnumeratePhysicalDevices, vk_enumerate_phydevs);
	INSTANCE_SYM(vkEnumerateDeviceExtensionProperties,
					vk_enumerate_dev_ext_props);
	INSTANCE_SYM(vkGetPhysicalDeviceProperties2, vk_get_phydev_props);
	INSTANCE_SYM(vkGetPhysicalDeviceQueueFamilyProperties2,
					vk_get_phydev_q_fam_props);
	INSTANCE_SYM(vkCreateDevice, vk_create_dev);
	/* wsi related -------------------------------------------------------*/
	INSTANCE_SYM(vkGetPhysicalDeviceSurfaceSupportKHR,
					vk_get_phydev_surf_support);
	INSTANCE_SYM(vkGetPhysicalDeviceSurfaceFormats2KHR,
					vk_get_phydev_surf_texel_mem_blk_confs);
	INSTANCE_SYM(vkCreateXcbSurfaceKHR, vk_create_xcb_surf);
	INSTANCE_SYM(vkGetPhysicalDeviceMemoryProperties2,
					vk_get_phydev_mem_props);
	INSTANCE_SYM(vkGetPhysicalDeviceSurfaceCapabilities2KHR,
					vk_get_phydev_surf_caps);
	INSTANCE_SYM(vkGetPhysicalDeviceSurfacePresentModesKHR,
					vk_get_phydev_surf_present_modes);
	/*--------------------------------------------------------------------*/
}
#undef INSTANCE_SYM
/*----------------------------------------------------------------------------*/
#define DEV_SYM(x, y) \
	surf_p.dev.dl_##y = vk_get_dev_proc_addr(surf_p.dev.vk, #x); \
	if (surf_p.dev.dl_##y == 0) \
		FATALVK("unable to find vulkan device " #x "\n");
static void dev_syms(void)
{
	DEV_SYM(vkGetDeviceQueue, vk_get_dev_q);
	DEV_SYM(vkCreateCommandPool, vk_create_cp);
	DEV_SYM(vkCreateSwapchainKHR, vk_create_swpchn);
	DEV_SYM(vkDestroySwapchainKHR, vk_destroy_swpchn);
	DEV_SYM(vkGetSwapchainImagesKHR, vk_get_swpchn_imgs);
	DEV_SYM(vkCreateImage, vk_create_img);
	DEV_SYM(vkDestroyImage, vk_destroy_img);
	DEV_SYM(vkGetImageMemoryRequirements2KHR, vk_get_img_mem_rqmts);
	DEV_SYM(vkAllocateMemory, vk_alloc_mem);
	DEV_SYM(vkFreeMemory, vk_free_mem);
	DEV_SYM(vkBindImageMemory2KHR, vk_bind_img_mem);
	DEV_SYM(vkMapMemory, vk_map_mem);
	DEV_SYM(vkUnmapMemory, vk_unmap_mem);
	DEV_SYM(vkAllocateCommandBuffers, vk_alloc_cbs);
	DEV_SYM(vkBeginCommandBuffer, vk_begin_cb);
	DEV_SYM(vkEndCommandBuffer, vk_end_cb);
	DEV_SYM(vkCmdPipelineBarrier, vk_cmd_pl_barrier);
	DEV_SYM(vkQueueSubmit, vk_q_submit);
	DEV_SYM(vkQueueWaitIdle, vk_q_wait_idle);
	DEV_SYM(vkGetImageSubresourceLayout, vk_get_img_subrsrc_layout);
	DEV_SYM(vkAcquireNextImage2KHR, vk_acquire_next_img);
	DEV_SYM(vkResetCommandBuffer, vk_reset_cb);
	DEV_SYM(vkCmdBlitImage, vk_cmd_blit_img);
	DEV_SYM(vkQueuePresentKHR, vk_q_present);
	DEV_SYM(vkCreateSemaphore, vk_create_sem);
	DEV_SYM(vkCmdClearColorImage, vk_cmd_clr_color_img);
}
#undef DEV_SYM
/*----------------------------------------------------------------------------*/
#define DLSYM(x, y) \
	dl_##y = dlsym(loader_l, #x); \
	if (dl_##y == 0) \
		FATALVK("%s:unable to find " #x "\n", dlerror());
static void loader_syms(void)
{
	DLSYM(vkGetInstanceProcAddr, vk_get_instance_proc_addr);
	DLSYM(vkGetDeviceProcAddr, vk_get_dev_proc_addr);
}
#undef DLSYM
/*----------------------------------------------------------------------------*/
/*NSPC*/
static void load_vk_loader(void)
{
	/* no '/' in the shared library name, so dlopen does a standard lookup */
	loader_l = dlopen("libvulkan.so.1", RTLD_LAZY);
	if (loader_l == 0)
		FATALVK("%s:unable to load the vulkan loader dynamic shared library\n", dlerror());
}
/*NSPC*/
static void check_vk_version(void)
{
	u32 api_version;
	s32 r;

	vk_enumerate_instance_version(&api_version);
	if (r != vk_success)
		FATALVK("%d:unable to enumerate instance version\n", r);
	POUTVK("vulkan instance version %#x = %u.%u.%u\n", api_version, VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), VK_VERSION_PATCH(api_version));
	if (VK_VERSION_MAJOR(api_version) == 1
				&& VK_VERSION_MINOR(api_version) == 0)
		FATALVK("instance version too old\n");
}
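/*
 * a worked example of the standard vulkan version packing
 * (major << 22 | minor << 12 | patch): 1.1.0 packs to 0x401000, so
 * VK_VERSION_MAJOR(0x401000) == 1, VK_VERSION_MINOR(0x401000) == 1 and
 * VK_VERSION_PATCH(0x401000) == 0.
 */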
#define EXTS_N_MAX 256
/*NSPC*/
/* in theory, this could change on the fly */
static void instance_exts_dump(void)
{
	struct vk_ext_props_t exts[EXTS_N_MAX];
	u32 n;
	s32 r;

	memset(exts, 0, sizeof(exts));
	n = EXTS_N_MAX;
	vk_enumerate_instance_ext_props(&n, exts);
	if (r != vk_success && r != vk_incomplete) {
		WARNINGVK("%d:unable to enumerate instance extension(s)\n", r);
		return;
	}
	if (r == vk_incomplete) {
		WARNINGVK("too many extensions (%u/%u), dumping disabled\n", n, EXTS_N_MAX);
		return;
	}
	/* vk_success */
	POUTVK("have %u instance extension(s)\n", n);
	loop {
		if (n == 0)
			break;
		POUTVK("instance extension:name=%s:specification version=%u\n", exts[n - 1].name, exts[n - 1].spec_version);
		n--;
	}
}
#undef EXTS_N_MAX
#define LAYERS_N_MAX 32
/*NSPC*/
/* in theory, this could change on the fly */
static void instance_layers_dump(void)
{
	struct vk_layer_props_t layers[LAYERS_N_MAX];
	u32 n;
	s32 r;

	memset(layers, 0, sizeof(layers));
	n = LAYERS_N_MAX;
	vk_enumerate_instance_layer_props(&n, layers);
	if (r != vk_success && r != vk_incomplete) {
		WARNINGVK("%d:unable to enumerate instance layer(s)\n", r);
		return;
	}
	if (r == vk_incomplete) {
		WARNINGVK("too many layers (%u/%u), dumping disabled\n", n, LAYERS_N_MAX);
		return;
	}
	/* vk_success */
	POUTVK("have %u instance layer(s)\n", n);
	loop {
		if (n == 0)
			break;
		POUTVK("instance layer:%u:name=%s:specification version=%u:implementation version=%u:description=%s\n", n - 1, layers[n - 1].name, layers[n - 1].spec_version, layers[n - 1].implementation_version, layers[n - 1].desc);
		n--;
	}
}
#undef LAYERS_N_MAX
/*NSPC*/
static void instance_create(void)
{
	s32 r;
	struct vk_instance_create_info_t info;
	static u8 *exts[] = {
	/*
	 * TODO: there is a shabby (because of the mess of pixel fmts),
	 * "expensive", promoted YUV extension
	 */
	/*
	 * XXX: not 1.1 promoted, should not use it, but it is fixing
	 * some non-consistency from 1.0
	 */
		"VK_KHR_get_surface_capabilities2",
		/* 1.1 promoted */
		"VK_KHR_get_physical_device_properties2",
		"VK_KHR_xcb_surface",
		"VK_KHR_surface"};
	u32 i;

	i = 0;
	loop {
		if (i == ARRAY_N(exts))
			break;
		POUTVK("will use instance extension %s\n", exts[i]);
		++i;
	}
	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_instance_create_info;
	info.enabled_exts_n = ARRAY_N(exts);
	info.enabled_ext_names = exts;
	vk_create_instance(&info);
	IF_FATALVK("%d:unable to create an instance\n", r);
	POUTVK("instance handle %p\n", instance_l);
}
/*NSPC*/
static void tmp_phydevs_get(void)
{
	struct vk_phydev_t *phydevs[tmp_phydevs_n_max];
	u32 n;
	s32 r;

	memset(phydevs, 0, sizeof(phydevs));
	n = tmp_phydevs_n_max;
	vk_enumerate_phydevs(&n, phydevs);
	if (r != vk_success && r != vk_incomplete)
		FATALVK("%d:unable to enumerate physical devices\n", r);
	if (r == vk_incomplete)
		FATALVK("too many vulkan physical devices %u/%u for our temporary storage\n", n, tmp_phydevs_n_max);
	/* vk_success */
	POUTVK("detected %u physical devices\n", n);
	if (n == 0)
		FATALVK("no vulkan physical devices, exiting\n");
	tmp_phydevs_n_l = n;
	memset(tmp_phydevs_l, 0, sizeof(tmp_phydevs_l));
	n = 0;
	loop {
		if (n == tmp_phydevs_n_l)
			break;
		tmp_phydevs_l[n].vk = phydevs[n];
		++n;
	}
}
#define EXTS_N_MAX 512
/*NSPC*/
static void phydev_exts_dump(void *phydev)
{
	struct vk_ext_props_t exts[EXTS_N_MAX];
	u32 n;
	s32 r;

	memset(exts, 0, sizeof(exts));
	n = EXTS_N_MAX;
	vk_enumerate_dev_ext_props(phydev, &n, exts);
	if (r != vk_success && r != vk_incomplete) {
		WARNINGVK("physical device:%p:%d:unable to enumerate device extension(s)\n", phydev, r);
		return;
	}
	if (r == vk_incomplete) {
		WARNINGVK("physical device:%p:too many extensions (%u/%u), dumping disabled\n", phydev, n, EXTS_N_MAX);
		return;
	}
	/* vk_success */
	POUTVK("physical device:%p:have %u device extension(s)\n", phydev, n);
	loop {
		if (n == 0)
			break;
		POUTVK("physical device:%p:device extension:name=%s:specification version=%u\n", phydev, exts[n - 1].name, exts[n - 1].spec_version);
		n--;
	}
}
#undef EXTS_N_MAX
/*NSPC*/
static void tmp_phydevs_exts_dump(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_l)
			break;
		phydev_exts_dump(tmp_phydevs_l[i].vk);
		++i;
	}
}
/*NSPC*/
static u8 *dev_type_str(u32 type)
{
	switch (type) {
	case vk_phydev_type_other:
		return "other";
	case vk_phydev_type_integrated_gpu:
		return "integrated gpu";
	case vk_phydev_type_discrete_gpu:
		return "discrete gpu";
	case vk_phydev_type_virtual_gpu:
		return "virtual gpu";
	case vk_phydev_type_cpu:
		return "cpu";
	default:
		return "UNKNOWN";
	}
}
/*NSPC*/
static u8 *uuid_str(u8 *uuid)
{
	static u8 uuid_str[VK_UUID_SZ * 2 + 1];
	u8 i;

	memset(uuid_str, 0, sizeof(uuid_str));
	i = 0;
	loop {
		if (i == VK_UUID_SZ)
			break;
		/* XXX: always write a terminating 0, truncated or not */
		snprintf(uuid_str + i * 2, 3, "%02x", uuid[i]);
		++i;
	}
	return uuid_str;
}
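/*
 * each of the VK_UUID_SZ uuid bytes is rendered as two lowercase hex
 * chars, e.g. a uuid starting {0x0a, 0x1b, ...} yields "0a1b...". the
 * buffer is static: the returned pointer stays valid but its content is
 * overwritten by the next call.
 */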
/*NSPC*/
static void tmp_phydevs_props_dump(void)
{
	u32 i;

	i = 0;
	loop {
		struct vk_phydev_props_t props;
		struct tmp_phydev_t *p;

		if (i == tmp_phydevs_n_l)
			break;
		p = &tmp_phydevs_l[i];
		memset(&props, 0, sizeof(props));
		props.type = vk_struct_type_phydev_props;
		vk_get_phydev_props(p->vk, &props);
		POUTVK("physical device:%p:properties:api version=%#x=%u.%u.%u\n", p->vk, props.core.api_version, VK_VERSION_MAJOR(props.core.api_version), VK_VERSION_MINOR(props.core.api_version), VK_VERSION_PATCH(props.core.api_version));
		POUTVK("physical device:%p:properties:driver version=%#x=%u.%u.%u\n", p->vk, props.core.driver_version, VK_VERSION_MAJOR(props.core.driver_version), VK_VERSION_MINOR(props.core.driver_version), VK_VERSION_PATCH(props.core.driver_version));
		POUTVK("physical device:%p:properties:vendor id=%#x\n", p->vk, props.core.vendor_id);
		POUTVK("physical device:%p:properties:device id=%#x\n", p->vk, props.core.dev_id);
		POUTVK("physical device:%p:properties:type=%s\n", p->vk, dev_type_str(props.core.dev_type));
		if (props.core.dev_type == vk_phydev_type_discrete_gpu)
			p->is_discret_gpu = true;
		else
			p->is_discret_gpu = false;
		POUTVK("physical device:%p:properties:name=%s\n", p->vk, props.core.name);
		POUTVK("physical device:%p:properties:pipeline cache uuid=%s\n", p->vk, uuid_str(props.core.pl_cache_uuid));
		/* disp the limits and sparse props at "higher log lvl", if needed in the end */
		++i;
	}
}
/*NSPC*/
static void tmp_phydev_mem_props_get(struct tmp_phydev_t *p)
{
	memset(&p->mem_props, 0, sizeof(p->mem_props));
	p->mem_props.type = vk_struct_type_phydev_mem_props;
	vk_get_phydev_mem_props(p->vk, &p->mem_props);
}
/*NSPC*/
static void tmp_phydevs_mem_props_get(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_l)
			break;
		tmp_phydev_mem_props_get(&tmp_phydevs_l[i]);
		++i;
	}
}
/*NSPC*/
static void phydev_mem_type_dump(void *phydev, u8 i,
					struct vk_mem_type_t *type)
{
	POUTVK("physical device:%p:memory type:%u:heap:%u\n", phydev, i, type->heap);
	POUTVK("physical device:%p:memory type:%u:flags:%#08x\n", phydev, i, type->prop_flags);
	if ((type->prop_flags & vk_mem_prop_dev_local_bit) != 0)
		POUTVK("physical device:%p:memory type:%u:device local\n", phydev, i);
	if ((type->prop_flags & vk_mem_prop_host_visible_bit) != 0)
		POUTVK("physical device:%p:memory type:%u:host visible\n", phydev, i);
	if ((type->prop_flags & vk_mem_prop_host_cached_bit) != 0)
		POUTVK("physical device:%p:memory type:%u:host cached\n", phydev, i);
}
/*NSPC*/
static void tmp_phydev_mem_types_dump(struct tmp_phydev_t *p)
{
	u8 i;

	POUTVK("physical device:%p:%u memory types\n", p->vk, p->mem_props.core.mem_types_n);
	i = 0;
	loop {
		if (i == p->mem_props.core.mem_types_n)
			break;
		phydev_mem_type_dump(p->vk, i,
					&p->mem_props.core.mem_types[i]);
		++i;
	}
}
/*NSPC*/
static void phydev_mem_heap_dump(void *phydev, u8 i,
					struct vk_mem_heap_t *heap)
{
	POUTVK("physical device:%p:memory heap:%u:size:%u bytes\n", phydev, i, heap->sz);
	POUTVK("physical device:%p:memory heap:%u:flags:%#08x\n", phydev, i, heap->flags);
	if ((heap->flags & vk_mem_heap_dev_local_bit) != 0)
		POUTVK("physical device:%p:memory heap:%u:device local\n", phydev, i);
	if ((heap->flags & vk_mem_heap_multi_instance_bit) != 0)
		POUTVK("physical device:%p:memory heap:%u:multi instance\n", phydev, i);
}
/*NSPC*/
static void tmp_phydev_mem_heaps_dump(struct tmp_phydev_t *p)
{
	u8 i;

	POUTVK("physical device:%p:%u memory heaps\n", p->vk, p->mem_props.core.mem_heaps_n);
	i = 0;
	loop {
		if (i == p->mem_props.core.mem_heaps_n)
			break;
		phydev_mem_heap_dump(p->vk, i,
					&p->mem_props.core.mem_heaps[i]);
		++i;
	}
}
/*NSPC*/
static void tmp_phydev_mem_props_dump(struct tmp_phydev_t *p)
{
	tmp_phydev_mem_types_dump(p);
	tmp_phydev_mem_heaps_dump(p);
}
/*NSPC*/
static void tmp_phydevs_mem_props_dump(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_l)
			break;
		tmp_phydev_mem_props_dump(&tmp_phydevs_l[i]);
		++i;
	}
}
/*NSPC*/
static void tmp_phydev_q_fams_get(struct tmp_phydev_t *p)
{
	u8 i;
	u32 n;

	n = 0;
	vk_get_phydev_q_fam_props(p->vk, &n, 0);
	if (n > tmp_phydev_q_fams_n_max)
		FATALVK("physical device:%p:too many queue families %u/%u\n", p->vk, n, tmp_phydev_q_fams_n_max);
	memset(p->q_fams, 0, sizeof(p->q_fams));
	i = 0;
	loop {
		if (i == tmp_phydev_q_fams_n_max)
			break;
		p->q_fams[i].type = vk_struct_type_q_fam_props;
		++i;
	}
	vk_get_phydev_q_fam_props(p->vk, &n, p->q_fams);
	p->q_fams_n = n;
	POUTVK("physical device:%p:have %u queue families\n", p->vk, p->q_fams_n);
}
/*NSPC*/
static void tmp_phydevs_q_fams_get(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_l)
			break;
		tmp_phydev_q_fams_get(&tmp_phydevs_l[i]);
		++i;
	}
}
/*NSPC*/
static void tmp_phydev_q_fams_dump(struct tmp_phydev_t *p)
{
	u8 i;

	i = 0;
	loop {
		if (i == p->q_fams_n)
			break;
		if ((p->q_fams[i].core.flags & vk_q_gfx_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:graphics\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_compute_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:compute\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_transfer_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:transfer\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_sparse_binding_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:sparse binding\n", p->vk, i);
		if ((p->q_fams[i].core.flags & vk_q_protected_bit) != 0)
			POUTVK("physical device:%p:queue family:%u:flags:protected\n", p->vk, i);
		POUTVK("physical device:%p:queue family:%u:%u queues\n", p->vk, i, p->q_fams[i].core.qs_n);
		POUTVK("physical device:%p:queue family:%u:%u bits timestamps\n", p->vk, i, p->q_fams[i].core.timestamp_valid_bits);
		POUTVK("physical device:%p:queue family:%u:(width=%u,height=%u,depth=%u) minimum image transfer granularity\n", p->vk, i, p->q_fams[i].core.min_img_transfer_granularity.width, p->q_fams[i].core.min_img_transfer_granularity.height, p->q_fams[i].core.min_img_transfer_granularity.depth);
		++i;
	}
}
/*NSPC*/
static void tmp_phydevs_q_fams_dump(void)
{
	u8 i;

	i = 0;
	loop {
		if (i == tmp_phydevs_n_l)
			break;
		tmp_phydev_q_fams_dump(&tmp_phydevs_l[i]);
		++i;
	}
}
/*
 * the major obj to use in the vk abstraction of gfx hardware is the q. in
 * this abstraction, many core objs like bufs/imgs are "owned" by a specific
 * q, and transferring such ownership to other qs can be expensive. we know
 * it's not really the case on AMD hardware, but if the vk abstraction
 * insists on this, it probably means it is important on some hardware of
 * other vendors.
 */
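/*
 * for illustration only, in raw vulkan terms rather than this file's
 * dl_ wrappers: an exclusive ownership transfer of an img between two q
 * fams is expressed as a release+acquire pair of img mem barriers, e.g.
 *
 *	VkImageMemoryBarrier b = {0};
 *	b.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
 *	b.srcQueueFamilyIndex = src_fam;
 *	b.dstQueueFamilyIndex = dst_fam;
 *	b.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
 *	b.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
 *	b.image = img;
 *	b.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
 *	b.subresourceRange.levelCount = 1;
 *	b.subresourceRange.layerCount = 1;
 *
 * recorded with vkCmdPipelineBarrier() on a cb of the releasing q fam,
 * then again on a cb of the acquiring one (src_fam, dst_fam and img are
 * placeholders). this is why we avoid it and stick to one q.
 */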
/*NSPC*/
static void tmp_phydevs_q_fams_surf_support_get(void)
{
	u8 i;

	i = 0;
	loop {
		struct tmp_phydev_t *p;
		u8 j;

		if (i == tmp_phydevs_n_l)
			break;
		p = &tmp_phydevs_l[i];
		j = 0;
		loop {
			s32 r;
			u32 supported;

			if (j == p->q_fams_n)
				break;
			supported = vk_false;
			vk_get_phydev_surf_support(p->vk, j, &supported);
			IF_FATALVK("%d:physical device:%p:queue family:%u:surface:%p:unable to query queue family wsi/(image presentation to our surface) support\n", r, p->vk, j, surf_p.vk);
			if (supported == vk_true) {
				POUTVK("physical device:%p:queue family:%u:surface:%p:does support wsi/(image presentation to our surface)\n", p->vk, j, surf_p.vk);
				p->q_fams_surf_support[j] = true;
			} else {
				POUTVK("physical device:%p:queue family:%u:surface:%p:does not support wsi/(image presentation to our surface)\n", p->vk, j, surf_p.vk);
				p->q_fams_surf_support[j] = false;
			}
			++j;
		}
		++i;
	}
}
/*NSPC*/
static void tmp_selected_phydev_cherry_pick(u8 i)
{
	struct tmp_phydev_t *p;

	p = &tmp_phydevs_l[i];
	surf_p.dev.phydev.vk = p->vk;
	surf_p.dev.phydev.is_discret_gpu = p->is_discret_gpu;
	surf_p.dev.phydev.mem_types_n = p->mem_props.core.mem_types_n;
	memcpy(surf_p.dev.phydev.mem_types, p->mem_props.core.mem_types,
				sizeof(surf_p.dev.phydev.mem_types));
}
/*
 * we ask the qs of the phydevs which one is able to present imgs to the
 * external pe surf. additionally we require this q to support gfx. we
 * basically select the first q from the first phydev fitting what we are
 * looking for.
 */
/*NSPC*/
static void tmp_phydev_and_q_fam_select(void)
{
	u8 i;

	i = 0;
	loop {
		u8 j;
		struct tmp_phydev_t *p;

		if (i == tmp_phydevs_n_l)
			break;
		p = &tmp_phydevs_l[i];
		j = 0;
		loop {
			if (j == p->q_fams_n)
				break;
			/*
			 * we are looking for a q fam with:
			 * - img presentation to our surf
			 * - gfx
			 * - transfer (implicit with gfx)
			 */
			if (p->q_fams_surf_support[j]
				&& (p->q_fams[j].core.flags & vk_q_gfx_bit)
									!= 0) {
				surf_p.dev.phydev.q_fam = j;
				tmp_selected_phydev_cherry_pick(i);
				POUTVK("physical device %p selected for (wsi/image presentation to our surface %p) using its queue family %u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.q_fam);
				return;
			}
			++j;
		}
		++i;
	}
}
/*NSPC*/
static void texel_mem_blk_confs_dump(u32 confs_n,
				struct vk_surf_texel_mem_blk_conf_t *confs)
{
	u32 i;

	i = 0;
	loop {
		if (i == confs_n)
			break;
		POUTVK("physical device:%p:surface:%p:texel memory block configuration:format=%u color_space=%u\n", surf_p.dev.phydev.vk, surf_p.vk, confs[i].core.fmt, confs[i].core.color_space);
		++i;
	}
}
/*
 * we only know this phydev/q is "able to present imgs" to the external
 * pe surf. here we choose the conf of the texel blk
 */
#define CONFS_N_MAX 1024
/*NSPC*/
static void phydev_surf_texel_mem_blk_conf_select(void)
{
	struct vk_phydev_surf_info_t info;
	struct vk_surf_texel_mem_blk_conf_t confs[CONFS_N_MAX];
	struct vk_surf_texel_mem_blk_conf_core_t *cc;
	s32 r;
	u32 confs_n;
	u32 i;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_phydev_surf_info;
	info.surf = surf_p.vk;
	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, 0);
	IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the count of valid surface texel memory block configurations\n", r, surf_p.dev.phydev.vk, surf_p.vk);
	if (confs_n > CONFS_N_MAX)
		FATALVK("physical device:%p:surface:%p:too many surface texel memory block configurations %u/%u\n", surf_p.dev.phydev.vk, surf_p.vk, confs_n, CONFS_N_MAX);

	memset(confs, 0, sizeof(confs[0]) * confs_n);
	i = 0;
	loop {
		if (i == confs_n)
			break;
		confs[i].type = vk_struct_type_surf_texel_mem_blk_conf;
		++i;
	}
	vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, confs);
	IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the valid surface texel memory block configurations\n", r, surf_p.dev.phydev.vk, surf_p.vk);
	if (confs_n == 0)
		FATALVK("physical device:%p:surface:%p:no valid surface texel memory block configuration\n", surf_p.dev.phydev.vk, surf_p.vk);
	texel_mem_blk_confs_dump(confs_n, confs);

	cc = &surf_p.dev.phydev.selected_texel_mem_blk_conf_core;
	/*
	 * the following texel conf is guaranteed to exist, and this is what
	 * we get from the ff scaler
	 */
	cc->fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
	POUTVK("physical device:%p:surface:%p:using our surface texel memory block format %u\n", surf_p.dev.phydev.vk, surf_p.vk, cc->fmt);
	cc->color_space = vk_color_space_srgb_nonlinear;
	POUTVK("physical device:%p:surface:%p:using preferred surface texel memory block color space %u\n", surf_p.dev.phydev.vk, surf_p.vk, cc->color_space);
}
#undef CONFS_N_MAX
/*NSPC*/
static void phydev_surf_caps_get(void)
{
	s32 r;
	struct vk_phydev_surf_info_t info;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_phydev_surf_info;
	info.surf = surf_p.vk;
	memset(&surf_p.dev.phydev.surf_caps, 0,
				sizeof(surf_p.dev.phydev.surf_caps));
	surf_p.dev.phydev.surf_caps.type = vk_struct_type_surf_caps;
	vk_get_phydev_surf_caps(&info, &surf_p.dev.phydev.surf_caps);
	IF_FATALVK("%d:physical device:%p:surface:%p:unable to get our surface capabilities in the context of the selected physical device\n", r, surf_p.dev.phydev.vk, surf_p.vk);
	/* we have room for a maximum of swpchn_imgs_n_max images per swapchain */
	if (surf_p.dev.phydev.surf_caps.core.imgs_n_min > swpchn_imgs_n_max)
		FATALVK("physical device:%p:surface:%p:we have room for %u images per swapchain, but this swapchain requires a minimum of %u images\n", surf_p.dev.phydev.vk, surf_p.vk, swpchn_imgs_n_max, surf_p.dev.phydev.surf_caps.core.imgs_n_min);
}
/*NSPC*/
static void phydev_surf_caps_dump(void)
{
	POUTVK("physical device:%p:surface:%p:imgs_n_min=%u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.imgs_n_min);
	POUTVK("physical device:%p:surface:%p:imgs_n_max=%u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.imgs_n_max);
	POUTVK("physical device:%p:surface:%p:current extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.current_extent.width, surf_p.dev.phydev.surf_caps.core.current_extent.height);
	POUTVK("physical device:%p:surface:%p:minimal extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.img_extent_min.width, surf_p.dev.phydev.surf_caps.core.img_extent_min.height);
	POUTVK("physical device:%p:surface:%p:maximal extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.img_extent_max.width, surf_p.dev.phydev.surf_caps.core.img_extent_max.height);
	POUTVK("physical device:%p:surface:%p:img_array_layers_n_max=%u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.img_array_layers_n_max);
	POUTVK("physical device:%p:surface:%p:supported_transforms=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.supported_transforms);
	POUTVK("physical device:%p:surface:%p:current_transform=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.current_transform);
	POUTVK("physical device:%p:surface:%p:supported_composite_alpha=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.supported_composite_alpha);
	POUTVK("physical device:%p:surface:%p:supported_img_usage_flags=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.supported_img_usage_flags);
}
/*NSPC*/
static void tmp_phydev_surf_present_modes_get(void)
{
	s32 r;

	tmp_present_modes_n_l = tmp_present_modes_n_max;
	vk_get_phydev_surf_present_modes();
	IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the physical device present modes for our surface\n", r, surf_p.dev.phydev.vk, surf_p.vk);
}
/*NSPC*/
static u8 *present_mode_to_str(u32 mode)
{
	switch (mode) {
	case vk_present_mode_immediate:
		return "immediate";
	case vk_present_mode_mailbox:
		return "mailbox";
	case vk_present_mode_fifo:
		return "fifo";
	case vk_present_mode_fifo_relaxed:
		return "fifo relaxed";
	default:
		return "unknown";
	}
}
/*NSPC*/
static void tmp_phydev_surf_present_modes_dump(void)
{
	u8 i;

	i = 0;
	POUTVK("physical device:%p:surface:%p:%u present modes\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_present_modes_n_l);
	loop {
		if (i == (u8)tmp_present_modes_n_l)
			break;
		POUTVK("physical device:%p:surface:%p:present mode=%s\n", surf_p.dev.phydev.vk, surf_p.vk, present_mode_to_str(tmp_present_modes_l[i]));
		++i;
	}
}
/*NSPC*/
static void phydev_init(void)
{
	tmp_phydevs_get();
	/*--------------------------------------------------------------------*/
	tmp_phydevs_exts_dump();
	tmp_phydevs_props_dump();
	tmp_phydevs_mem_props_get();
	tmp_phydevs_mem_props_dump();
	/*--------------------------------------------------------------------*/
	tmp_phydevs_q_fams_get();
	tmp_phydevs_q_fams_dump();
	/*====================================================================*/
	/* from here our surf is involved */
	/*--------------------------------------------------------------------*/
	/* select the phydev and its q family which can work with our surf */
	tmp_phydevs_q_fams_surf_support_get();
	tmp_phydev_and_q_fam_select();
	/*--------------------------------------------------------------------*/
	phydev_surf_texel_mem_blk_conf_select();
	/*--------------------------------------------------------------------*/
	phydev_surf_caps_get();
	phydev_surf_caps_dump();
	/*--------------------------------------------------------------------*/
	tmp_phydev_surf_present_modes_get();
	tmp_phydev_surf_present_modes_dump();
}
/* the phydev q fam selected */
/*NSPC*/
static void dev_create(void)
{
	struct vk_dev_create_info_t info;
	struct vk_dev_q_create_info_t q_info;
	float q_prio;
	static u8 *exts[] = {
		/* 1.1 promoted */
		"VK_KHR_bind_memory2",
		/* 1.1 promoted */
		"VK_KHR_get_memory_requirements2",
		"VK_KHR_swapchain"};
	s32 r;

	memset(&info, 0, sizeof(info));
	memset(&q_info, 0, sizeof(q_info));
	/*--------------------------------------------------------------------*/
	q_info.type = vk_struct_type_dev_q_create_info;
	q_info.q_fam = surf_p.dev.phydev.q_fam;
	q_info.qs_n = 1;
	q_info.q_prios = &q_prio;
	q_prio = 1.0f;
	/*--------------------------------------------------------------------*/
	info.type = vk_struct_type_dev_create_info;
	info.q_create_infos_n = 1;
	info.q_create_infos = &q_info;
	info.enabled_exts_n = ARRAY_N(exts);
	info.enabled_ext_names = exts;
	vk_create_dev(&info);
	IF_FATALVK("%d:physical device:%p:unable to create a vulkan device\n", r, surf_p.dev.phydev.vk);
	POUTVK("physical device:%p:vulkan device created with one proper queue:%p\n", surf_p.dev.phydev.vk, surf_p.dev.vk);
}
/*NSPC*/
static void q_get(void)
{
	POUTVK("device:%p:getting queue:family=%u queue=0\n", surf_p.dev.vk, surf_p.dev.phydev.q_fam);
	vk_get_dev_q();
	POUTVK("device:%p:got queue:%p\n", surf_p.dev.vk, surf_p.dev.q);
}
/*NSPC*/
static void cp_create(void)
{
	s32 r;
	struct vk_cp_create_info_t info;

	memset(&info, 0, sizeof(info));
	info.type = vk_struct_type_cp_create_info;
	info.flags = vk_cp_create_reset_cb_bit;
	info.q_fam = surf_p.dev.phydev.q_fam;
	vk_create_cp(&info);
	IF_FATALVK("%d:unable to create the command pool\n", r);
	POUTVK("device:%p:queue family:%u:created command pool %p\n", surf_p.dev.vk, surf_p.dev.phydev.q_fam, surf_p.dev.cp);
}
/*NSPC*/
static void dev_init(void)
{
	phydev_init();
	/*--------------------------------------------------------------------*/
	dev_create();
	dev_syms();
	q_get();
	cp_create();
}
/* XXX: the surf is an obj at the instance lvl, NOT THE [PHYSICAL] DEV LVL */
/*NSPC*/
static void surf_create(xcb_connection_t *c, u32 win_id)
{
	struct vk_xcb_surf_create_info_t vk_xcb_info;
	s32 r;

	memset(&surf_p, 0, sizeof(surf_p));
	memset(&vk_xcb_info, 0, sizeof(vk_xcb_info));
	vk_xcb_info.type = vk_struct_type_xcb_surf_create_info;
	vk_xcb_info.c = c;
	vk_xcb_info.win = win_id;
	vk_create_xcb_surf(&vk_xcb_info);
	IF_FATALVK("%d:xcb:%p:window id:%#x:unable to create a vulkan surface from this x11 window\n", r, c, win_id);
	POUTVK("xcb:%p:window id:%#x:created vk_surface=%p\n", c, win_id, surf_p.vk);
}
/*NSPC*/
static void swpchn_init_once(void)
{
	memset(&surf_p.dev.swpchn, 0, sizeof(surf_p.dev.swpchn));
}
/*NSPC*/
static void swpchn_reinit(void)
{
	struct vk_swpchn_t *old_swpchn;
	struct vk_swpchn_create_info_t info;
	struct phydev_t *p;
	s32 r;
	/* first, deal with the previous swpchn, if any */
	old_swpchn = surf_p.dev.swpchn.vk;
	surf_p.dev.swpchn.vk = 0;
	/* lifetime of swpchn imgs is handled by the pe, not us */
	surf_p.dev.swpchn.imgs_n = 0;
	memset(surf_p.dev.swpchn.imgs, 0, sizeof(surf_p.dev.swpchn.imgs));
	/*--------------------------------------------------------------------*/
	memset(&info, 0, sizeof(info));
	p = &surf_p.dev.phydev;
	info.type = vk_struct_type_swpchn_create_info;
	info.surf = surf_p.vk;
	info.imgs_n_min = surf_p.dev.phydev.surf_caps.core.imgs_n_min;
	info.img_texel_mem_blk_fmt = p->selected_texel_mem_blk_conf_core.fmt;
	info.img_color_space = p->selected_texel_mem_blk_conf_core.color_space;
	memcpy(&info.img_extent,
			&surf_p.dev.phydev.surf_caps.core.current_extent,
			sizeof(info.img_extent));
	info.img_layers_n = 1;
	info.img_usage = vk_img_usage_color_attachment_bit
					| vk_img_usage_transfer_dst_bit;
	info.img_sharing_mode = vk_sharing_mode_exclusive;
	info.pre_transform = vk_surf_transform_identity_bit;
	info.composite_alpha = vk_composite_alpha_opaque_bit;
	info.present_mode = vk_present_mode_fifo;
	info.clipped = vk_true;
	if (old_swpchn != 0)
		info.old_swpchn = old_swpchn;
	vk_create_swpchn(&info);
	IF_FATALVK("%d:device:%p:surface:%p:unable to create the swapchain\n", r, surf_p.dev.vk, surf_p.vk);
	if (old_swpchn != 0)
		vk_destroy_swpchn(old_swpchn);
}
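/*
 * note: this fragment does not show the trigger, but in standard vulkan
 * the usual caller pattern is: when acquire/present report that the surf
 * changed (VK_ERROR_OUT_OF_DATE_KHR, possibly VK_SUBOPTIMAL_KHR),
 * re-query the surf caps then call swpchn_reinit(). passing old_swpchn
 * in the create info above lets the implementation reuse resources from
 * the retired swpchn.
 */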
/*NSPC*/
static void swpchn_imgs_get(void)
{
	s32 r;
	u8 target_imgs_n;
	/*
	 * TODO: should try to figure out how to favor double buf over
	 * everything else
	 */
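	/*
	 * a hypothetical clamp (field names are this file's, the policy is
	 * ours): at swpchn (re)creation time one could request the smallest
	 * legal img count instead of the surf minimum, e.g. in
	 * swpchn_reinit():
	 *
	 *	u32 wanted;
	 *
	 *	wanted = 2;
	 *	if (wanted < p->surf_caps.core.imgs_n_min)
	 *		wanted = p->surf_caps.core.imgs_n_min;
	 *	if (p->surf_caps.core.imgs_n_max != 0
	 *			&& wanted > p->surf_caps.core.imgs_n_max)
	 *		wanted = p->surf_caps.core.imgs_n_max;
	 *	info.imgs_n_min = wanted;
	 *
	 * (in vulkan, an imgs_n_max of 0 means "no limit")
	 */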
	surf_p.dev.swpchn.imgs_n = swpchn_imgs_n_max;
	vk_get_swpchn_imgs();
	IF_FATALVK("%d:device:%p:surface:%p:swapchain:%p:unable to get the swapchain images\n", r, surf_p.dev.vk, surf_p.vk, surf_p.dev.swpchn.vk);
}
/*NSPC*/
static void sems_create(void)
{
	s32 r;
	struct vk_sem_create_info_t info;
	u8 sem;

	sem = 0;
	loop {
		if (sem == sems_n)
			break;
		memset(&info, 0, sizeof(info));
		info.type = vk_struct_type_sem_create_info;
		vk_create_sem(&info, &surf_p.dev.sems[sem]);
		IF_FATALVK("%d:device:%p:unable to create a semaphore %u for the synchronization of the swapchain\n", r, surf_p.dev.vk, sem);
		POUTVK("device:%p:semaphore %u for the synchronization of the swapchain created %p\n", surf_p.dev.vk, sem, surf_p.dev.sems[sem]);
		++sem;
	}
}
/*NSPC*/
static void swpchn_imgs_cbs_init_once(void)
{
	s32 r;
	struct vk_cb_alloc_info_t alloc_info;

	memset(&alloc_info, 0, sizeof(alloc_info));
	alloc_info.type = vk_struct_type_cb_alloc_info;
	alloc_info.cp = surf_p.dev.cp;
	alloc_info.lvl = vk_cb_lvl_primary;
	alloc_info.cbs_n = swpchn_imgs_n_max;
	vk_alloc_cbs(&alloc_info);
	IF_FATALVK("%d:device:%p:unable to allocate command buffers for our swapchain images from %p command pool\n", r, surf_p.dev.vk, surf_p.dev.cp);
	POUTVK("device:%p:allocated %u command buffers for our swapchain images from %p command pool\n", surf_p.dev.vk, surf_p.dev.swpchn.imgs_n, surf_p.dev.cp);
}