#define INSTANCE_STATIC_SYM(x,y) \
        dl_##y = vk_get_instance_proc_addr(0, #x); \
        if (dl_##y == 0) \
                FATALVK("unable to find vulkan " #x "\n");
static void instance_static_syms(void)
{
        INSTANCE_STATIC_SYM(vkEnumerateInstanceVersion, vk_enumerate_instance_version);
        INSTANCE_STATIC_SYM(vkEnumerateInstanceExtensionProperties, vk_enumerate_instance_ext_props);
        INSTANCE_STATIC_SYM(vkEnumerateInstanceLayerProperties, vk_enumerate_instance_layer_props);
        INSTANCE_STATIC_SYM(vkCreateInstance, vk_create_instance);
}
#undef INSTANCE_STATIC_SYM
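/*
 * note: with a 0 instance handle, vkGetInstanceProcAddr only resolves the
 * "global" commands (instance enumeration and creation); as a sketch,
 * INSTANCE_STATIC_SYM(vkCreateInstance, vk_create_instance) expands to:
 *   dl_vk_create_instance = vk_get_instance_proc_addr(0, "vkCreateInstance");
 *   if (dl_vk_create_instance == 0)
 *           FATALVK("unable to find vulkan vkCreateInstance\n");
 */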
/*----------------------------------------------------------------------------*/
#define INSTANCE_SYM(x,y) \
        dl_##y = vk_get_instance_proc_addr(instance_l, #x); \
        if (dl_##y == 0) \
                FATALVK("unable to find vulkan " #x "\n");
static void instance_syms(void)
{
        INSTANCE_SYM(vkEnumeratePhysicalDevices, vk_enumerate_phydevs);
        INSTANCE_SYM(vkEnumerateDeviceExtensionProperties, vk_enumerate_dev_ext_props);
        INSTANCE_SYM(vkGetPhysicalDeviceProperties2, vk_get_phydev_props);
        INSTANCE_SYM(vkGetPhysicalDeviceQueueFamilyProperties2, vk_get_phydev_q_fam_props);
        INSTANCE_SYM(vkCreateDevice, vk_create_dev);
        /* wsi related -------------------------------------------------------*/
        INSTANCE_SYM(vkGetPhysicalDeviceSurfaceSupportKHR, vk_get_phydev_surf_support);
        INSTANCE_SYM(vkGetPhysicalDeviceSurfaceFormats2KHR, vk_get_phydev_surf_texel_mem_blk_confs);
        INSTANCE_SYM(vkCreateXcbSurfaceKHR, vk_create_xcb_surf);
        INSTANCE_SYM(vkGetPhysicalDeviceMemoryProperties2, vk_get_phydev_mem_props);
        INSTANCE_SYM(vkGetPhysicalDeviceSurfaceCapabilities2KHR, vk_get_phydev_surf_caps);
        INSTANCE_SYM(vkGetPhysicalDeviceSurfacePresentModesKHR, vk_get_phydev_surf_present_modes);
        /*--------------------------------------------------------------------*/
}
#undef INSTANCE_SYM
/*----------------------------------------------------------------------------*/
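/*
 * note: these pointers are fetched against the live instance handle; per the
 * loader contract they are valid for any phydev/dev derived from instance_l.
 * as a sketch, the first entry above expands to:
 *   dl_vk_enumerate_phydevs = vk_get_instance_proc_addr(instance_l, "vkEnumeratePhysicalDevices");
 */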
#define DEV_SYM(x,y) \
        surf_p.dev.dl_##y = vk_get_dev_proc_addr(surf_p.dev.vk, #x); \
        if (surf_p.dev.dl_##y == 0) \
                FATALVK("unable to find vulkan device " #x "\n");
static void dev_syms(void)
{
        DEV_SYM(vkGetDeviceQueue, vk_get_dev_q);
        DEV_SYM(vkCreateCommandPool, vk_create_cp);
        DEV_SYM(vkCreateSwapchainKHR, vk_create_swpchn);
        DEV_SYM(vkDestroySwapchainKHR, vk_destroy_swpchn);
        DEV_SYM(vkGetSwapchainImagesKHR, vk_get_swpchn_imgs);
        DEV_SYM(vkCreateImage, vk_create_img);
        DEV_SYM(vkDestroyImage, vk_destroy_img);
        DEV_SYM(vkGetImageMemoryRequirements2KHR, vk_get_img_mem_rqmts);
        DEV_SYM(vkAllocateMemory, vk_alloc_mem);
        DEV_SYM(vkFreeMemory, vk_free_mem);
        DEV_SYM(vkBindImageMemory2KHR, vk_bind_img_mem);
        DEV_SYM(vkMapMemory, vk_map_mem);
        DEV_SYM(vkUnmapMemory, vk_unmap_mem);
        DEV_SYM(vkAllocateCommandBuffers, vk_alloc_cbs);
        DEV_SYM(vkBeginCommandBuffer, vk_begin_cb);
        DEV_SYM(vkEndCommandBuffer, vk_end_cb);
        DEV_SYM(vkCmdPipelineBarrier, vk_cmd_pl_barrier);
        DEV_SYM(vkQueueSubmit, vk_q_submit);
        DEV_SYM(vkQueueWaitIdle, vk_q_wait_idle);
        DEV_SYM(vkGetImageSubresourceLayout, vk_get_img_subrsrc_layout);
        DEV_SYM(vkAcquireNextImage2KHR, vk_acquire_next_img);
        DEV_SYM(vkResetCommandBuffer, vk_reset_cb);
        DEV_SYM(vkCmdBlitImage, vk_cmd_blit_img);
        DEV_SYM(vkQueuePresentKHR, vk_q_present);
        DEV_SYM(vkCreateSemaphore, vk_create_sem);
        DEV_SYM(vkCmdClearColorImage, vk_cmd_clr_color_img);
}
#undef DEV_SYM
/*----------------------------------------------------------------------------*/
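/*
 * note: vkGetDeviceProcAddr returns pointers specialized for this very dev,
 * skipping the loader's per-dev dispatch, which is why dev_syms() can only
 * run once surf_p.dev.vk exists.
 */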
#define DLSYM(x,y) \
        dl_##y = dlsym(loader_l, #x); \
        if (dl_##y == 0) \
                FATALVK("%s:unable to find " #x "\n", dlerror());
static void loader_syms(void)
{
        DLSYM(vkGetInstanceProcAddr, vk_get_instance_proc_addr);
        DLSYM(vkGetDeviceProcAddr, vk_get_dev_proc_addr);
}
#undef DLSYM
/*----------------------------------------------------------------------------*/
static void load_vk_loader(void)
{
        /* no '/' in the shared library path name, so dlopen does the standard lookup */
        loader_l = dlopen("libvulkan.so.1", RTLD_LAZY);
        if (loader_l == 0)
                FATALVK("%s:unable to load the vulkan loader dynamic shared library\n", dlerror());
}
static void check_vk_version(void)
{
        u32 api_version;
        s32 r;

        vk_enumerate_instance_version(&api_version);
        if (r != vk_success)
                FATALVK("%d:unable to enumerate instance version\n", r);
        POUTVK("vulkan instance version %#x = %u.%u.%u\n", api_version, VK_VERSION_MAJOR(api_version), VK_VERSION_MINOR(api_version), VK_VERSION_PATCH(api_version));
        if (VK_VERSION_MAJOR(api_version) == 1
                        && VK_VERSION_MINOR(api_version) == 0)
                FATALVK("instance version too old\n");
}
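/*
 * reminder: vulkan packs a version as (major << 22) | (minor << 12) | patch;
 * e.g. api_version == 0x402000 decodes as 1.2.0, which is why the "1.0 is too
 * old" test above only needs the major and minor fields.
 */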
#define EXTS_N_MAX 256

/* in theory, this could change on the fly */
static void instance_exts_dump(void)
{
        struct vk_ext_props_t exts[EXTS_N_MAX];
        u32 n;
        s32 r;

        memset(exts, 0, sizeof(exts));
        n = EXTS_N_MAX;
        vk_enumerate_instance_ext_props(&n, exts);
        if (r != vk_success && r != vk_incomplete) {
                WARNINGVK("%d:unable to enumerate instance extension(s)\n", r);
                return;
        }
        if (r == vk_incomplete) {
                WARNINGVK("too many extensions (%u/%u), dumping disabled\n", n, EXTS_N_MAX);
                return;
        }
        POUTVK("have %u instance extension(s)\n", n);
        while (n > 0) {
                POUTVK("instance extension:name=%s:specification version=%u\n", exts[n - 1].name, exts[n - 1].spec_version);
                --n;
        }
}
#undef EXTS_N_MAX
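/*
 * the vk two-call pattern used above and below: set n to the local storage
 * capacity and pass the buffer; on return n holds the written count, and
 * r == vk_incomplete signals the capacity was too small, in which case this
 * code skips the dump instead of growing the buffer.
 */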
#define LAYERS_N_MAX 32

/* in theory, this could change on the fly */
static void instance_layers_dump(void)
{
        struct vk_layer_props_t layers[LAYERS_N_MAX];
        u32 n;
        s32 r;

        memset(layers, 0, sizeof(layers));
        n = LAYERS_N_MAX;
        vk_enumerate_instance_layer_props(&n, layers);
        if (r != vk_success && r != vk_incomplete) {
                WARNINGVK("%d:unable to enumerate instance layer(s)\n", r);
                return;
        }
        if (r == vk_incomplete) {
                WARNINGVK("too many layers (%u/%u), dumping disabled\n", n, LAYERS_N_MAX);
                return;
        }
        POUTVK("have %u instance layer(s)\n", n);
        while (n > 0) {
                --n;
                POUTVK("instance layer:%u:name=%s:specification version=%u:implementation version=%u:description=%s\n", n, layers[n].name, layers[n].spec_version, layers[n].implementation_version, layers[n].desc);
        }
}
#undef LAYERS_N_MAX
static void instance_create(void)
{
        struct vk_instance_create_info_t info;
        static u8 *exts[] = {
                /*
                 * TODO: there is a shabby (a mess of pixel fmts),
                 * "expensive", promoted YUV extension
                 */
                /*
                 * XXX: not 1.1 promoted, should not use it, but it is fixing
                 * some inconsistency from 1.0
                 */
                "VK_KHR_get_surface_capabilities2",
                "VK_KHR_surface", /* required by VK_KHR_xcb_surface */
                "VK_KHR_get_physical_device_properties2",
                "VK_KHR_xcb_surface",
        };
        u8 i;
        s32 r;

        i = 0;
        while (1) {
                if (i == ARRAY_N(exts))
                        break;
                POUTVK("will use instance extension %s\n", exts[i]);
                ++i;
        }
        memset(&info, 0, sizeof(info));
        info.type = vk_struct_type_instance_create_info;
        info.enabled_exts_n = ARRAY_N(exts);
        info.enabled_ext_names = exts;
        vk_create_instance(&info);
        IF_FATALVK("%d:unable to create an instance\n", r);
        POUTVK("instance handle %p\n", instance_l);
}
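/*
 * note: ARRAY_N is not defined in this file; it is assumed to be the usual
 * element-count macro, i.e. something like:
 *   #define ARRAY_N(a) (sizeof(a) / sizeof((a)[0]))
 */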
static void tmp_phydevs_get(void)
{
        struct vk_phydev_t *phydevs[tmp_phydevs_n_max];
        u32 n;
        s32 r;

        memset(phydevs, 0, sizeof(phydevs));
        n = tmp_phydevs_n_max;
        vk_enumerate_phydevs(&n, phydevs);
        if (r != vk_success && r != vk_incomplete)
                FATALVK("%d:unable to enumerate physical devices\n", r);
        if (r == vk_incomplete)
                FATALVK("too many vulkan physical devices %u/%u for our temporary storage\n", n, tmp_phydevs_n_max);
        POUTVK("detected %u physical devices\n", n);
        if (n == 0)
                FATALVK("no vulkan physical devices, exiting\n");
        memset(tmp_phydevs_l, 0, sizeof(tmp_phydevs_l));
        tmp_phydevs_n_l = n;
        n = 0;
        while (1) {
                if (n == tmp_phydevs_n_l)
                        break;
                tmp_phydevs_l[n].vk = phydevs[n];
                ++n;
        }
}
#define EXTS_N_MAX 512

static void phydev_exts_dump(void *phydev)
{
        struct vk_ext_props_t exts[EXTS_N_MAX];
        u32 n;
        s32 r;

        memset(exts, 0, sizeof(exts));
        n = EXTS_N_MAX;
        vk_enumerate_dev_ext_props(phydev, &n, exts);
        if (r != vk_success && r != vk_incomplete) {
                WARNINGVK("physical device:%p:%d:unable to enumerate device extension(s)\n", phydev, r);
                return;
        }
        if (r == vk_incomplete) {
                WARNINGVK("physical device:%p:too many extensions (%u/%u), dumping disabled\n", phydev, n, EXTS_N_MAX);
                return;
        }
        POUTVK("physical device:%p:have %u device extension(s)\n", phydev, n);
        while (n > 0) {
                POUTVK("physical device:%p:device extension:name=%s:specification version=%u\n", phydev, exts[n - 1].name, exts[n - 1].spec_version);
                --n;
        }
}
static void tmp_phydevs_exts_dump(void)
{
        u8 i;

        i = 0;
        while (1) {
                if (i == tmp_phydevs_n_l)
                        break;
                phydev_exts_dump(tmp_phydevs_l[i].vk);
                ++i;
        }
}
#undef EXTS_N_MAX
static u8 *dev_type_str(u32 type)
{
        switch (type) {
        case vk_phydev_type_other:
                return "other";
        case vk_phydev_type_integrated_gpu:
                return "integrated gpu";
        case vk_phydev_type_discrete_gpu:
                return "discrete gpu";
        case vk_phydev_type_virtual_gpu:
                return "virtual gpu";
        case vk_phydev_type_cpu:
                return "cpu";
        default:
                return "unknown";
        }
}
static u8 *uuid_str(u8 *uuid)
{
        static u8 uuid_str[VK_UUID_SZ * 2 + 1];
        u8 i;

        memset(uuid_str, 0, sizeof(uuid_str));
        i = 0;
        while (1) {
                if (i == VK_UUID_SZ)
                        break;
                /* XXX: always writes a terminating 0, truncated or not */
                snprintf(uuid_str + i * 2, 3, "%02x", uuid[i]);
                ++i;
        }
        return uuid_str;
}
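/*
 * each snprintf() above writes at most 2 hex digits plus the terminating 0
 * into a 3-byte window, so the backing buffer needs VK_UUID_SZ * 2 + 1 bytes;
 * the last window's 0 terminates the whole string.
 */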
static void tmp_phydevs_props_dump(void)
{
        u8 i;

        i = 0;
        while (1) {
                struct vk_phydev_props_t props;
                struct tmp_phydev_t *p;

                if (i == tmp_phydevs_n_l)
                        break;
                p = &tmp_phydevs_l[i];
                memset(&props, 0, sizeof(props));
                props.type = vk_struct_type_phydev_props;
                vk_get_phydev_props(p->vk, &props);
                POUTVK("physical device:%p:properties:api version=%#x=%u.%u.%u\n", p->vk, props.core.api_version, VK_VERSION_MAJOR(props.core.api_version), VK_VERSION_MINOR(props.core.api_version), VK_VERSION_PATCH(props.core.api_version));
                POUTVK("physical device:%p:properties:driver version=%#x=%u.%u.%u\n", p->vk, props.core.driver_version, VK_VERSION_MAJOR(props.core.driver_version), VK_VERSION_MINOR(props.core.driver_version), VK_VERSION_PATCH(props.core.driver_version));
                POUTVK("physical device:%p:properties:vendor id=%#x\n", p->vk, props.core.vendor_id);
                POUTVK("physical device:%p:properties:device id=%#x\n", p->vk, props.core.dev_id);
                POUTVK("physical device:%p:properties:type=%s\n", p->vk, dev_type_str(props.core.dev_type));
                if (props.core.dev_type == vk_phydev_type_discrete_gpu)
                        p->is_discret_gpu = true;
                else
                        p->is_discret_gpu = false;
                POUTVK("physical device:%p:properties:name=%s\n", p->vk, props.core.name);
                POUTVK("physical device:%p:properties:pipeline cache uuid=%s\n", p->vk, uuid_str(props.core.pl_cache_uuid));
                /* display the limits and sparse props at a "higher log lvl", if needed in the end */
                ++i;
        }
}
static void tmp_phydev_mem_props_get(struct tmp_phydev_t *p)
{
        memset(&p->mem_props, 0, sizeof(p->mem_props));
        p->mem_props.type = vk_struct_type_phydev_mem_props;
        vk_get_phydev_mem_props(p->vk, &p->mem_props);
}

static void tmp_phydevs_mem_props_get(void)
{
        u8 i;

        i = 0;
        while (1) {
                if (i == tmp_phydevs_n_l)
                        break;
                tmp_phydev_mem_props_get(&tmp_phydevs_l[i]);
                ++i;
        }
}
static void phydev_mem_type_dump(void *phydev, u8 i,
                                        struct vk_mem_type_t *type)
{
        POUTVK("physical device:%p:memory type:%u:heap:%u\n", phydev, i, type->heap);
        POUTVK("physical device:%p:memory type:%u:flags:%#08x\n", phydev, i, type->prop_flags);
        if ((type->prop_flags & vk_mem_prop_dev_local_bit) != 0)
                POUTVK("physical device:%p:memory type:%u:device local\n", phydev, i);
        if ((type->prop_flags & vk_mem_prop_host_visible_bit) != 0)
                POUTVK("physical device:%p:memory type:%u:host visible\n", phydev, i);
        if ((type->prop_flags & vk_mem_prop_host_cached_bit) != 0)
                POUTVK("physical device:%p:memory type:%u:host cached\n", phydev, i);
}
static void tmp_phydev_mem_types_dump(struct tmp_phydev_t *p)
{
        u8 i;

        POUTVK("physical device:%p:%u memory types\n", p->vk, p->mem_props.core.mem_types_n);
        i = 0;
        while (1) {
                if (i == p->mem_props.core.mem_types_n)
                        break;
                phydev_mem_type_dump(p->vk, i,
                        &p->mem_props.core.mem_types[i]);
                ++i;
        }
}
static void phydev_mem_heap_dump(void *phydev, u8 i,
                                        struct vk_mem_heap_t *heap)
{
        POUTVK("physical device:%p:memory heap:%u:size:%lu bytes\n", phydev, i, heap->sz);
        POUTVK("physical device:%p:memory heap:%u:flags:%#08x\n", phydev, i, heap->flags);
        if ((heap->flags & vk_mem_heap_dev_local_bit) != 0)
                POUTVK("physical device:%p:memory heap:%u:device local\n", phydev, i);
        if ((heap->flags & vk_mem_heap_multi_instance_bit) != 0)
                POUTVK("physical device:%p:memory heap:%u:multi instance\n", phydev, i);
}
static void tmp_phydev_mem_heaps_dump(struct tmp_phydev_t *p)
{
        u8 i;

        POUTVK("physical device:%p:%u memory heaps\n", p->vk, p->mem_props.core.mem_heaps_n);
        i = 0;
        while (1) {
                if (i == p->mem_props.core.mem_heaps_n)
                        break;
                phydev_mem_heap_dump(p->vk, i,
                        &p->mem_props.core.mem_heaps[i]);
                ++i;
        }
}
static void tmp_phydev_mem_props_dump(struct tmp_phydev_t *p)
{
        tmp_phydev_mem_types_dump(p);
        tmp_phydev_mem_heaps_dump(p);
}

static void tmp_phydevs_mem_props_dump(void)
{
        u8 i;

        i = 0;
        while (1) {
                if (i == tmp_phydevs_n_l)
                        break;
                tmp_phydev_mem_props_dump(&tmp_phydevs_l[i]);
                ++i;
        }
}
static void tmp_phydev_q_fams_get(struct tmp_phydev_t *p)
{
        u32 n;
        u8 i;

        n = 0;
        vk_get_phydev_q_fam_props(p->vk, &n, 0);
        if (n > tmp_phydev_q_fams_n_max)
                FATALVK("physical device:%p:too many queue families %u/%u\n", p->vk, n, tmp_phydev_q_fams_n_max);
        memset(p->q_fams, 0, sizeof(p->q_fams));
        i = 0;
        while (1) {
                if (i == tmp_phydev_q_fams_n_max)
                        break;
                p->q_fams[i].type = vk_struct_type_q_fam_props;
                ++i;
        }
        vk_get_phydev_q_fam_props(p->vk, &n, p->q_fams);
        p->q_fams_n = n;
        POUTVK("physical device:%p:have %u queue families\n", p->vk, p->q_fams_n);
}

static void tmp_phydevs_q_fams_get(void)
{
        u8 i;

        i = 0;
        while (1) {
                if (i == tmp_phydevs_n_l)
                        break;
                tmp_phydev_q_fams_get(&tmp_phydevs_l[i]);
                ++i;
        }
}
static void tmp_phydev_q_fams_dump(struct tmp_phydev_t *p)
{
        u8 i;

        i = 0;
        while (1) {
                if (i == p->q_fams_n)
                        break;
                if ((p->q_fams[i].core.flags & vk_q_gfx_bit) != 0)
                        POUTVK("physical device:%p:queue family:%u:flags:graphics\n", p->vk, i);
                if ((p->q_fams[i].core.flags & vk_q_compute_bit) != 0)
                        POUTVK("physical device:%p:queue family:%u:flags:compute\n", p->vk, i);
                if ((p->q_fams[i].core.flags & vk_q_transfer_bit) != 0)
                        POUTVK("physical device:%p:queue family:%u:flags:transfer\n", p->vk, i);
                if ((p->q_fams[i].core.flags & vk_q_sparse_binding_bit) != 0)
                        POUTVK("physical device:%p:queue family:%u:flags:sparse binding\n", p->vk, i);
                if ((p->q_fams[i].core.flags & vk_q_protected_bit) != 0)
                        POUTVK("physical device:%p:queue family:%u:flags:protected\n", p->vk, i);
                POUTVK("physical device:%p:queue family:%u:%u queues\n", p->vk, i, p->q_fams[i].core.qs_n);
                POUTVK("physical device:%p:queue family:%u:%u bits timestamps\n", p->vk, i, p->q_fams[i].core.timestamp_valid_bits);
                POUTVK("physical device:%p:queue family:%u:(width=%u,height=%u,depth=%u) minimum image transfer granularity\n", p->vk, i, p->q_fams[i].core.min_img_transfer_granularity.width, p->q_fams[i].core.min_img_transfer_granularity.height, p->q_fams[i].core.min_img_transfer_granularity.depth);
                ++i;
        }
}

static void tmp_phydevs_q_fams_dump(void)
{
        u8 i;

        i = 0;
        while (1) {
                if (i == tmp_phydevs_n_l)
                        break;
                tmp_phydev_q_fams_dump(&tmp_phydevs_l[i]);
                ++i;
        }
}
/*
 * the major obj to use in the vk abstraction of gfx hardware is the q. in
 * this abstraction, many core objs like bufs/imgs are "owned" by a specific
 * q, and transferring such ownership to another q can be expensive. we know
 * it's not really the case on AMD hardware, but if the vk abstraction insists
 * on this, it probably means it is important on some hardware from other
 * vendors.
 */
static void tmp_phydevs_q_fams_surf_support_get(void)
{
        u8 i;

        i = 0;
        while (1) {
                struct tmp_phydev_t *p;
                u8 j;

                if (i == tmp_phydevs_n_l)
                        break;
                p = &tmp_phydevs_l[i];
                j = 0;
                while (1) {
                        u32 supported;
                        s32 r;

                        if (j == p->q_fams_n)
                                break;
                        supported = vk_false;
                        vk_get_phydev_surf_support(p->vk, j, &supported);
                        IF_FATALVK("%d:physical device:%p:queue family:%u:surface:%p:unable to query queue family wsi/(image presentation to our surface) support\n", r, p->vk, j, surf_p.vk);
                        if (supported == vk_true) {
                                POUTVK("physical device:%p:queue family:%u:surface:%p:does support wsi/(image presentation to our surface)\n", p->vk, j, surf_p.vk);
                                p->q_fams_surf_support[j] = true;
                        } else {
                                POUTVK("physical device:%p:queue family:%u:surface:%p:does not support wsi/(image presentation to our surface)\n", p->vk, j, surf_p.vk);
                                p->q_fams_surf_support[j] = false;
                        }
                        ++j;
                }
                ++i;
        }
}
static void tmp_selected_phydev_cherry_pick(u8 i)
{
        struct tmp_phydev_t *p;

        p = &tmp_phydevs_l[i];
        surf_p.dev.phydev.vk = p->vk;
        surf_p.dev.phydev.is_discret_gpu = p->is_discret_gpu;
        surf_p.dev.phydev.mem_types_n = p->mem_props.core.mem_types_n;
        memcpy(surf_p.dev.phydev.mem_types, p->mem_props.core.mem_types,
                sizeof(surf_p.dev.phydev.mem_types));
}
/*
 * we ask the qs of the phydevs which one is able to present imgs to the
 * external pe surf. additionally we require this q to support gfx. we select
 * basically the first q from the first phydev fitting what we are looking
 * for.
 */
static void tmp_phydev_and_q_fam_select(void)
{
        u8 i;

        i = 0;
        while (1) {
                struct tmp_phydev_t *p;
                u8 j;

                if (i == tmp_phydevs_n_l)
                        break;
                p = &tmp_phydevs_l[i];
                j = 0;
                while (1) {
                        if (j == p->q_fams_n)
                                break;
                        /*
                         * we are looking for a q fam with:
                         * - img presentation to our surf
                         * - gfx
                         * - transfer (implicit with gfx)
                         */
                        if (p->q_fams_surf_support[j]
                                        && (p->q_fams[j].core.flags
                                                        & vk_q_gfx_bit) != 0) {
                                surf_p.dev.phydev.q_fam = j;
                                tmp_selected_phydev_cherry_pick(i);
                                POUTVK("physical device %p selected for (wsi/image presentation to our surface %p) using its queue family %u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.q_fam);
                                return;
                        }
                        ++j;
                }
                ++i;
        }
}
static void texel_mem_blk_confs_dump(u32 confs_n,
                        struct vk_surf_texel_mem_blk_conf_t *confs)
{
        u32 i;

        i = 0;
        while (1) {
                if (i == confs_n)
                        break;
                POUTVK("physical device:%p:surface:%p:texel memory block configuration:format=%u color_space=%u\n", surf_p.dev.phydev.vk, surf_p.vk, confs[i].core.fmt, confs[i].core.color_space);
                ++i;
        }
}
/*
 * we only know this phydev/q is "able to present imgs" to the external
 * pe surf. here we choose the cfg of the texel blks
 */
#define CONFS_N_MAX 1024
static void phydev_surf_texel_mem_blk_conf_select(void)
{
        struct vk_phydev_surf_info_t info;
        struct vk_surf_texel_mem_blk_conf_t confs[CONFS_N_MAX];
        struct vk_surf_texel_mem_blk_conf_core_t *cc;
        u32 confs_n;
        u32 i;
        s32 r;

        memset(&info, 0, sizeof(info));
        info.type = vk_struct_type_phydev_surf_info;
        info.surf = surf_p.vk;
        vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, 0);
        IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the count of valid surface texel memory block configurations\n", r, surf_p.dev.phydev.vk, surf_p.vk);
        if (confs_n > CONFS_N_MAX)
                FATALVK("physical device:%p:surface:%p:too many surface texel memory block configurations %u/%u\n", surf_p.dev.phydev.vk, surf_p.vk, confs_n, CONFS_N_MAX);
        memset(confs, 0, sizeof(confs[0]) * confs_n);
        i = 0;
        while (1) {
                if (i == confs_n)
                        break;
                confs[i].type = vk_struct_type_surf_texel_mem_blk_conf;
                ++i;
        }
        vk_get_phydev_surf_texel_mem_blk_confs(&info, &confs_n, confs);
        IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the valid surface texel memory block configurations\n", r, surf_p.dev.phydev.vk, surf_p.vk);
        if (confs_n == 0)
                FATALVK("physical device:%p:surface:%p:no valid surface texel memory block configuration\n", surf_p.dev.phydev.vk, surf_p.vk);
        texel_mem_blk_confs_dump(confs_n, confs);
        cc = &surf_p.dev.phydev.selected_texel_mem_blk_conf_core;
        /*
         * the following texel cfg is guaranteed to exist, and this is what we
         * want anyway
         */
        cc->fmt = vk_texel_mem_blk_fmt_b8g8r8a8_srgb;
        POUTVK("physical device:%p:surface:%p:using our surface texel memory block format %u\n", surf_p.dev.phydev.vk, surf_p.vk, cc->fmt);
        cc->color_space = vk_color_space_srgb_nonlinear;
        POUTVK("physical device:%p:surface:%p:using preferred surface texel memory block color space %u\n", surf_p.dev.phydev.vk, surf_p.vk, cc->color_space);
}
static void phydev_surf_caps_get(void)
{
        struct vk_phydev_surf_info_t info;
        s32 r;

        memset(&info, 0, sizeof(info));
        info.type = vk_struct_type_phydev_surf_info;
        info.surf = surf_p.vk;
        memset(&surf_p.dev.phydev.surf_caps, 0,
                sizeof(surf_p.dev.phydev.surf_caps));
        surf_p.dev.phydev.surf_caps.type = vk_struct_type_surf_caps;
        vk_get_phydev_surf_caps(&info, &surf_p.dev.phydev.surf_caps);
        IF_FATALVK("%d:physical device:%p:surface:%p:unable to get our surface capabilities in the context of the selected physical device\n", r, surf_p.dev.phydev.vk, surf_p.vk);
        /* we have room for a maximum of 3 images per swapchain */
        if (surf_p.dev.phydev.surf_caps.core.imgs_n_min > swpchn_imgs_n_max)
                FATALVK("physical device:%p:surface:%p:we have room for %u images per swapchain, but this swapchain requires a minimum of %u images\n", surf_p.dev.phydev.vk, surf_p.vk, swpchn_imgs_n_max, surf_p.dev.phydev.surf_caps.core.imgs_n_min);
}
static void phydev_surf_caps_dump(void)
{
        POUTVK("physical device:%p:surface:%p:imgs_n_min=%u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.imgs_n_min);
        POUTVK("physical device:%p:surface:%p:imgs_n_max=%u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.imgs_n_max);
        POUTVK("physical device:%p:surface:%p:current extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.current_extent.width, surf_p.dev.phydev.surf_caps.core.current_extent.height);
        POUTVK("physical device:%p:surface:%p:minimal extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.img_extent_min.width, surf_p.dev.phydev.surf_caps.core.img_extent_min.height);
        POUTVK("physical device:%p:surface:%p:maximal extent=(width=%u, height=%u)\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.img_extent_max.width, surf_p.dev.phydev.surf_caps.core.img_extent_max.height);
        POUTVK("physical device:%p:surface:%p:img_array_layers_n_max=%u\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.img_array_layers_n_max);
        POUTVK("physical device:%p:surface:%p:supported_transforms=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.supported_transforms);
        POUTVK("physical device:%p:surface:%p:current_transform=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.current_transform);
        POUTVK("physical device:%p:surface:%p:supported_composite_alpha=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.supported_composite_alpha);
        POUTVK("physical device:%p:surface:%p:supported_img_usage_flags=%#08x\n", surf_p.dev.phydev.vk, surf_p.vk, surf_p.dev.phydev.surf_caps.core.supported_img_usage_flags);
}
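/*
 * spec note: a current extent of (0xffffffff, 0xffffffff) would mean the surf
 * lets the swpchn choose its own size; the swpchn code below memcpy()s
 * current_extent as-is, so it relies on xcb surfs reporting the actual window
 * extent, which they do.
 */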
static void tmp_phydev_surf_present_modes_get(void)
{
        s32 r;

        tmp_present_modes_n_l = tmp_present_modes_n_max;
        vk_get_phydev_surf_present_modes();
        IF_FATALVK("%d:physical device:%p:surface:%p:unable to get the physical device present modes for our surface\n", r, surf_p.dev.phydev.vk, surf_p.vk);
}
static u8 *present_mode_to_str(u32 mode)
{
        switch (mode) {
        case vk_present_mode_immediate:
                return "immediate";
        case vk_present_mode_mailbox:
                return "mailbox";
        case vk_present_mode_fifo:
                return "fifo";
        case vk_present_mode_fifo_relaxed:
                return "fifo relaxed";
        default:
                return "unknown";
        }
}
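/*
 * spec recap: "immediate" may tear, "mailbox" keeps only the newest pending
 * img, "fifo" queues imgs on vsync and is the only mode guaranteed to be
 * supported, and "fifo relaxed" behaves like fifo but may tear when an img
 * arrives late.
 */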
static void tmp_phydev_surf_present_modes_dump(void)
{
        u8 i;

        POUTVK("physical device:%p:surface:%p:%u present modes\n", surf_p.dev.phydev.vk, surf_p.vk, tmp_present_modes_n_l);
        i = 0;
        while (1) {
                if (i == (u8)tmp_present_modes_n_l)
                        break;
                POUTVK("physical device:%p:surface:%p:present mode=%s\n", surf_p.dev.phydev.vk, surf_p.vk, present_mode_to_str(tmp_present_modes_l[i]));
                ++i;
        }
}
static void phydev_init(void)
{
        tmp_phydevs_get();
        /*--------------------------------------------------------------------*/
        tmp_phydevs_exts_dump();
        tmp_phydevs_props_dump();
        tmp_phydevs_mem_props_get();
        tmp_phydevs_mem_props_dump();
        /*--------------------------------------------------------------------*/
        tmp_phydevs_q_fams_get();
        tmp_phydevs_q_fams_dump();
        /*====================================================================*/
        /* from here our surf is involved */
        /*--------------------------------------------------------------------*/
        /* select the phydev and its q family which can work with our surf */
        tmp_phydevs_q_fams_surf_support_get();
        tmp_phydev_and_q_fam_select();
        /*--------------------------------------------------------------------*/
        phydev_surf_texel_mem_blk_conf_select();
        /*--------------------------------------------------------------------*/
        phydev_surf_caps_get();
        phydev_surf_caps_dump();
        /*--------------------------------------------------------------------*/
        tmp_phydev_surf_present_modes_get();
        tmp_phydev_surf_present_modes_dump();
}
/* the phydev and its q fam are selected */
static void dev_create(void)
{
        struct vk_dev_create_info_t info;
        struct vk_dev_q_create_info_t q_info;
        float q_prio;
        s32 r;
        static u8 *exts[] = {
                "VK_KHR_swapchain", /* needed by the swpchn entry points */
                "VK_KHR_bind_memory2",
                "VK_KHR_get_memory_requirements2",
        };

        memset(&info, 0, sizeof(info));
        memset(&q_info, 0, sizeof(q_info));
        /*--------------------------------------------------------------------*/
        q_info.type = vk_struct_type_dev_q_create_info;
        q_info.q_fam = surf_p.dev.phydev.q_fam;
        q_info.qs_n = 1;
        q_prio = 1.0f;
        q_info.q_prios = &q_prio;
        /*--------------------------------------------------------------------*/
        info.type = vk_struct_type_dev_create_info;
        info.q_create_infos_n = 1;
        info.q_create_infos = &q_info;
        info.enabled_exts_n = ARRAY_N(exts);
        info.enabled_ext_names = exts;
        vk_create_dev(&info);
        IF_FATALVK("%d:physical device:%p:unable to create a vulkan device\n", r, surf_p.dev.phydev.vk);
        POUTVK("physical device:%p:vulkan device created with one proper queue:%p\n", surf_p.dev.phydev.vk, surf_p.dev.vk);
}
static void q_get(void)
{
        POUTVK("device:%p:getting queue:family=%u queue=0\n", surf_p.dev.vk, surf_p.dev.phydev.q_fam);
        vk_get_dev_q();
        POUTVK("device:%p:got queue:%p\n", surf_p.dev.vk, surf_p.dev.q);
}
static void cp_create(void)
{
        struct vk_cp_create_info_t info;
        s32 r;

        memset(&info, 0, sizeof(info));
        info.type = vk_struct_type_cp_create_info;
        info.flags = vk_cp_create_reset_cb_bit;
        info.q_fam = surf_p.dev.phydev.q_fam;
        vk_create_cp(&info);
        IF_FATALVK("%d:unable to create the command pool\n", r);
        POUTVK("device:%p:queue family:%u:created command pool %p\n", surf_p.dev.vk, surf_p.dev.phydev.q_fam, surf_p.dev.cp);
}
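/*
 * vk_cp_create_reset_cb_bit presumably maps to
 * VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT: without it, the
 * vk_reset_cb/vkResetCommandBuffer calls loaded in dev_syms would be invalid
 * on cbs from this pool.
 */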
static void dev_init(void)
{
        dev_create();
        /*--------------------------------------------------------------------*/
        q_get();
        cp_create();
}

/*
 * XXX: the surf is an obj at the instance lvl, NOT THE [PHYSICAL]
 * DEV LVL
 */
static void surf_create(xcb_connection_t *c, u32 win_id)
{
        struct vk_xcb_surf_create_info_t vk_xcb_info;
        s32 r;

        memset(&surf_p, 0, sizeof(surf_p));
        memset(&vk_xcb_info, 0, sizeof(vk_xcb_info));
        vk_xcb_info.type = vk_struct_type_xcb_surf_create_info;
        vk_xcb_info.c = c; /* field name for the xcb connection assumed */
        vk_xcb_info.win = win_id;
        vk_create_xcb_surf(&vk_xcb_info);
        IF_FATALVK("%d:xcb:%p:window id:%#x:unable to create a vulkan surface from this x11 window\n", r, c, win_id);
        POUTVK("xcb:%p:window id:%#x:created vk_surface=%p\n", c, win_id, surf_p.vk);
}
static void swpchn_init_once(void)
{
        memset(&surf_p.dev.swpchn, 0, sizeof(surf_p.dev.swpchn));
}
static void swpchn_reinit(void)
{
        struct vk_swpchn_t *old_swpchn;
        struct vk_swpchn_create_info_t info;
        struct phydev_t *p; /* struct tag assumed */
        s32 r;

        /* first, deal with the previous swpchn, if any */
        old_swpchn = surf_p.dev.swpchn.vk;
        surf_p.dev.swpchn.vk = 0;
        /* the lifetime of swpchn imgs is handled by the pe, not us */
        surf_p.dev.swpchn.imgs_n = 0;
        memset(surf_p.dev.swpchn.imgs, 0, sizeof(surf_p.dev.swpchn.imgs));
        /*--------------------------------------------------------------------*/
        memset(&info, 0, sizeof(info));
        p = &surf_p.dev.phydev;
        info.type = vk_struct_type_swpchn_create_info;
        info.surf = surf_p.vk;
        info.imgs_n_min = surf_p.dev.phydev.surf_caps.core.imgs_n_min;
        info.img_texel_mem_blk_fmt = p->selected_texel_mem_blk_conf_core.fmt;
        info.img_color_space = p->selected_texel_mem_blk_conf_core.color_space;
        memcpy(&info.img_extent,
                &surf_p.dev.phydev.surf_caps.core.current_extent,
                sizeof(info.img_extent));
        info.img_layers_n = 1;
        info.img_usage = vk_img_usage_color_attachment_bit
                | vk_img_usage_transfer_dst_bit;
        info.img_sharing_mode = vk_sharing_mode_exclusive;
        info.pre_transform = vk_surf_transform_identity_bit;
        info.composite_alpha = vk_composite_alpha_opaque_bit;
        info.present_mode = vk_present_mode_fifo;
        info.clipped = vk_true;
        info.old_swpchn = old_swpchn;
        vk_create_swpchn(&info);
        IF_FATALVK("%d:device:%p:surface:%p:unable to create the swapchain\n", r, surf_p.dev.vk, surf_p.vk);
        if (old_swpchn != 0)
                vk_destroy_swpchn(old_swpchn);
}
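/*
 * note on old_swpchn: passing the retired swpchn in the create info lets the
 * implementation recycle its resources, but the retired handle still belongs
 * to us, hence the explicit vk_destroy_swpchn() once the new swpchn exists.
 */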
static void swpchn_imgs_get(void)
{
        s32 r;

        /*
         * TODO: should try to figure out how to favor double buffering over
         * triple buffering
         */
        surf_p.dev.swpchn.imgs_n = swpchn_imgs_n_max;
        vk_get_swpchn_imgs();
        IF_FATALVK("%d:device:%p:surface:%p:swapchain:%p:unable to get the swapchain images\n", r, surf_p.dev.vk, surf_p.vk, surf_p.dev.swpchn.vk);
}
static void sems_create(void)
{
        struct vk_sem_create_info_t info;
        u8 sem;
        s32 r;

        sem = 0;
        while (1) {
                if (sem == ARRAY_N(surf_p.dev.sems))
                        break;
                memset(&info, 0, sizeof(info));
                info.type = vk_struct_type_sem_create_info;
                vk_create_sem(&info, &surf_p.dev.sems[sem]);
                IF_FATALVK("%d:device:%p:unable to create a semaphore %u for the synchronization of the swapchain\n", r, surf_p.dev.vk, sem);
                POUTVK("device:%p:semaphore %u for the synchronization of the swapchain created %p\n", surf_p.dev.vk, sem, surf_p.dev.sems[sem]);
                ++sem;
        }
}
static void swpchn_imgs_cbs_init_once(void)
{
        struct vk_cb_alloc_info_t alloc_info;
        s32 r;

        memset(&alloc_info, 0, sizeof(alloc_info));
        alloc_info.type = vk_struct_type_cb_alloc_info;
        alloc_info.cp = surf_p.dev.cp;
        alloc_info.lvl = vk_cb_lvl_primary;
        alloc_info.cbs_n = swpchn_imgs_n_max;
        vk_alloc_cbs(&alloc_info);
        IF_FATALVK("%d:device:%p:unable to allocate command buffers for our swapchain images from %p command pool\n", r, surf_p.dev.vk, surf_p.dev.cp);
        POUTVK("device:%p:allocated %u command buffers for our swapchain images from %p command pool\n", surf_p.dev.vk, surf_p.dev.swpchn.imgs_n, surf_p.dev.cp);
}