/* Wine Vulkan ICD implementation
 *
 * Copyright 2017 Roderick Colenbrander
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "vulkan_private.h"
#include "wine/vulkan_driver.h"

WINE_DEFAULT_DEBUG_CHANNEL(vulkan);

static BOOL is_wow64(void)
{
    return sizeof(void *) == sizeof(UINT64) && NtCurrentTeb()->WowTebOffset;
}

static BOOL use_external_memory(void)
{
    return is_wow64();
}

static ULONG_PTR zero_bits = 0;

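/* Presumably the intent: zero_bits constrains the NtAllocateVirtualMemory() call in
 * wine_vkAllocateMemory() below so that, on wow64, host-pointer allocations stay addressable
 * from the 32-bit client; on a plain 64-bit prefix it stays 0 and imposes no constraint. */
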
#define wine_vk_count_struct(s, t) wine_vk_count_struct_((void *)s, VK_STRUCTURE_TYPE_##t)
static uint32_t wine_vk_count_struct_(void *s, VkStructureType t)
{
    const VkBaseInStructure *header;
    uint32_t result = 0;

    for (header = s; header; header = header->pNext)
    {
        if (header->sType == t)
            result++;
    }

    return result;
}

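/* Usage example, taken from wine_vk_instance_convert_create_info() further down:
 *     object->utils_messenger_count = wine_vk_count_struct(dst, DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
 * i.e. walk the pNext chain of dst and count how many structs of the given sType are chained in. */
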
static const struct vulkan_funcs *vk_funcs;

#define WINE_VK_ADD_DISPATCHABLE_MAPPING(instance, client_handle, native_handle, object) \
    wine_vk_add_handle_mapping((instance), (uintptr_t)(client_handle), (uintptr_t)(native_handle), &(object)->mapping)
#define WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(instance, client_handle, native_handle, object) \
    wine_vk_add_handle_mapping((instance), (uintptr_t)(client_handle), (native_handle), &(object)->mapping)
static void wine_vk_add_handle_mapping(struct wine_instance *instance, uint64_t wrapped_handle,
        uint64_t native_handle, struct wine_vk_mapping *mapping)
{
    if (instance->enable_wrapper_list)
    {
        mapping->native_handle = native_handle;
        mapping->wine_wrapped_handle = wrapped_handle;
        pthread_rwlock_wrlock(&instance->wrapper_lock);
        list_add_tail(&instance->wrappers, &mapping->link);
        pthread_rwlock_unlock(&instance->wrapper_lock);
    }
}

#define WINE_VK_REMOVE_HANDLE_MAPPING(instance, object) \
    wine_vk_remove_handle_mapping((instance), &(object)->mapping)
static void wine_vk_remove_handle_mapping(struct wine_instance *instance, struct wine_vk_mapping *mapping)
{
    if (instance->enable_wrapper_list)
    {
        pthread_rwlock_wrlock(&instance->wrapper_lock);
        list_remove(&mapping->link);
        pthread_rwlock_unlock(&instance->wrapper_lock);
    }
}

static uint64_t wine_vk_get_wrapper(struct wine_instance *instance, uint64_t native_handle)
{
    struct wine_vk_mapping *mapping;
    uint64_t result = 0;

    pthread_rwlock_rdlock(&instance->wrapper_lock);
    LIST_FOR_EACH_ENTRY(mapping, &instance->wrappers, struct wine_vk_mapping, link)
    {
        if (mapping->native_handle == native_handle)
        {
            result = mapping->wine_wrapped_handle;
            break;
        }
    }
    pthread_rwlock_unlock(&instance->wrapper_lock);
    return result;
}

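/* The lookup above is a linear scan under a read lock; it is exercised from the debug
 * callbacks below, which translate native handles in callback data back to the
 * client-visible wrapped handles before calling back into user mode. */
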
static VkBool32 debug_utils_callback_conversion(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
        VkDebugUtilsMessageTypeFlagsEXT message_types,
        const VkDebugUtilsMessengerCallbackDataEXT *callback_data,
        void *user_data)
{
    struct wine_vk_debug_utils_params params;
    VkDebugUtilsObjectNameInfoEXT *object_name_infos;
    struct wine_debug_utils_messenger *object;
    void *ret_ptr;
    ULONG ret_len;
    VkBool32 result;
    unsigned int i;

    TRACE("%i, %u, %p, %p\n", severity, message_types, callback_data, user_data);

    object = user_data;

    if (!object->instance->instance)
    {
        /* instance wasn't yet created, this is a message from the native loader */
        return VK_FALSE;
    }

    /* FIXME: we should pack all referenced structs instead of passing pointers */
    params.user_callback = object->user_callback;
    params.user_data = object->user_data;
    params.severity = severity;
    params.message_types = message_types;
    params.data = *((VkDebugUtilsMessengerCallbackDataEXT *) callback_data);

    object_name_infos = calloc(params.data.objectCount, sizeof(*object_name_infos));

    for (i = 0; i < params.data.objectCount; i++)
    {
        object_name_infos[i].sType = callback_data->pObjects[i].sType;
        object_name_infos[i].pNext = callback_data->pObjects[i].pNext;
        object_name_infos[i].objectType = callback_data->pObjects[i].objectType;
        object_name_infos[i].pObjectName = callback_data->pObjects[i].pObjectName;

        if (wine_vk_is_type_wrapped(callback_data->pObjects[i].objectType))
        {
            object_name_infos[i].objectHandle = wine_vk_get_wrapper(object->instance, callback_data->pObjects[i].objectHandle);
            if (!object_name_infos[i].objectHandle)
            {
                WARN("handle conversion failed 0x%s\n", wine_dbgstr_longlong(callback_data->pObjects[i].objectHandle));
                free(object_name_infos);
                return VK_FALSE;
            }
        }
        else
        {
            object_name_infos[i].objectHandle = callback_data->pObjects[i].objectHandle;
        }
    }

    params.data.pObjects = object_name_infos;

    /* applications should always return VK_FALSE */
    result = KeUserModeCallback( NtUserCallVulkanDebugUtilsCallback, &params, sizeof(params),
                                 &ret_ptr, &ret_len );

    free(object_name_infos);

    return result;
}

static VkBool32 debug_report_callback_conversion(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT object_type,
        uint64_t object_handle, size_t location, int32_t code, const char *layer_prefix, const char *message, void *user_data)
{
    struct wine_vk_debug_report_params params;
    struct wine_debug_report_callback *object;
    void *ret_ptr;
    ULONG ret_len;

    TRACE("%#x, %#x, 0x%s, 0x%s, %d, %p, %p, %p\n", flags, object_type, wine_dbgstr_longlong(object_handle),
            wine_dbgstr_longlong(location), code, layer_prefix, message, user_data);

    object = user_data;

    if (!object->instance->instance)
    {
        /* instance wasn't yet created, this is a message from the native loader */
        return VK_FALSE;
    }

    /* FIXME: we should pack all referenced structs instead of passing pointers */
    params.user_callback = object->user_callback;
    params.user_data = object->user_data;
    params.flags = flags;
    params.object_type = object_type;
    params.location = location;
    params.code = code;
    params.layer_prefix = layer_prefix;
    params.message = message;

    params.object_handle = wine_vk_get_wrapper(object->instance, object_handle);
    if (!params.object_handle)
        params.object_type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT;

    return KeUserModeCallback( NtUserCallVulkanDebugReportCallback, &params, sizeof(params),
                               &ret_ptr, &ret_len );
}

static void wine_vk_physical_device_free(struct wine_phys_dev *phys_dev)
{
    if (!phys_dev)
        return;

    WINE_VK_REMOVE_HANDLE_MAPPING(phys_dev->instance, phys_dev);
    free(phys_dev->extensions);
    free(phys_dev);
}

static struct wine_phys_dev *wine_vk_physical_device_alloc(struct wine_instance *instance,
        VkPhysicalDevice phys_dev, VkPhysicalDevice handle)
{
    struct wine_phys_dev *object;
    uint32_t num_host_properties, num_properties = 0;
    VkExtensionProperties *host_properties = NULL;
    BOOL have_external_memory_host = FALSE;
    VkResult res;
    unsigned int i, j;

    if (!(object = calloc(1, sizeof(*object))))
        return NULL;

    object->instance = instance;
    object->handle = handle;
    object->phys_dev = phys_dev;

    handle->base.unix_handle = (uintptr_t)object;
    WINE_VK_ADD_DISPATCHABLE_MAPPING(instance, handle, phys_dev, object);

    instance->funcs.p_vkGetPhysicalDeviceMemoryProperties(phys_dev, &object->memory_properties);

    res = instance->funcs.p_vkEnumerateDeviceExtensionProperties(phys_dev,
            NULL, &num_host_properties, NULL);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to enumerate device extensions, res=%d\n", res);
        goto err;
    }

    host_properties = calloc(num_host_properties, sizeof(*host_properties));
    if (!host_properties)
    {
        ERR("Failed to allocate memory for device properties!\n");
        goto err;
    }

    res = instance->funcs.p_vkEnumerateDeviceExtensionProperties(phys_dev,
            NULL, &num_host_properties, host_properties);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to enumerate device extensions, res=%d\n", res);
        goto err;
    }

    /* Count list of extensions for which we have an implementation.
     * TODO: perform translation for platform specific extensions.
     */
    for (i = 0; i < num_host_properties; i++)
    {
        if (wine_vk_device_extension_supported(host_properties[i].extensionName))
        {
            TRACE("Enabling extension '%s' for physical device %p\n", host_properties[i].extensionName, object);
            num_properties++;
        }
        else
        {
            TRACE("Skipping extension '%s', no implementation found in winevulkan.\n", host_properties[i].extensionName);
        }
        if (!strcmp(host_properties[i].extensionName, "VK_EXT_external_memory_host"))
            have_external_memory_host = TRUE;
    }

    TRACE("Host supported extensions %u, Wine supported extensions %u\n", num_host_properties, num_properties);

    if (!(object->extensions = calloc(num_properties, sizeof(*object->extensions))))
    {
        ERR("Failed to allocate memory for device extensions!\n");
        goto err;
    }

    for (i = 0, j = 0; i < num_host_properties; i++)
    {
        if (wine_vk_device_extension_supported(host_properties[i].extensionName))
        {
            object->extensions[j] = host_properties[i];
            j++;
        }
    }
    object->extension_count = num_properties;

    if (use_external_memory() && have_external_memory_host)
    {
        VkPhysicalDeviceExternalMemoryHostPropertiesEXT host_mem_props =
        {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT,
        };
        VkPhysicalDeviceProperties2 props =
        {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &host_mem_props,
        };
        instance->funcs.p_vkGetPhysicalDeviceProperties2KHR(phys_dev, &props);
        object->external_memory_align = host_mem_props.minImportedHostPointerAlignment;
        if (object->external_memory_align)
            TRACE("Using VK_EXT_external_memory_host for memory mapping with alignment: %u\n",
                  object->external_memory_align);
    }

    free(host_properties);
    return object;

err:
    wine_vk_physical_device_free(object);
    free(host_properties);
    return NULL;
}

static void wine_vk_free_command_buffers(struct wine_device *device,
        struct wine_cmd_pool *pool, uint32_t count, const VkCommandBuffer *buffers)
{
    unsigned int i;

    for (i = 0; i < count; i++)
    {
        struct wine_cmd_buffer *buffer = wine_cmd_buffer_from_handle(buffers[i]);

        if (!buffer)
            continue;

        device->funcs.p_vkFreeCommandBuffers(device->device, pool->command_pool, 1, &buffer->command_buffer);
        WINE_VK_REMOVE_HANDLE_MAPPING(device->phys_dev->instance, buffer);
        buffer->handle->base.unix_handle = 0;
        free(buffer);
    }
}

static void wine_vk_device_get_queues(struct wine_device *device,
        uint32_t family_index, uint32_t queue_count, VkDeviceQueueCreateFlags flags,
        struct wine_queue *queues, VkQueue *handles)
{
    VkDeviceQueueInfo2 queue_info;
    unsigned int i;

    for (i = 0; i < queue_count; i++)
    {
        struct wine_queue *queue = &queues[i];

        queue->device = device;
        queue->handle = (*handles)++;
        queue->family_index = family_index;
        queue->queue_index = i;
        queue->flags = flags;

        /* The Vulkan spec says:
         *
         * "vkGetDeviceQueue must only be used to get queues that were created
         * with the flags parameter of VkDeviceQueueCreateInfo set to zero."
         */
        if (flags && device->funcs.p_vkGetDeviceQueue2)
        {
            queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2;
            queue_info.pNext = NULL;
            queue_info.flags = flags;
            queue_info.queueFamilyIndex = family_index;
            queue_info.queueIndex = i;
            device->funcs.p_vkGetDeviceQueue2(device->device, &queue_info, &queue->queue);
        }
        else
        {
            device->funcs.p_vkGetDeviceQueue(device->device, family_index, i, &queue->queue);
        }

        queue->handle->base.unix_handle = (uintptr_t)queue;
        WINE_VK_ADD_DISPATCHABLE_MAPPING(device->phys_dev->instance, queue->handle, queue->queue, queue);
    }
}

static VkResult wine_vk_device_convert_create_info(struct wine_phys_dev *phys_dev,
        struct conversion_context *ctx, const VkDeviceCreateInfo *src, VkDeviceCreateInfo *dst)
{
    unsigned int i;

    *dst = *src;

    /* Should be filtered out by loader as ICDs don't support layers. */
    dst->enabledLayerCount = 0;
    dst->ppEnabledLayerNames = NULL;

    TRACE("Enabled %u extensions.\n", dst->enabledExtensionCount);
    for (i = 0; i < dst->enabledExtensionCount; i++)
    {
        const char *extension_name = dst->ppEnabledExtensionNames[i];
        TRACE("Extension %u: %s.\n", i, debugstr_a(extension_name));
        if (!wine_vk_device_extension_supported(extension_name))
        {
            WARN("Extension %s is not supported.\n", debugstr_a(extension_name));
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }

    if (phys_dev->external_memory_align)
    {
        const char **new_extensions;

        new_extensions = conversion_context_alloc(ctx, (dst->enabledExtensionCount + 2) *
                sizeof(*dst->ppEnabledExtensionNames));
        memcpy(new_extensions, src->ppEnabledExtensionNames,
               dst->enabledExtensionCount * sizeof(*dst->ppEnabledExtensionNames));
        new_extensions[dst->enabledExtensionCount++] = "VK_KHR_external_memory";
        new_extensions[dst->enabledExtensionCount++] = "VK_EXT_external_memory_host";
        dst->ppEnabledExtensionNames = new_extensions;
    }

    return VK_SUCCESS;
}

/* Helper function used for freeing a device structure. This function supports full
 * and partial object cleanups and can thus be used for vkCreateDevice failures.
 */
static void wine_vk_device_free(struct wine_device *device)
{
    struct wine_queue *queue;
    unsigned int i;

    if (!device)
        return;

    if (device->queues)
    {
        for (i = 0; i < device->queue_count; i++)
        {
            queue = &device->queues[i];
            if (queue && queue->queue)
                WINE_VK_REMOVE_HANDLE_MAPPING(device->phys_dev->instance, queue);
        }
        free(device->queues);
        device->queues = NULL;
    }

    if (device->device && device->funcs.p_vkDestroyDevice)
    {
        WINE_VK_REMOVE_HANDLE_MAPPING(device->phys_dev->instance, device);
        device->funcs.p_vkDestroyDevice(device->device, NULL /* pAllocator */);
    }

    free(device);
}

NTSTATUS init_vulkan(void *args)
{
    vk_funcs = __wine_get_vulkan_driver(WINE_VULKAN_DRIVER_VERSION);
    if (!vk_funcs)
    {
        ERR("Failed to load Wine graphics driver supporting Vulkan.\n");
        return STATUS_UNSUCCESSFUL;
    }

    if (is_wow64())
    {
        SYSTEM_BASIC_INFORMATION info;

        NtQuerySystemInformation(SystemEmulationBasicInformation, &info, sizeof(info), NULL);
        zero_bits = (ULONG_PTR)info.HighestUserAddress | 0x7fffffff;
    }

    return STATUS_SUCCESS;
}

/* Helper function for converting between win32 and host compatible VkInstanceCreateInfo.
 * This function takes care of extensions handled at winevulkan layer, a Wine graphics
 * driver is responsible for handling e.g. surface extensions.
 */
static VkResult wine_vk_instance_convert_create_info(struct conversion_context *ctx,
        const VkInstanceCreateInfo *src, VkInstanceCreateInfo *dst, struct wine_instance *object)
{
    VkDebugUtilsMessengerCreateInfoEXT *debug_utils_messenger;
    VkDebugReportCallbackCreateInfoEXT *debug_report_callback;
    VkBaseInStructure *header;
    unsigned int i;

    *dst = *src;

    object->utils_messenger_count = wine_vk_count_struct(dst, DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
    object->utils_messengers = calloc(object->utils_messenger_count, sizeof(*object->utils_messengers));
    header = (VkBaseInStructure *) dst;
    for (i = 0; i < object->utils_messenger_count; i++)
    {
        header = find_next_struct(header->pNext, VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
        debug_utils_messenger = (VkDebugUtilsMessengerCreateInfoEXT *) header;

        object->utils_messengers[i].instance = object;
        object->utils_messengers[i].debug_messenger = VK_NULL_HANDLE;
        object->utils_messengers[i].user_callback = debug_utils_messenger->pfnUserCallback;
        object->utils_messengers[i].user_data = debug_utils_messenger->pUserData;

        /* convert_VkInstanceCreateInfo_* already copied the chain, so we can modify it in-place. */
        debug_utils_messenger->pfnUserCallback = (void *) &debug_utils_callback_conversion;
        debug_utils_messenger->pUserData = &object->utils_messengers[i];
    }

    debug_report_callback = find_next_struct(header->pNext,
                                             VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT);
    if (debug_report_callback)
    {
        object->default_callback.instance = object;
        object->default_callback.debug_callback = VK_NULL_HANDLE;
        object->default_callback.user_callback = debug_report_callback->pfnCallback;
        object->default_callback.user_data = debug_report_callback->pUserData;

        debug_report_callback->pfnCallback = (void *) &debug_report_callback_conversion;
        debug_report_callback->pUserData = &object->default_callback;
    }

    /* ICDs don't support any layers, so nothing to copy. Modern versions of the loader
     * filter this data out as well.
     */
    if (object->quirks & WINEVULKAN_QUIRK_IGNORE_EXPLICIT_LAYERS) {
        dst->enabledLayerCount = 0;
        dst->ppEnabledLayerNames = NULL;
        WARN("Ignoring explicit layers!\n");
    } else if (dst->enabledLayerCount) {
        FIXME("Loading explicit layers is not supported by winevulkan!\n");
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    TRACE("Enabled %u instance extensions.\n", dst->enabledExtensionCount);
    for (i = 0; i < dst->enabledExtensionCount; i++)
    {
        const char *extension_name = dst->ppEnabledExtensionNames[i];
        TRACE("Extension %u: %s.\n", i, debugstr_a(extension_name));
        if (!wine_vk_instance_extension_supported(extension_name))
        {
            WARN("Extension %s is not supported.\n", debugstr_a(extension_name));
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
        if (!strcmp(extension_name, "VK_EXT_debug_utils") || !strcmp(extension_name, "VK_EXT_debug_report"))
        {
            object->enable_wrapper_list = VK_TRUE;
        }
    }

    if (use_external_memory())
    {
        const char **new_extensions;

        new_extensions = conversion_context_alloc(ctx, (dst->enabledExtensionCount + 2) *
                sizeof(*dst->ppEnabledExtensionNames));
        memcpy(new_extensions, src->ppEnabledExtensionNames,
               dst->enabledExtensionCount * sizeof(*dst->ppEnabledExtensionNames));
        new_extensions[dst->enabledExtensionCount++] = "VK_KHR_get_physical_device_properties2";
        new_extensions[dst->enabledExtensionCount++] = "VK_KHR_external_memory_capabilities";
        dst->ppEnabledExtensionNames = new_extensions;
    }

    return VK_SUCCESS;
}

573 static VkResult
wine_vk_instance_load_physical_devices(struct wine_instance
*instance
)
575 VkPhysicalDevice
*tmp_phys_devs
;
576 uint32_t phys_dev_count
;
580 res
= instance
->funcs
.p_vkEnumeratePhysicalDevices(instance
->instance
, &phys_dev_count
, NULL
);
581 if (res
!= VK_SUCCESS
)
583 ERR("Failed to enumerate physical devices, res=%d\n", res
);
589 if (phys_dev_count
> instance
->handle
->phys_dev_count
)
591 instance
->handle
->phys_dev_count
= phys_dev_count
;
592 return VK_ERROR_OUT_OF_POOL_MEMORY
;
594 instance
->handle
->phys_dev_count
= phys_dev_count
;
596 if (!(tmp_phys_devs
= calloc(phys_dev_count
, sizeof(*tmp_phys_devs
))))
597 return VK_ERROR_OUT_OF_HOST_MEMORY
;
599 res
= instance
->funcs
.p_vkEnumeratePhysicalDevices(instance
->instance
, &phys_dev_count
, tmp_phys_devs
);
600 if (res
!= VK_SUCCESS
)
606 instance
->phys_devs
= calloc(phys_dev_count
, sizeof(*instance
->phys_devs
));
607 if (!instance
->phys_devs
)
610 return VK_ERROR_OUT_OF_HOST_MEMORY
;
613 /* Wrap each native physical device handle into a dispatchable object for the ICD loader. */
614 for (i
= 0; i
< phys_dev_count
; i
++)
616 struct wine_phys_dev
*phys_dev
= wine_vk_physical_device_alloc(instance
, tmp_phys_devs
[i
],
617 &instance
->handle
->phys_devs
[i
]);
620 ERR("Unable to allocate memory for physical device!\n");
622 return VK_ERROR_OUT_OF_HOST_MEMORY
;
625 instance
->phys_devs
[i
] = phys_dev
;
626 instance
->phys_dev_count
= i
+ 1;
628 instance
->phys_dev_count
= phys_dev_count
;
static struct wine_phys_dev *wine_vk_instance_wrap_physical_device(struct wine_instance *instance,
        VkPhysicalDevice physical_device)
{
    unsigned int i;

    for (i = 0; i < instance->phys_dev_count; ++i)
    {
        struct wine_phys_dev *current = instance->phys_devs[i];
        if (current->phys_dev == physical_device)
            return current;
    }

    ERR("Unrecognized physical device %p.\n", physical_device);
    return NULL;
}

/* Helper function used for freeing an instance structure. This function supports full
 * and partial object cleanups and can thus be used for vkCreateInstance failures.
 */
static void wine_vk_instance_free(struct wine_instance *instance)
{
    unsigned int i;

    if (!instance)
        return;

    if (instance->phys_devs)
    {
        for (i = 0; i < instance->phys_dev_count; i++)
        {
            wine_vk_physical_device_free(instance->phys_devs[i]);
        }
        free(instance->phys_devs);
    }

    if (instance->instance)
    {
        vk_funcs->p_vkDestroyInstance(instance->instance, NULL /* allocator */);
        WINE_VK_REMOVE_HANDLE_MAPPING(instance, instance);
    }

    pthread_rwlock_destroy(&instance->wrapper_lock);
    free(instance->utils_messengers);
    free(instance);
}

VkResult wine_vkAllocateCommandBuffers(VkDevice handle, const VkCommandBufferAllocateInfo *allocate_info,
                                       VkCommandBuffer *buffers)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_cmd_buffer *buffer;
    struct wine_cmd_pool *pool;
    VkResult res = VK_SUCCESS;
    unsigned int i;

    pool = wine_cmd_pool_from_handle(allocate_info->commandPool);

    for (i = 0; i < allocate_info->commandBufferCount; i++)
    {
        VkCommandBufferAllocateInfo allocate_info_host;

        /* TODO: future extensions (none yet) may require pNext conversion. */
        allocate_info_host.pNext = allocate_info->pNext;
        allocate_info_host.sType = allocate_info->sType;
        allocate_info_host.commandPool = pool->command_pool;
        allocate_info_host.level = allocate_info->level;
        allocate_info_host.commandBufferCount = 1;

        TRACE("Allocating command buffer %u from pool 0x%s.\n",
                i, wine_dbgstr_longlong(allocate_info_host.commandPool));

        if (!(buffer = calloc(1, sizeof(*buffer))))
        {
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            break;
        }

        buffer->handle = buffers[i];
        buffer->device = device;
        res = device->funcs.p_vkAllocateCommandBuffers(device->device,
                &allocate_info_host, &buffer->command_buffer);
        buffer->handle->base.unix_handle = (uintptr_t)buffer;
        WINE_VK_ADD_DISPATCHABLE_MAPPING(device->phys_dev->instance, buffer->handle,
                buffer->command_buffer, buffer);
        if (res != VK_SUCCESS)
        {
            ERR("Failed to allocate command buffer, res=%d.\n", res);
            buffer->command_buffer = VK_NULL_HANDLE;
            break;
        }
    }

    if (res != VK_SUCCESS)
        wine_vk_free_command_buffers(device, pool, i + 1, buffers);

    return res;
}

VkResult wine_vkCreateDevice(VkPhysicalDevice phys_dev_handle, const VkDeviceCreateInfo *create_info,
                             const VkAllocationCallbacks *allocator, VkDevice *ret_device,
                             void *client_ptr)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    VkDevice device_handle = client_ptr;
    VkDeviceCreateInfo create_info_host;
    struct VkQueue_T *queue_handles;
    struct wine_queue *next_queue;
    struct conversion_context ctx;
    struct wine_device *object;
    unsigned int i;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (TRACE_ON(vulkan))
    {
        VkPhysicalDeviceProperties properties;

        phys_dev->instance->funcs.p_vkGetPhysicalDeviceProperties(phys_dev->phys_dev, &properties);

        TRACE("Device name: %s.\n", debugstr_a(properties.deviceName));
        TRACE("Vendor ID: %#x, Device ID: %#x.\n", properties.vendorID, properties.deviceID);
        TRACE("Driver version: %#x.\n", properties.driverVersion);
    }

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    object->phys_dev = phys_dev;

    init_conversion_context(&ctx);
    res = wine_vk_device_convert_create_info(phys_dev, &ctx, create_info, &create_info_host);
    if (res == VK_SUCCESS)
        res = phys_dev->instance->funcs.p_vkCreateDevice(phys_dev->phys_dev,
                &create_info_host, NULL /* allocator */, &object->device);
    free_conversion_context(&ctx);
    WINE_VK_ADD_DISPATCHABLE_MAPPING(phys_dev->instance, device_handle, object->device, object);
    if (res != VK_SUCCESS)
    {
        WARN("Failed to create device, res=%d.\n", res);
        goto fail;
    }

    /* Just load all function pointers we are aware off. The loader takes care of filtering.
     * We use vkGetDeviceProcAddr as opposed to vkGetInstanceProcAddr for efficiency reasons
     * as functions pass through fewer dispatch tables within the loader.
     */
#define USE_VK_FUNC(name) \
    object->funcs.p_##name = (void *)vk_funcs->p_vkGetDeviceProcAddr(object->device, #name); \
    if (object->funcs.p_##name == NULL) \
        TRACE("Not found '%s'.\n", #name);
    ALL_VK_DEVICE_FUNCS()
#undef USE_VK_FUNC

    /* We need to cache all queues within the device as each requires wrapping since queues are
     * dispatchable objects.
     */
    for (i = 0; i < create_info_host.queueCreateInfoCount; i++)
    {
        object->queue_count += create_info_host.pQueueCreateInfos[i].queueCount;
    }

    if (!(object->queues = calloc(object->queue_count, sizeof(*object->queues))))
    {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto fail;
    }

    next_queue = object->queues;
    queue_handles = device_handle->queues;
    for (i = 0; i < create_info_host.queueCreateInfoCount; i++)
    {
        uint32_t flags = create_info_host.pQueueCreateInfos[i].flags;
        uint32_t family_index = create_info_host.pQueueCreateInfos[i].queueFamilyIndex;
        uint32_t queue_count = create_info_host.pQueueCreateInfos[i].queueCount;

        TRACE("Queue family index %u, queue count %u.\n", family_index, queue_count);

        wine_vk_device_get_queues(object, family_index, queue_count, flags, next_queue, &queue_handles);
        next_queue += queue_count;
    }

    device_handle->quirks = phys_dev->instance->quirks;
    device_handle->base.unix_handle = (uintptr_t)object;
    *ret_device = device_handle;
    TRACE("Created device %p (native device %p).\n", object, object->device);
    return VK_SUCCESS;

fail:
    wine_vk_device_free(object);
    return res;
}

VkResult wine_vkCreateInstance(const VkInstanceCreateInfo *create_info,
                               const VkAllocationCallbacks *allocator, VkInstance *instance,
                               void *client_ptr)
{
    VkInstance client_instance = client_ptr;
    VkInstanceCreateInfo create_info_host;
    const VkApplicationInfo *app_info;
    struct conversion_context ctx;
    struct wine_instance *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
    {
        ERR("Failed to allocate memory for instance\n");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    list_init(&object->wrappers);
    pthread_rwlock_init(&object->wrapper_lock, NULL);

    init_conversion_context(&ctx);
    res = wine_vk_instance_convert_create_info(&ctx, create_info, &create_info_host, object);
    if (res == VK_SUCCESS)
        res = vk_funcs->p_vkCreateInstance(&create_info_host, NULL /* allocator */, &object->instance);
    free_conversion_context(&ctx);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to create instance, res=%d\n", res);
        wine_vk_instance_free(object);
        return res;
    }

    object->handle = client_instance;
    WINE_VK_ADD_DISPATCHABLE_MAPPING(object, object->handle, object->instance, object);

    /* Load all instance functions we are aware of. Note the loader takes care
     * of any filtering for extensions which were not requested, but which the
     * ICD may support.
     */
#define USE_VK_FUNC(name) \
    object->funcs.p_##name = (void *)vk_funcs->p_vkGetInstanceProcAddr(object->instance, #name);
    ALL_VK_INSTANCE_FUNCS()
#undef USE_VK_FUNC

    /* Cache physical devices for vkEnumeratePhysicalDevices within the instance as
     * each vkPhysicalDevice is a dispatchable object, which means we need to wrap
     * the native physical devices and present those to the application.
     * Cleanup happens as part of wine_vkDestroyInstance.
     */
    res = wine_vk_instance_load_physical_devices(object);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to load physical devices, res=%d\n", res);
        wine_vk_instance_free(object);
        return res;
    }

    if ((app_info = create_info->pApplicationInfo))
    {
        TRACE("Application name %s, application version %#x.\n",
                debugstr_a(app_info->pApplicationName), app_info->applicationVersion);
        TRACE("Engine name %s, engine version %#x.\n", debugstr_a(app_info->pEngineName),
                app_info->engineVersion);
        TRACE("API version %#x.\n", app_info->apiVersion);

        if (app_info->pEngineName && !strcmp(app_info->pEngineName, "idTech"))
            object->quirks |= WINEVULKAN_QUIRK_GET_DEVICE_PROC_ADDR;
    }

    object->quirks |= WINEVULKAN_QUIRK_ADJUST_MAX_IMAGE_COUNT;

    client_instance->base.unix_handle = (uintptr_t)object;
    *instance = client_instance;
    TRACE("Created instance %p (native instance %p).\n", object, object->instance);
    return VK_SUCCESS;
}

void wine_vkDestroyDevice(VkDevice handle, const VkAllocationCallbacks *allocator)
{
    struct wine_device *device = wine_device_from_handle(handle);

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    wine_vk_device_free(device);
}

void wine_vkDestroyInstance(VkInstance handle, const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);

    if (allocator)
        FIXME("Support allocation allocators\n");

    wine_vk_instance_free(instance);
}

VkResult wine_vkEnumerateDeviceExtensionProperties(VkPhysicalDevice phys_dev_handle, const char *layer_name,
                                                   uint32_t *count, VkExtensionProperties *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);

    /* This shouldn't get called with layer_name set, the ICD loader prevents it. */
    if (layer_name)
    {
        ERR("Layer enumeration not supported from ICD.\n");
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    if (!properties)
    {
        *count = phys_dev->extension_count;
        return VK_SUCCESS;
    }

    *count = min(*count, phys_dev->extension_count);
    memcpy(properties, phys_dev->extensions, *count * sizeof(*properties));

    TRACE("Returning %u extensions.\n", *count);
    return *count < phys_dev->extension_count ? VK_INCOMPLETE : VK_SUCCESS;
}

VkResult wine_vkEnumerateInstanceExtensionProperties(const char *name, uint32_t *count,
                                                     VkExtensionProperties *properties)
{
    uint32_t num_properties = 0, num_host_properties;
    VkExtensionProperties *host_properties;
    unsigned int i, j;
    VkResult res;

    res = vk_funcs->p_vkEnumerateInstanceExtensionProperties(NULL, &num_host_properties, NULL);
    if (res != VK_SUCCESS)
        return res;

    if (!(host_properties = calloc(num_host_properties, sizeof(*host_properties))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = vk_funcs->p_vkEnumerateInstanceExtensionProperties(NULL, &num_host_properties, host_properties);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to retrieve host properties, res=%d.\n", res);
        free(host_properties);
        return res;
    }

    /* The Wine graphics driver provides us with all extensions supported by the host side
     * including extension fixup (e.g. VK_KHR_xlib_surface -> VK_KHR_win32_surface). It is
     * up to us here to filter the list down to extensions for which we have thunks.
     */
    for (i = 0; i < num_host_properties; i++)
    {
        if (wine_vk_instance_extension_supported(host_properties[i].extensionName))
            num_properties++;
        else
            TRACE("Instance extension '%s' is not supported.\n", host_properties[i].extensionName);
    }

    if (!properties)
    {
        TRACE("Returning %u extensions.\n", num_properties);
        *count = num_properties;
        free(host_properties);
        return VK_SUCCESS;
    }

    for (i = 0, j = 0; i < num_host_properties && j < *count; i++)
    {
        if (wine_vk_instance_extension_supported(host_properties[i].extensionName))
        {
            TRACE("Enabling extension '%s'.\n", host_properties[i].extensionName);
            properties[j++] = host_properties[i];
        }
    }
    *count = min(*count, num_properties);

    free(host_properties);
    return *count < num_properties ? VK_INCOMPLETE : VK_SUCCESS;
}

VkResult wine_vkEnumerateDeviceLayerProperties(VkPhysicalDevice phys_dev, uint32_t *count,
                                               VkLayerProperties *properties)
{
    /* ICDs don't support device layers, so report an empty list. */
    *count = 0;
    return VK_SUCCESS;
}

VkResult wine_vkEnumerateInstanceVersion(uint32_t *version)
{
    VkResult res;

    static VkResult (*p_vkEnumerateInstanceVersion)(uint32_t *version);
    if (!p_vkEnumerateInstanceVersion)
        p_vkEnumerateInstanceVersion = (void *)vk_funcs->p_vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");

    if (p_vkEnumerateInstanceVersion)
    {
        res = p_vkEnumerateInstanceVersion(version);
    }
    else
    {
        *version = VK_API_VERSION_1_0;
        res = VK_SUCCESS;
    }

    TRACE("API version %u.%u.%u.\n",
            VK_VERSION_MAJOR(*version), VK_VERSION_MINOR(*version), VK_VERSION_PATCH(*version));
    *version = min(WINE_VK_VERSION, *version);
    return res;
}

wine_vkEnumeratePhysicalDevices(VkInstance handle
, uint32_t *count
, VkPhysicalDevice
*devices
)
1043 struct wine_instance
*instance
= wine_instance_from_handle(handle
);
1048 *count
= instance
->phys_dev_count
;
1052 *count
= min(*count
, instance
->phys_dev_count
);
1053 for (i
= 0; i
< *count
; i
++)
1055 devices
[i
] = instance
->phys_devs
[i
]->handle
;
1058 TRACE("Returning %u devices.\n", *count
);
1059 return *count
< instance
->phys_dev_count
? VK_INCOMPLETE
: VK_SUCCESS
;
void wine_vkFreeCommandBuffers(VkDevice handle, VkCommandPool command_pool, uint32_t count,
                               const VkCommandBuffer *buffers)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_cmd_pool *pool = wine_cmd_pool_from_handle(command_pool);

    wine_vk_free_command_buffers(device, pool, count, buffers);
}

wine_vk_device_find_queue(VkDevice handle
, const VkDeviceQueueInfo2
*info
)
1073 struct wine_device
*device
= wine_device_from_handle(handle
);
1074 struct wine_queue
*queue
;
1077 for (i
= 0; i
< device
->queue_count
; i
++)
1079 queue
= &device
->queues
[i
];
1080 if (queue
->family_index
== info
->queueFamilyIndex
1081 && queue
->queue_index
== info
->queueIndex
1082 && queue
->flags
== info
->flags
)
1084 return queue
->handle
;
1088 return VK_NULL_HANDLE
;
void wine_vkGetDeviceQueue(VkDevice device, uint32_t family_index, uint32_t queue_index, VkQueue *queue)
{
    VkDeviceQueueInfo2 queue_info;

    queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2;
    queue_info.pNext = NULL;
    queue_info.flags = 0;
    queue_info.queueFamilyIndex = family_index;
    queue_info.queueIndex = queue_index;

    *queue = wine_vk_device_find_queue(device, &queue_info);
}

void wine_vkGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *info, VkQueue *queue)
{
    const VkBaseInStructure *chain;

    if ((chain = info->pNext))
        FIXME("Ignoring a linked structure of type %u.\n", chain->sType);

    *queue = wine_vk_device_find_queue(device, info);
}

VkResult wine_vkCreateCommandPool(VkDevice device_handle, const VkCommandPoolCreateInfo *info,
                                  const VkAllocationCallbacks *allocator, VkCommandPool *command_pool,
                                  void *client_ptr)
{
    struct wine_device *device = wine_device_from_handle(device_handle);
    struct vk_command_pool *handle = client_ptr;
    struct wine_cmd_pool *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = device->funcs.p_vkCreateCommandPool(device->device, info, NULL, &object->command_pool);

    if (res == VK_SUCCESS)
    {
        object->handle = (uintptr_t)handle;
        handle->unix_handle = (uintptr_t)object;
        WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(device->phys_dev->instance, object->handle,
                object->command_pool, object);
        *command_pool = object->handle;
    }
    else
    {
        free(object);
    }

    return res;
}

, VkCommandPool handle
,
1148 const VkAllocationCallbacks
*allocator
)
1150 struct wine_device
*device
= wine_device_from_handle(device_handle
);
1151 struct wine_cmd_pool
*pool
= wine_cmd_pool_from_handle(handle
);
1154 FIXME("Support for allocation callbacks not implemented yet\n");
1156 WINE_VK_REMOVE_HANDLE_MAPPING(device
->phys_dev
->instance
, pool
);
1158 device
->funcs
.p_vkDestroyCommandPool(device
->device
, pool
->command_pool
, NULL
);
static VkResult wine_vk_enumerate_physical_device_groups(struct wine_instance *instance,
        VkResult (*p_vkEnumeratePhysicalDeviceGroups)(VkInstance, uint32_t *, VkPhysicalDeviceGroupProperties *),
        uint32_t *count, VkPhysicalDeviceGroupProperties *properties)
{
    unsigned int i, j;
    VkResult res;

    res = p_vkEnumeratePhysicalDeviceGroups(instance->instance, count, properties);
    if (res < 0 || !properties)
        return res;

    for (i = 0; i < *count; ++i)
    {
        VkPhysicalDeviceGroupProperties *current = &properties[i];
        for (j = 0; j < current->physicalDeviceCount; ++j)
        {
            VkPhysicalDevice dev = current->physicalDevices[j];
            struct wine_phys_dev *phys_dev = wine_vk_instance_wrap_physical_device(instance, dev);
            if (!phys_dev)
                return VK_ERROR_INITIALIZATION_FAILED;
            current->physicalDevices[j] = phys_dev->handle;
        }
    }

    return res;
}

VkResult wine_vkEnumeratePhysicalDeviceGroups(VkInstance handle, uint32_t *count,
                                              VkPhysicalDeviceGroupProperties *properties)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);

    return wine_vk_enumerate_physical_device_groups(instance,
            instance->funcs.p_vkEnumeratePhysicalDeviceGroups, count, properties);
}

VkResult wine_vkEnumeratePhysicalDeviceGroupsKHR(VkInstance handle, uint32_t *count,
                                                 VkPhysicalDeviceGroupProperties *properties)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);

    return wine_vk_enumerate_physical_device_groups(instance,
            instance->funcs.p_vkEnumeratePhysicalDeviceGroupsKHR, count, properties);
}

void wine_vkGetPhysicalDeviceExternalFenceProperties(VkPhysicalDevice phys_dev,
                                                     const VkPhysicalDeviceExternalFenceInfo *fence_info,
                                                     VkExternalFenceProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalFenceFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalFencePropertiesKHR(VkPhysicalDevice phys_dev,
                                                        const VkPhysicalDeviceExternalFenceInfo *fence_info,
                                                        VkExternalFenceProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalFenceFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalBufferProperties(VkPhysicalDevice phys_dev,
                                                      const VkPhysicalDeviceExternalBufferInfo *buffer_info,
                                                      VkExternalBufferProperties *properties)
{
    memset(&properties->externalMemoryProperties, 0, sizeof(properties->externalMemoryProperties));
}

void wine_vkGetPhysicalDeviceExternalBufferPropertiesKHR(VkPhysicalDevice phys_dev,
                                                         const VkPhysicalDeviceExternalBufferInfo *buffer_info,
                                                         VkExternalBufferProperties *properties)
{
    memset(&properties->externalMemoryProperties, 0, sizeof(properties->externalMemoryProperties));
}

VkResult wine_vkGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice phys_dev_handle,
                                                        const VkPhysicalDeviceImageFormatInfo2 *format_info,
                                                        VkImageFormatProperties2 *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    VkExternalImageFormatProperties *external_image_properties;
    VkResult res;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceImageFormatProperties2(phys_dev->phys_dev,
            format_info, properties);

    if ((external_image_properties = find_next_struct(properties,
                                                      VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES)))
    {
        VkExternalMemoryProperties *p = &external_image_properties->externalMemoryProperties;
        p->externalMemoryFeatures = 0;
        p->exportFromImportedHandleTypes = 0;
        p->compatibleHandleTypes = 0;
    }

    return res;
}

VkResult wine_vkGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice phys_dev_handle,
                                                           const VkPhysicalDeviceImageFormatInfo2 *format_info,
                                                           VkImageFormatProperties2 *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    VkExternalImageFormatProperties *external_image_properties;
    VkResult res;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceImageFormatProperties2KHR(phys_dev->phys_dev,
            format_info, properties);

    if ((external_image_properties = find_next_struct(properties,
                                                      VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES)))
    {
        VkExternalMemoryProperties *p = &external_image_properties->externalMemoryProperties;
        p->externalMemoryFeatures = 0;
        p->exportFromImportedHandleTypes = 0;
        p->compatibleHandleTypes = 0;
    }

    return res;
}

/* From ntdll/unix/sync.c */
#define NANOSECONDS_IN_A_SECOND 1000000000
#define TICKSPERSEC             10000000

static inline VkTimeDomainEXT get_performance_counter_time_domain(void)
{
#if !defined(__APPLE__) && defined(HAVE_CLOCK_GETTIME)
# ifdef CLOCK_MONOTONIC_RAW
    return VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT;
# else
    return VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT;
# endif
#else
    FIXME("No mapping for VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT on this platform.\n");
    return VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT;
#endif
}

static VkTimeDomainEXT map_to_host_time_domain(VkTimeDomainEXT domain)
{
    /* Matches ntdll/unix/sync.c's performance counter implementation. */
    if (domain == VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT)
        return get_performance_counter_time_domain();

    return domain;
}

static inline uint64_t convert_monotonic_timestamp(uint64_t value)
{
    return value / (NANOSECONDS_IN_A_SECOND / TICKSPERSEC);
}

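/* Rough worked example, assuming TICKSPERSEC matches ntdll's 10 MHz QueryPerformanceCounter
 * frequency: a monotonic delta of 1 000 000 000 ns is divided by
 * (1 000 000 000 / 10 000 000) = 100, giving 10 000 000 QPC ticks, i.e. one second. */
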
static inline uint64_t convert_timestamp(VkTimeDomainEXT host_domain, VkTimeDomainEXT target_domain, uint64_t value)
{
    if (host_domain == target_domain)
        return value;

    /* Convert between MONOTONIC time in ns -> QueryPerformanceCounter */
    if ((host_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT || host_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT)
            && target_domain == VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT)
        return convert_monotonic_timestamp(value);

    FIXME("Couldn't translate between host domain %d and target domain %d\n", host_domain, target_domain);
    return value;
}

VkResult wine_vkGetCalibratedTimestampsEXT(VkDevice handle, uint32_t timestamp_count,
                                           const VkCalibratedTimestampInfoEXT *timestamp_infos,
                                           uint64_t *timestamps, uint64_t *max_deviation)
{
    struct wine_device *device = wine_device_from_handle(handle);
    VkCalibratedTimestampInfoEXT *host_timestamp_infos;
    unsigned int i;
    VkResult res;

    TRACE("%p, %u, %p, %p, %p\n", device, timestamp_count, timestamp_infos, timestamps, max_deviation);

    if (!(host_timestamp_infos = malloc(sizeof(VkCalibratedTimestampInfoEXT) * timestamp_count)))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    for (i = 0; i < timestamp_count; i++)
    {
        host_timestamp_infos[i].sType = timestamp_infos[i].sType;
        host_timestamp_infos[i].pNext = timestamp_infos[i].pNext;
        host_timestamp_infos[i].timeDomain = map_to_host_time_domain(timestamp_infos[i].timeDomain);
    }

    res = device->funcs.p_vkGetCalibratedTimestampsEXT(device->device, timestamp_count, host_timestamp_infos, timestamps, max_deviation);
    if (res != VK_SUCCESS)
    {
        free(host_timestamp_infos);
        return res;
    }

    for (i = 0; i < timestamp_count; i++)
        timestamps[i] = convert_timestamp(host_timestamp_infos[i].timeDomain, timestamp_infos[i].timeDomain, timestamps[i]);

    free(host_timestamp_infos);

    return res;
}

wine_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(VkPhysicalDevice handle
,
1364 uint32_t *time_domain_count
,
1365 VkTimeDomainEXT
*time_domains
)
1367 struct wine_phys_dev
*phys_dev
= wine_phys_dev_from_handle(handle
);
1368 BOOL supports_device
= FALSE
, supports_monotonic
= FALSE
, supports_monotonic_raw
= FALSE
;
1369 const VkTimeDomainEXT performance_counter_domain
= get_performance_counter_time_domain();
1370 VkTimeDomainEXT
*host_time_domains
;
1371 uint32_t host_time_domain_count
;
1372 VkTimeDomainEXT out_time_domains
[2];
1373 uint32_t out_time_domain_count
;
1377 /* Find out the time domains supported on the host */
1378 res
= phys_dev
->instance
->funcs
.p_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(phys_dev
->phys_dev
, &host_time_domain_count
, NULL
);
1379 if (res
!= VK_SUCCESS
)
1382 if (!(host_time_domains
= malloc(sizeof(VkTimeDomainEXT
) * host_time_domain_count
)))
1383 return VK_ERROR_OUT_OF_HOST_MEMORY
;
1385 res
= phys_dev
->instance
->funcs
.p_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(phys_dev
->phys_dev
, &host_time_domain_count
, host_time_domains
);
1386 if (res
!= VK_SUCCESS
)
1388 free(host_time_domains
);
1392 for (i
= 0; i
< host_time_domain_count
; i
++)
1394 if (host_time_domains
[i
] == VK_TIME_DOMAIN_DEVICE_EXT
)
1395 supports_device
= TRUE
;
1396 else if (host_time_domains
[i
] == VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT
)
1397 supports_monotonic
= TRUE
;
1398 else if (host_time_domains
[i
] == VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT
)
1399 supports_monotonic_raw
= TRUE
;
1401 FIXME("Unknown time domain %d\n", host_time_domains
[i
]);
1404 free(host_time_domains
);
1406 out_time_domain_count
= 0;
1408 /* Map our monotonic times -> QPC */
1409 if (supports_monotonic_raw
&& performance_counter_domain
== VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT
)
1410 out_time_domains
[out_time_domain_count
++] = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT
;
1411 else if (supports_monotonic
&& performance_counter_domain
== VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT
)
1412 out_time_domains
[out_time_domain_count
++] = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT
;
1414 FIXME("VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT not supported on this platform.\n");
1416 /* Forward the device domain time */
1417 if (supports_device
)
1418 out_time_domains
[out_time_domain_count
++] = VK_TIME_DOMAIN_DEVICE_EXT
;
1420 /* Send the count/domains back to the app */
1423 *time_domain_count
= out_time_domain_count
;
1427 for (i
= 0; i
< min(*time_domain_count
, out_time_domain_count
); i
++)
1428 time_domains
[i
] = out_time_domains
[i
];
1430 res
= *time_domain_count
< out_time_domain_count
? VK_INCOMPLETE
: VK_SUCCESS
;
1431 *time_domain_count
= out_time_domain_count
;
void wine_vkGetPhysicalDeviceExternalSemaphoreProperties(VkPhysicalDevice phys_dev,
                                                         const VkPhysicalDeviceExternalSemaphoreInfo *info,
                                                         VkExternalSemaphoreProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalSemaphoreFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(VkPhysicalDevice phys_dev,
                                                            const VkPhysicalDeviceExternalSemaphoreInfo *info,
                                                            VkExternalSemaphoreProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalSemaphoreFeatures = 0;
}

VkResult wine_vkCreateWin32SurfaceKHR(VkInstance handle, const VkWin32SurfaceCreateInfoKHR *createInfo,
                                      const VkAllocationCallbacks *allocator, VkSurfaceKHR *surface)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    struct wine_surface *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    object = calloc(1, sizeof(*object));

    if (!object)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = instance->funcs.p_vkCreateWin32SurfaceKHR(instance->instance, createInfo, NULL, &object->driver_surface);

    if (res != VK_SUCCESS)
    {
        free(object);
        return res;
    }

    object->surface = vk_funcs->p_wine_get_native_surface(object->driver_surface);

    WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(instance, object, object->surface, object);

    *surface = wine_surface_to_handle(object);

    return VK_SUCCESS;
}

, VkSurfaceKHR surface
,
1486 const VkAllocationCallbacks
*allocator
)
1488 struct wine_instance
*instance
= wine_instance_from_handle(handle
);
1489 struct wine_surface
*object
= wine_surface_from_handle(surface
);
1494 instance
->funcs
.p_vkDestroySurfaceKHR(instance
->instance
, object
->driver_surface
, NULL
);
1496 WINE_VK_REMOVE_HANDLE_MAPPING(instance
, object
);
1500 VkResult
wine_vkAllocateMemory(VkDevice handle
, const VkMemoryAllocateInfo
*alloc_info
,
1501 const VkAllocationCallbacks
*allocator
, VkDeviceMemory
*ret
)
1503 struct wine_device
*device
= wine_device_from_handle(handle
);
1504 struct wine_device_memory
*memory
;
1505 VkMemoryAllocateInfo info
= *alloc_info
;
1506 VkImportMemoryHostPointerInfoEXT host_pointer_info
;
1508 void *mapping
= NULL
;
1511 /* For host visible memory, we try to use VK_EXT_external_memory_host on wow64
1512 * to ensure that mapped pointer is 32-bit. */
1513 mem_flags
= device
->phys_dev
->memory_properties
.memoryTypes
[alloc_info
->memoryTypeIndex
].propertyFlags
;
1514 if (device
->phys_dev
->external_memory_align
&& (mem_flags
& VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
) &&
1515 !find_next_struct(alloc_info
->pNext
, VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT
))
1517 VkMemoryHostPointerPropertiesEXT props
=
1519 .sType
= VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT
,
1521 uint32_t i
, align
= device
->phys_dev
->external_memory_align
- 1;
1522 SIZE_T alloc_size
= info
.allocationSize
;
1526 FIXME("Using VK_EXT_external_memory_host\n");
1528 if (NtAllocateVirtualMemory(GetCurrentProcess(), &mapping
, zero_bits
, &alloc_size
,
1529 MEM_COMMIT
, PAGE_READWRITE
))
1531 ERR("NtAllocateVirtualMemory failed\n");
1532 return VK_ERROR_OUT_OF_HOST_MEMORY
;
1535 result
= device
->funcs
.p_vkGetMemoryHostPointerPropertiesEXT(device
->device
,
1536 VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT
, mapping
, &props
);
1537 if (result
!= VK_SUCCESS
)
1539 ERR("vkGetMemoryHostPointerPropertiesEXT failed: %d\n", result
);
1543 if (!(props
.memoryTypeBits
& (1u << info
.memoryTypeIndex
)))
1545 /* If requested memory type is not allowed to use external memory,
1546 * try to find a supported compatible type. */
1547 uint32_t mask
= mem_flags
& ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
;
1548 for (i
= 0; i
< device
->phys_dev
->memory_properties
.memoryTypeCount
; i
++)
1550 if (!(props
.memoryTypeBits
& (1u << i
)))
1552 if ((device
->phys_dev
->memory_properties
.memoryTypes
[i
].propertyFlags
& mask
) != mask
)
1555 TRACE("Memory type not compatible with host memory, using %u instead\n", i
);
1556 info
.memoryTypeIndex
= i
;
1559 if (i
== device
->phys_dev
->memory_properties
.memoryTypeCount
)
1561 FIXME("Not found compatible memory type\n");
1563 NtFreeVirtualMemory(GetCurrentProcess(), &mapping
, &alloc_size
, MEM_RELEASE
);
1567 if (props
.memoryTypeBits
& (1u << info
.memoryTypeIndex
))
1569 host_pointer_info
.sType
= VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT
;
1570 host_pointer_info
.handleType
= VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT
;
1571 host_pointer_info
.pHostPointer
= mapping
;
1572 host_pointer_info
.pNext
= info
.pNext
;
1573 info
.pNext
= &host_pointer_info
;
1575 info
.allocationSize
= (info
.allocationSize
+ align
) & ~align
;
1579 if (!(memory
= malloc(sizeof(*memory
))))
1580 return VK_ERROR_OUT_OF_HOST_MEMORY
;
1582 result
= device
->funcs
.p_vkAllocateMemory(device
->device
, &info
, NULL
, &memory
->memory
);
1583 if (result
!= VK_SUCCESS
)
1589 memory
->mapping
= mapping
;
1590 *ret
= (VkDeviceMemory
)(uintptr_t)memory
;
void wine_vkFreeMemory(VkDevice handle, VkDeviceMemory memory_handle, const VkAllocationCallbacks *allocator)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_device_memory *memory;

    if (!memory_handle)
        return;
    memory = wine_device_memory_from_handle(memory_handle);

    device->funcs.p_vkFreeMemory(device->device, memory->memory, NULL);

    if (memory->mapping)
    {
        SIZE_T alloc_size = 0;
        NtFreeVirtualMemory(GetCurrentProcess(), &memory->mapping, &alloc_size, MEM_RELEASE);
    }

    free(memory);
}

wine_vkMapMemory(VkDevice device
, VkDeviceMemory memory
, VkDeviceSize offset
,
1615 VkDeviceSize size
, VkMemoryMapFlags flags
, void **data
)
1617 const VkMemoryMapInfoKHR info
=
1619 .sType
= VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR
,
1626 return wine_vkMapMemory2KHR(device
, &info
, data
);
1629 VkResult
wine_vkMapMemory2KHR(VkDevice handle
, const VkMemoryMapInfoKHR
*map_info
, void **data
)
1631 struct wine_device
*device
= wine_device_from_handle(handle
);
1632 struct wine_device_memory
*memory
= wine_device_memory_from_handle(map_info
->memory
);
1633 VkMemoryMapInfoKHR info
= *map_info
;
1636 info
.memory
= memory
->memory
;
1637 if (memory
->mapping
)
1639 *data
= (char *)memory
->mapping
+ info
.offset
;
1640 TRACE("returning %p\n", *data
);
1644 if (device
->funcs
.p_vkMapMemory2KHR
)
1646 result
= device
->funcs
.p_vkMapMemory2KHR(device
->device
, &info
, data
);
1650 assert(!info
.pNext
);
1651 result
= device
->funcs
.p_vkMapMemory(device
->device
, info
.memory
, info
.offset
,
1652 info
.size
, info
.flags
, data
);
1656 if (NtCurrentTeb()->WowTebOffset
&& result
== VK_SUCCESS
&& (UINT_PTR
)*data
>> 32)
1658 FIXME("returned mapping %p does not fit 32-bit pointer\n", *data
);
1659 device
->funcs
.p_vkUnmapMemory(device
->device
, memory
->memory
);
1661 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
void wine_vkUnmapMemory(VkDevice device, VkDeviceMemory memory)
{
    const VkMemoryUnmapInfoKHR info =
    {
        .sType = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR,
        .memory = memory,
    };

    wine_vkUnmapMemory2KHR(device, &info);
}

wine_vkUnmapMemory2KHR(VkDevice handle
, const VkMemoryUnmapInfoKHR
*unmap_info
)
1681 struct wine_device
*device
= wine_device_from_handle(handle
);
1682 struct wine_device_memory
*memory
= wine_device_memory_from_handle(unmap_info
->memory
);
1683 VkMemoryUnmapInfoKHR info
;
1685 if (memory
->mapping
)
1688 if (!device
->funcs
.p_vkUnmapMemory2KHR
)
1690 assert(!unmap_info
->pNext
);
1691 device
->funcs
.p_vkUnmapMemory(device
->device
, memory
->memory
);
1696 info
.memory
= memory
->memory
;
1697 return device
->funcs
.p_vkUnmapMemory2KHR(device
->device
, &info
);
VkResult wine_vkCreateBuffer(VkDevice handle, const VkBufferCreateInfo *create_info,
                             const VkAllocationCallbacks *allocator, VkBuffer *buffer)
{
    struct wine_device *device = wine_device_from_handle(handle);
    VkExternalMemoryBufferCreateInfo external_memory_info;
    VkBufferCreateInfo info = *create_info;

    if (device->phys_dev->external_memory_align &&
        !find_next_struct(info.pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO))
    {
        external_memory_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO;
        external_memory_info.pNext = info.pNext;
        external_memory_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
        info.pNext = &external_memory_info;
    }

    return device->funcs.p_vkCreateBuffer(device->device, &info, NULL, buffer);
}

VkResult wine_vkCreateImage(VkDevice handle, const VkImageCreateInfo *create_info,
                            const VkAllocationCallbacks *allocator, VkImage *image)
{
    struct wine_device *device = wine_device_from_handle(handle);
    VkExternalMemoryImageCreateInfo external_memory_info;
    VkImageCreateInfo info = *create_info;

    if (device->phys_dev->external_memory_align &&
        !find_next_struct(info.pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO))
    {
        external_memory_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
        external_memory_info.pNext = info.pNext;
        external_memory_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
        info.pNext = &external_memory_info;
    }

    return device->funcs.p_vkCreateImage(device->device, &info, NULL, image);
}

static inline void adjust_max_image_count(struct wine_phys_dev *phys_dev, VkSurfaceCapabilitiesKHR *capabilities)
{
    /* Many Windows games, for example Strange Brigade, No Man's Sky, Path of Exile
     * and World War Z, do not expect that maxImageCount can be set to 0.
     * A value of 0 means that there is no limit on the number of images.
     * Nvidia reports 8 on Windows, AMD 16.
     * https://vulkan.gpuinfo.org/displayreport.php?id=9122#surface
     * https://vulkan.gpuinfo.org/displayreport.php?id=9121#surface
     */
    if ((phys_dev->instance->quirks & WINEVULKAN_QUIRK_ADJUST_MAX_IMAGE_COUNT) && !capabilities->maxImageCount)
    {
        capabilities->maxImageCount = max(capabilities->minImageCount, 16);
    }
}

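/* With the quirk enabled, a host report of { minImageCount = 2, maxImageCount = 0 } is thus
 * presented to the application as { 2, 16 }, roughly matching the Windows driver behaviour
 * cited above. */
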
VkResult wine_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice handle, VkSurfaceKHR surface_handle,
                                                        VkSurfaceCapabilitiesKHR *capabilities)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(handle);
    struct wine_surface *surface = wine_surface_from_handle(surface_handle);
    VkResult res;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(phys_dev->phys_dev,
            surface->driver_surface, capabilities);

    if (res == VK_SUCCESS)
        adjust_max_image_count(phys_dev, capabilities);

    return res;
}

wine_vkGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice handle
,
1770 const VkPhysicalDeviceSurfaceInfo2KHR
*surface_info
,
1771 VkSurfaceCapabilities2KHR
*capabilities
)
1773 struct wine_phys_dev
*phys_dev
= wine_phys_dev_from_handle(handle
);
1774 struct wine_surface
*surface
= wine_surface_from_handle(surface_info
->surface
);
1775 VkPhysicalDeviceSurfaceInfo2KHR host_info
;
1778 host_info
.sType
= surface_info
->sType
;
1779 host_info
.pNext
= surface_info
->pNext
;
1780 host_info
.surface
= surface
->driver_surface
;
1781 res
= phys_dev
->instance
->funcs
.p_vkGetPhysicalDeviceSurfaceCapabilities2KHR(phys_dev
->phys_dev
,
1782 &host_info
, capabilities
);
1784 if (res
== VK_SUCCESS
)
1785 adjust_max_image_count(phys_dev
, &capabilities
->surfaceCapabilities
);
VkResult wine_vkCreateDebugUtilsMessengerEXT(VkInstance handle,
                                             const VkDebugUtilsMessengerCreateInfoEXT *create_info,
                                             const VkAllocationCallbacks *allocator,
                                             VkDebugUtilsMessengerEXT *messenger)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    VkDebugUtilsMessengerCreateInfoEXT wine_create_info;
    struct wine_debug_utils_messenger *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    object->instance = instance;
    object->user_callback = create_info->pfnUserCallback;
    object->user_data = create_info->pUserData;

    wine_create_info = *create_info;

    wine_create_info.pfnUserCallback = (void *) &debug_utils_callback_conversion;
    wine_create_info.pUserData = object;

    res = instance->funcs.p_vkCreateDebugUtilsMessengerEXT(instance->instance, &wine_create_info, NULL, &object->debug_messenger);

    if (res != VK_SUCCESS)
    {
        free(object);
        return res;
    }

    WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(instance, object, object->debug_messenger, object);
    *messenger = wine_debug_utils_messenger_to_handle(object);

    return VK_SUCCESS;
}

void wine_vkDestroyDebugUtilsMessengerEXT(VkInstance handle, VkDebugUtilsMessengerEXT messenger,
                                          const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    struct wine_debug_utils_messenger *object;

    object = wine_debug_utils_messenger_from_handle(messenger);

    if (!object)
        return;

    instance->funcs.p_vkDestroyDebugUtilsMessengerEXT(instance->instance, object->debug_messenger, NULL);
    WINE_VK_REMOVE_HANDLE_MAPPING(instance, object);

    free(object);
}

wine_vkCreateDebugReportCallbackEXT(VkInstance handle
,
1847 const VkDebugReportCallbackCreateInfoEXT
*create_info
,
1848 const VkAllocationCallbacks
*allocator
,
1849 VkDebugReportCallbackEXT
*callback
)
1851 struct wine_instance
*instance
= wine_instance_from_handle(handle
);
1852 VkDebugReportCallbackCreateInfoEXT wine_create_info
;
1853 struct wine_debug_report_callback
*object
;
1857 FIXME("Support for allocation callbacks not implemented yet\n");
1859 if (!(object
= calloc(1, sizeof(*object
))))
1860 return VK_ERROR_OUT_OF_HOST_MEMORY
;
1862 object
->instance
= instance
;
1863 object
->user_callback
= create_info
->pfnCallback
;
1864 object
->user_data
= create_info
->pUserData
;
1866 wine_create_info
= *create_info
;
1868 wine_create_info
.pfnCallback
= (void *) debug_report_callback_conversion
;
1869 wine_create_info
.pUserData
= object
;
1871 res
= instance
->funcs
.p_vkCreateDebugReportCallbackEXT(instance
->instance
, &wine_create_info
, NULL
, &object
->debug_callback
);
1873 if (res
!= VK_SUCCESS
)
1879 WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(instance
, object
, object
->debug_callback
, object
);
1880 *callback
= wine_debug_report_callback_to_handle(object
);
void wine_vkDestroyDebugReportCallbackEXT(VkInstance handle, VkDebugReportCallbackEXT callback,
                                          const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    struct wine_debug_report_callback *object;

    object = wine_debug_report_callback_from_handle(callback);

    if (!object)
        return;

    instance->funcs.p_vkDestroyDebugReportCallbackEXT(instance->instance, object->debug_callback, NULL);
    WINE_VK_REMOVE_HANDLE_MAPPING(instance, object);

    free(object);
}

VkResult wine_vkCreateDeferredOperationKHR(VkDevice handle,
                                           const VkAllocationCallbacks *allocator,
                                           VkDeferredOperationKHR *deferredOperation)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_deferred_operation *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = device->funcs.p_vkCreateDeferredOperationKHR(device->device, NULL, &object->deferred_operation);

    if (res != VK_SUCCESS)
    {
        free(object);
        return res;
    }

    init_conversion_context(&object->ctx);

    WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(device->phys_dev->instance, object, object->deferred_operation, object);
    *deferredOperation = wine_deferred_operation_to_handle(object);

    return VK_SUCCESS;
}

,
1934 VkDeferredOperationKHR operation
,
1935 const VkAllocationCallbacks
* allocator
)
1937 struct wine_device
*device
= wine_device_from_handle(handle
);
1938 struct wine_deferred_operation
*object
;
1940 object
= wine_deferred_operation_from_handle(operation
);
1945 device
->funcs
.p_vkDestroyDeferredOperationKHR(device
->device
, object
->deferred_operation
, NULL
);
1947 WINE_VK_REMOVE_HANDLE_MAPPING(device
->phys_dev
->instance
, object
);
1949 free_conversion_context(&object
->ctx
);
NTSTATUS vk_is_available_instance_function(void *arg)
{
    struct is_available_instance_function_params *params = arg;
    struct wine_instance *instance = wine_instance_from_handle(params->instance);
    return !!vk_funcs->p_vkGetInstanceProcAddr(instance->instance, params->name);
}

NTSTATUS vk_is_available_device_function(void *arg)
{
    struct is_available_device_function_params *params = arg;
    struct wine_device *device = wine_device_from_handle(params->device);
    return !!vk_funcs->p_vkGetDeviceProcAddr(device->device, params->name);
}

NTSTATUS vk_is_available_instance_function32(void *arg)
{
    struct
    {
        UINT32 instance;
        UINT32 name;
    } *params = arg;
    struct wine_instance *instance = wine_instance_from_handle(UlongToPtr(params->instance));
    return !!vk_funcs->p_vkGetInstanceProcAddr(instance->instance, UlongToPtr(params->name));
}

NTSTATUS vk_is_available_device_function32(void *arg)
{
    struct
    {
        UINT32 device;
        UINT32 name;
    } *params = arg;
    struct wine_device *device = wine_device_from_handle(UlongToPtr(params->device));
    return !!vk_funcs->p_vkGetDeviceProcAddr(device->device, UlongToPtr(params->name));
}