/* Wine Vulkan ICD implementation
 *
 * Copyright 2017 Roderick Colenbrander
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#if 0
#pragma makedep unix
#endif

#include "config.h"
#include <time.h>

#include "vulkan_private.h"
#include "wine/vulkan_driver.h"
#include "wine/rbtree.h"
#include "ntgdi.h"
#include "ntuser.h"

WINE_DEFAULT_DEBUG_CHANNEL(vulkan);
WINE_DECLARE_DEBUG_CHANNEL(fps);

static PFN_vkCreateInstance p_vkCreateInstance;
static PFN_vkEnumerateInstanceVersion p_vkEnumerateInstanceVersion;
static PFN_vkEnumerateInstanceExtensionProperties p_vkEnumerateInstanceExtensionProperties;

/**********************************************************************
 *           get_win_monitor_dpi
 */
static UINT get_win_monitor_dpi( HWND hwnd )
{
    return NtUserGetSystemDpiForProcess( NULL );  /* FIXME: get monitor dpi */
}

static int window_surface_compare(const void *key, const struct rb_entry *entry)
{
    const struct wine_surface *surface = RB_ENTRY_VALUE(entry, struct wine_surface, window_entry);
    HWND key_hwnd = (HWND)key;

    if (key_hwnd < surface->hwnd) return -1;
    if (key_hwnd > surface->hwnd) return 1;
    return 0;
}

static pthread_mutex_t window_surfaces_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rb_tree window_surfaces = {.compare = window_surface_compare};
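
/* Maps an HWND to the wine_surface most recently created for it, so the surface
 * for a given window can be looked up later; creating a new surface for the same
 * window invalidates the previous entry. */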
static void window_surfaces_insert(struct wine_surface *surface)
{
    struct wine_surface *previous;
    struct rb_entry *ptr;

    pthread_mutex_lock(&window_surfaces_lock);

    if (!(ptr = rb_get(&window_surfaces, surface->hwnd)))
        rb_put(&window_surfaces, surface->hwnd, &surface->window_entry);
    else
    {
        previous = RB_ENTRY_VALUE(ptr, struct wine_surface, window_entry);
        rb_replace(&window_surfaces, &previous->window_entry, &surface->window_entry);
        previous->hwnd = 0; /* make sure previous surface becomes invalid */
    }

    pthread_mutex_unlock(&window_surfaces_lock);
}

static void window_surfaces_remove(struct wine_surface *surface)
{
    pthread_mutex_lock(&window_surfaces_lock);
    if (surface->hwnd) rb_remove(&window_surfaces, &surface->window_entry);
    pthread_mutex_unlock(&window_surfaces_lock);
}

static BOOL is_wow64(void)
{
    return sizeof(void *) == sizeof(UINT64) && NtCurrentTeb()->WowTebOffset;
}

static BOOL use_external_memory(void)
{
    return is_wow64();
}
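
/* Set up in init_vulkan(): under wow64 this restricts where host memory that must
 * stay addressable by the 32-bit client may be mapped. */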
static ULONG_PTR zero_bits = 0;

#define wine_vk_count_struct(s, t) wine_vk_count_struct_((void *)s, VK_STRUCTURE_TYPE_##t)
static uint32_t wine_vk_count_struct_(void *s, VkStructureType t)
{
    const VkBaseInStructure *header;
    uint32_t result = 0;

    for (header = s; header; header = header->pNext)
    {
        if (header->sType == t)
            result++;
    }

    return result;
}

static const struct vulkan_funcs *vk_funcs;

static int wrapper_entry_compare(const void *key, const struct rb_entry *entry)
{
    struct wrapper_entry *wrapper = RB_ENTRY_VALUE(entry, struct wrapper_entry, entry);
    const uint64_t *host_handle = key;
    if (*host_handle < wrapper->host_handle) return -1;
    if (*host_handle > wrapper->host_handle) return 1;
    return 0;
}

static void add_handle_mapping(struct wine_instance *instance, uint64_t client_handle,
                               uint64_t host_handle, struct wrapper_entry *entry)
{
    if (instance->enable_wrapper_list)
    {
        entry->host_handle = host_handle;
        entry->client_handle = client_handle;

        pthread_rwlock_wrlock(&instance->wrapper_lock);
        rb_put(&instance->wrappers, &host_handle, &entry->entry);
        pthread_rwlock_unlock(&instance->wrapper_lock);
    }
}

static void add_handle_mapping_ptr(struct wine_instance *instance, void *client_handle,
                                   void *host_handle, struct wrapper_entry *entry)
{
    add_handle_mapping(instance, (uintptr_t)client_handle, (uintptr_t)host_handle, entry);
}

static void remove_handle_mapping(struct wine_instance *instance, struct wrapper_entry *entry)
{
    if (instance->enable_wrapper_list)
    {
        pthread_rwlock_wrlock(&instance->wrapper_lock);
        rb_remove(&instance->wrappers, &entry->entry);
        pthread_rwlock_unlock(&instance->wrapper_lock);
    }
}

static uint64_t client_handle_from_host(struct wine_instance *instance, uint64_t host_handle)
{
    struct rb_entry *entry;
    uint64_t result = 0;

    pthread_rwlock_rdlock(&instance->wrapper_lock);
    if ((entry = rb_get(&instance->wrappers, &host_handle)))
    {
        struct wrapper_entry *wrapper = RB_ENTRY_VALUE(entry, struct wrapper_entry, entry);
        result = wrapper->client_handle;
    }
    pthread_rwlock_unlock(&instance->wrapper_lock);
    return result;
}
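
/* Host-side debug callbacks run on the unix side and report host handles; wrapped
 * handles are translated back to client handles using the tree above before the
 * callback is forwarded to the application through KeUserModeCallback. */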
static VkBool32 debug_utils_callback_conversion(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
                                                VkDebugUtilsMessageTypeFlagsEXT message_types,
                                                const VkDebugUtilsMessengerCallbackDataEXT *callback_data,
                                                void *user_data)
{
    struct wine_vk_debug_utils_params params;
    VkDebugUtilsObjectNameInfoEXT *object_name_infos;
    struct wine_debug_utils_messenger *object;
    void *ret_ptr;
    ULONG ret_len;
    unsigned int i;

    TRACE("%i, %u, %p, %p\n", severity, message_types, callback_data, user_data);

    object = user_data;

    if (!object->instance->host_instance)
    {
        /* instance wasn't yet created, this is a message from the host loader */
        return VK_FALSE;
    }

    /* FIXME: we should pack all referenced structs instead of passing pointers */
    params.user_callback = object->user_callback;
    params.user_data = object->user_data;
    params.severity = severity;
    params.message_types = message_types;
    params.data = *((VkDebugUtilsMessengerCallbackDataEXT *) callback_data);

    object_name_infos = calloc(params.data.objectCount, sizeof(*object_name_infos));

    for (i = 0; i < params.data.objectCount; i++)
    {
        object_name_infos[i].sType = callback_data->pObjects[i].sType;
        object_name_infos[i].pNext = callback_data->pObjects[i].pNext;
        object_name_infos[i].objectType = callback_data->pObjects[i].objectType;
        object_name_infos[i].pObjectName = callback_data->pObjects[i].pObjectName;

        if (wine_vk_is_type_wrapped(callback_data->pObjects[i].objectType))
        {
            object_name_infos[i].objectHandle = client_handle_from_host(object->instance, callback_data->pObjects[i].objectHandle);
            if (!object_name_infos[i].objectHandle)
            {
                WARN("handle conversion failed 0x%s\n", wine_dbgstr_longlong(callback_data->pObjects[i].objectHandle));
                free(object_name_infos);
                return VK_FALSE;
            }
        }
        else
        {
            object_name_infos[i].objectHandle = callback_data->pObjects[i].objectHandle;
        }
    }

    params.data.pObjects = object_name_infos;

    /* applications should always return VK_FALSE */
    KeUserModeCallback( NtUserCallVulkanDebugUtilsCallback, &params, sizeof(params), &ret_ptr, &ret_len );

    free(object_name_infos);
    if (ret_len == sizeof(VkBool32)) return *(VkBool32 *)ret_ptr;
    return VK_FALSE;
}

static VkBool32 debug_report_callback_conversion(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT object_type,
        uint64_t object_handle, size_t location, int32_t code, const char *layer_prefix, const char *message, void *user_data)
{
    struct wine_vk_debug_report_params params;
    struct wine_debug_report_callback *object;
    void *ret_ptr;
    ULONG ret_len;

    TRACE("%#x, %#x, 0x%s, 0x%s, %d, %p, %p, %p\n", flags, object_type, wine_dbgstr_longlong(object_handle),
          wine_dbgstr_longlong(location), code, layer_prefix, message, user_data);

    object = user_data;

    if (!object->instance->host_instance)
    {
        /* instance wasn't yet created, this is a message from the host loader */
        return VK_FALSE;
    }

    /* FIXME: we should pack all referenced structs instead of passing pointers */
    params.user_callback = object->user_callback;
    params.user_data = object->user_data;
    params.flags = flags;
    params.object_type = object_type;
    params.location = location;
    params.code = code;
    params.layer_prefix = layer_prefix;
    params.message = message;

    params.object_handle = client_handle_from_host(object->instance, object_handle);
    if (!params.object_handle)
        params.object_type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT;

    KeUserModeCallback( NtUserCallVulkanDebugReportCallback, &params, sizeof(params), &ret_ptr, &ret_len );
    if (ret_len == sizeof(VkBool32)) return *(VkBool32 *)ret_ptr;
    return VK_FALSE;
}

static void wine_phys_dev_cleanup(struct wine_phys_dev *phys_dev)
{
    free(phys_dev->extensions);
}
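
/* Initialize a physical device wrapper: cache the host extensions winevulkan can
 * thunk, and probe the memory-mapping extensions used to keep host mappings
 * addressable by 32-bit clients under wow64. */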
static VkResult wine_vk_physical_device_init(struct wine_phys_dev *object, VkPhysicalDevice host_handle,
                                             VkPhysicalDevice client_handle, struct wine_instance *instance)
{
    BOOL have_memory_placed = FALSE, have_map_memory2 = FALSE;
    uint32_t num_host_properties, num_properties = 0;
    VkExtensionProperties *host_properties = NULL;
    BOOL have_external_memory_host = FALSE;
    VkResult res;
    unsigned int i, j;

    object->instance = instance;
    object->handle = client_handle;
    object->host_physical_device = host_handle;

    client_handle->base.unix_handle = (uintptr_t)object;

    instance->funcs.p_vkGetPhysicalDeviceMemoryProperties(host_handle, &object->memory_properties);

    res = instance->funcs.p_vkEnumerateDeviceExtensionProperties(host_handle,
            NULL, &num_host_properties, NULL);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to enumerate device extensions, res=%d\n", res);
        goto err;
    }

    host_properties = calloc(num_host_properties, sizeof(*host_properties));
    if (!host_properties)
    {
        ERR("Failed to allocate memory for device properties!\n");
        goto err;
    }

    res = instance->funcs.p_vkEnumerateDeviceExtensionProperties(host_handle,
            NULL, &num_host_properties, host_properties);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to enumerate device extensions, res=%d\n", res);
        goto err;
    }

    /* Count list of extensions for which we have an implementation.
     * TODO: perform translation for platform specific extensions.
     */
    for (i = 0; i < num_host_properties; i++)
    {
        if (wine_vk_device_extension_supported(host_properties[i].extensionName))
        {
            TRACE("Enabling extension '%s' for physical device %p\n", host_properties[i].extensionName, object);
            num_properties++;
        }
        else
        {
            TRACE("Skipping extension '%s', no implementation found in winevulkan.\n", host_properties[i].extensionName);
        }
        if (!strcmp(host_properties[i].extensionName, "VK_EXT_external_memory_host"))
            have_external_memory_host = TRUE;
        else if (!strcmp(host_properties[i].extensionName, "VK_EXT_map_memory_placed"))
            have_memory_placed = TRUE;
        else if (!strcmp(host_properties[i].extensionName, "VK_KHR_map_memory2"))
            have_map_memory2 = TRUE;
    }

    TRACE("Host supported extensions %u, Wine supported extensions %u\n", num_host_properties, num_properties);

    if (!(object->extensions = calloc(num_properties, sizeof(*object->extensions))))
    {
        ERR("Failed to allocate memory for device extensions!\n");
        goto err;
    }

    for (i = 0, j = 0; i < num_host_properties; i++)
    {
        if (wine_vk_device_extension_supported(host_properties[i].extensionName))
        {
            object->extensions[j] = host_properties[i];
            j++;
        }
    }
    object->extension_count = num_properties;

    if (zero_bits && have_memory_placed && have_map_memory2)
    {
        VkPhysicalDeviceMapMemoryPlacedFeaturesEXT map_placed_feature =
        {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT,
        };
        VkPhysicalDeviceFeatures2 features =
        {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
            .pNext = &map_placed_feature,
        };

        instance->funcs.p_vkGetPhysicalDeviceFeatures2KHR(host_handle, &features);
        if (map_placed_feature.memoryMapPlaced && map_placed_feature.memoryUnmapReserve)
        {
            VkPhysicalDeviceMapMemoryPlacedPropertiesEXT map_placed_props =
            {
                .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_PROPERTIES_EXT,
            };
            VkPhysicalDeviceProperties2 props =
            {
                .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
                .pNext = &map_placed_props,
            };

            instance->funcs.p_vkGetPhysicalDeviceProperties2(host_handle, &props);
            object->map_placed_align = map_placed_props.minPlacedMemoryMapAlignment;
            TRACE( "Using placed map with alignment %u\n", object->map_placed_align );
        }
    }

    if (zero_bits && have_external_memory_host && !object->map_placed_align)
    {
        VkPhysicalDeviceExternalMemoryHostPropertiesEXT host_mem_props =
        {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT,
        };
        VkPhysicalDeviceProperties2 props =
        {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &host_mem_props,
        };

        instance->funcs.p_vkGetPhysicalDeviceProperties2KHR(host_handle, &props);
        object->external_memory_align = host_mem_props.minImportedHostPointerAlignment;
        if (object->external_memory_align)
            TRACE("Using VK_EXT_external_memory_host for memory mapping with alignment: %u\n",
                  object->external_memory_align);
    }

    free(host_properties);
    return VK_SUCCESS;

err:
    wine_phys_dev_cleanup(object);
    free(host_properties);
    return res;
}

static void wine_vk_free_command_buffers(struct wine_device *device,
        struct wine_cmd_pool *pool, uint32_t count, const VkCommandBuffer *buffers)
{
    unsigned int i;

    for (i = 0; i < count; i++)
    {
        struct wine_cmd_buffer *buffer = wine_cmd_buffer_from_handle(buffers[i]);

        if (!buffer)
            continue;

        device->funcs.p_vkFreeCommandBuffers(device->host_device, pool->host_command_pool, 1,
                                             &buffer->host_command_buffer);
        remove_handle_mapping(device->phys_dev->instance, &buffer->wrapper_entry);
        buffer->handle->base.unix_handle = 0;
        free(buffer);
    }
}

static void wine_vk_device_init_queues(struct wine_device *device, const VkDeviceQueueCreateInfo *info,
        VkQueue *handles)
{
    VkDeviceQueueInfo2 queue_info;
    UINT i;

    TRACE("Queue family index %u, queue count %u.\n", info->queueFamilyIndex, info->queueCount);

    for (i = 0; i < info->queueCount; i++)
    {
        struct wine_queue *queue = device->queues + device->queue_count + i;

        queue->device = device;
        queue->handle = (*handles)++;
        queue->family_index = info->queueFamilyIndex;
        queue->queue_index = i;
        queue->flags = info->flags;

        /* The Vulkan spec says:
         *
         * "vkGetDeviceQueue must only be used to get queues that were created
         * with the flags parameter of VkDeviceQueueCreateInfo set to zero."
         */
        if (info->flags && device->funcs.p_vkGetDeviceQueue2)
        {
            queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2;
            queue_info.pNext = NULL;
            queue_info.flags = info->flags;
            queue_info.queueFamilyIndex = info->queueFamilyIndex;
            queue_info.queueIndex = i;
            device->funcs.p_vkGetDeviceQueue2(device->host_device, &queue_info, &queue->host_queue);
        }
        else
        {
            device->funcs.p_vkGetDeviceQueue(device->host_device, info->queueFamilyIndex, i, &queue->host_queue);
        }

        queue->handle->base.unix_handle = (uintptr_t)queue;
        TRACE("Got device %p queue %p, host_queue %p.\n", device, queue, queue->host_queue);
    }

    device->queue_count += info->queueCount;
}

static const char *find_extension(const char *const *extensions, uint32_t count, const char *ext)
{
    while (count--)
    {
        if (!strcmp(extensions[count], ext))
            return extensions[count];
    }
    return NULL;
}
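
/* Convert the application's VkDeviceCreateInfo for the host driver: drop layers,
 * reject extensions that have no thunks, and append the host-side memory mapping
 * extensions winevulkan itself relies on when available. */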
static VkResult wine_vk_device_convert_create_info(struct wine_phys_dev *phys_dev,
        struct conversion_context *ctx, const VkDeviceCreateInfo *src, VkDeviceCreateInfo *dst)
{
    const char *extra_extensions[2], * const *extensions = src->ppEnabledExtensionNames;
    unsigned int i, extra_count = 0, extensions_count = src->enabledExtensionCount;

    *dst = *src;

    /* Should be filtered out by loader as ICDs don't support layers. */
    dst->enabledLayerCount = 0;
    dst->ppEnabledLayerNames = NULL;

    TRACE("Enabled %u extensions.\n", extensions_count);
    for (i = 0; i < extensions_count; i++)
    {
        const char *extension_name = extensions[i];
        TRACE("Extension %u: %s.\n", i, debugstr_a(extension_name));
        if (!wine_vk_device_extension_supported(extension_name))
        {
            WARN("Extension %s is not supported.\n", debugstr_a(extension_name));
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }

    if (phys_dev->map_placed_align)
    {
        VkPhysicalDeviceMapMemoryPlacedFeaturesEXT *map_placed_features;
        map_placed_features = conversion_context_alloc(ctx, sizeof(*map_placed_features));
        map_placed_features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT;
        map_placed_features->pNext = (void *)dst->pNext;
        map_placed_features->memoryMapPlaced = VK_TRUE;
        map_placed_features->memoryMapRangePlaced = VK_FALSE;
        map_placed_features->memoryUnmapReserve = VK_TRUE;
        dst->pNext = map_placed_features;

        if (!find_extension(extensions, extensions_count, "VK_EXT_map_memory_placed"))
            extra_extensions[extra_count++] = "VK_EXT_map_memory_placed";
        if (!find_extension(extensions, extensions_count, "VK_KHR_map_memory2"))
            extra_extensions[extra_count++] = "VK_KHR_map_memory2";
    }
    else if (phys_dev->external_memory_align)
    {
        if (!find_extension(extensions, extensions_count, "VK_KHR_external_memory"))
            extra_extensions[extra_count++] = "VK_KHR_external_memory";
        if (!find_extension(extensions, extensions_count, "VK_EXT_external_memory_host"))
            extra_extensions[extra_count++] = "VK_EXT_external_memory_host";
    }

    if (extra_count)
    {
        const char **new_extensions;

        dst->enabledExtensionCount += extra_count;
        new_extensions = conversion_context_alloc(ctx, dst->enabledExtensionCount * sizeof(*new_extensions));
        memcpy(new_extensions, extensions, extensions_count * sizeof(*new_extensions));
        memcpy(new_extensions + extensions_count, extra_extensions, extra_count * sizeof(*new_extensions));
        dst->ppEnabledExtensionNames = new_extensions;
    }

    return VK_SUCCESS;
}

NTSTATUS init_vulkan(void *args)
{
    vk_funcs = __wine_get_vulkan_driver(WINE_VULKAN_DRIVER_VERSION);
    if (!vk_funcs)
    {
        ERR("Failed to load Wine graphics driver supporting Vulkan.\n");
        return STATUS_UNSUCCESSFUL;
    }

    p_vkCreateInstance = vk_funcs->p_vkGetInstanceProcAddr(NULL, "vkCreateInstance");
    p_vkEnumerateInstanceVersion = vk_funcs->p_vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
    p_vkEnumerateInstanceExtensionProperties = vk_funcs->p_vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");

    if (is_wow64())
    {
        SYSTEM_BASIC_INFORMATION info;

        NtQuerySystemInformation(SystemEmulationBasicInformation, &info, sizeof(info), NULL);
        zero_bits = (ULONG_PTR)info.HighestUserAddress | 0x7fffffff;
    }

    return STATUS_SUCCESS;
}

/* Helper function for converting between win32 and host compatible VkInstanceCreateInfo.
 * This function takes care of extensions handled at winevulkan layer, a Wine graphics
 * driver is responsible for handling e.g. surface extensions.
 */
static VkResult wine_vk_instance_convert_create_info(struct conversion_context *ctx,
        const VkInstanceCreateInfo *src, VkInstanceCreateInfo *dst, struct wine_instance *object)
{
    VkDebugUtilsMessengerCreateInfoEXT *debug_utils_messenger;
    VkDebugReportCallbackCreateInfoEXT *debug_report_callback;
    const char **new_extensions;
    VkBaseInStructure *header;
    unsigned int i;

    *dst = *src;

    object->utils_messenger_count = wine_vk_count_struct(dst, DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
    object->utils_messengers = calloc(object->utils_messenger_count, sizeof(*object->utils_messengers));
    header = (VkBaseInStructure *) dst;
    for (i = 0; i < object->utils_messenger_count; i++)
    {
        header = find_next_struct(header->pNext, VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
        debug_utils_messenger = (VkDebugUtilsMessengerCreateInfoEXT *) header;

        object->utils_messengers[i].instance = object;
        object->utils_messengers[i].host_debug_messenger = VK_NULL_HANDLE;
        object->utils_messengers[i].user_callback = debug_utils_messenger->pfnUserCallback;
        object->utils_messengers[i].user_data = debug_utils_messenger->pUserData;

        /* convert_VkInstanceCreateInfo_* already copied the chain, so we can modify it in-place. */
        debug_utils_messenger->pfnUserCallback = (void *) &debug_utils_callback_conversion;
        debug_utils_messenger->pUserData = &object->utils_messengers[i];
    }

    if ((debug_report_callback = find_next_struct(dst->pNext, VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT)))
    {
        object->default_callback.instance = object;
        object->default_callback.host_debug_callback = VK_NULL_HANDLE;
        object->default_callback.user_callback = debug_report_callback->pfnCallback;
        object->default_callback.user_data = debug_report_callback->pUserData;

        debug_report_callback->pfnCallback = (void *) &debug_report_callback_conversion;
        debug_report_callback->pUserData = &object->default_callback;
    }

    /* ICDs don't support any layers, so nothing to copy. Modern versions of the loader
     * filter this data out as well.
     */
    if (dst->enabledLayerCount)
    {
        FIXME("Loading explicit layers is not supported by winevulkan!\n");
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    for (i = 0; i < src->enabledExtensionCount; i++)
    {
        const char *extension_name = src->ppEnabledExtensionNames[i];
        TRACE("Extension %u: %s.\n", i, debugstr_a(extension_name));
        if (!wine_vk_instance_extension_supported(extension_name))
        {
            WARN("Extension %s is not supported.\n", debugstr_a(extension_name));
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }

    new_extensions = conversion_context_alloc(ctx, (src->enabledExtensionCount + 2) *
                                              sizeof(*src->ppEnabledExtensionNames));
    memcpy(new_extensions, src->ppEnabledExtensionNames,
           dst->enabledExtensionCount * sizeof(*dst->ppEnabledExtensionNames));
    dst->ppEnabledExtensionNames = new_extensions;
    dst->enabledExtensionCount = src->enabledExtensionCount;

    for (i = 0; i < dst->enabledExtensionCount; i++)
    {
        const char *extension_name = dst->ppEnabledExtensionNames[i];
        if (!strcmp(extension_name, "VK_EXT_debug_utils") || !strcmp(extension_name, "VK_EXT_debug_report"))
        {
            object->enable_wrapper_list = VK_TRUE;
        }
        if (!strcmp(extension_name, "VK_KHR_win32_surface"))
        {
            new_extensions[i] = vk_funcs->p_get_host_surface_extension();
            object->enable_win32_surface = VK_TRUE;
        }
    }

    if (use_external_memory())
    {
        new_extensions[dst->enabledExtensionCount++] = "VK_KHR_get_physical_device_properties2";
        new_extensions[dst->enabledExtensionCount++] = "VK_KHR_external_memory_capabilities";
    }

    TRACE("Enabled %u instance extensions.\n", dst->enabledExtensionCount);

    return VK_SUCCESS;
}

/* Helper function which stores wrapped physical devices in the instance object. */
static VkResult wine_vk_instance_init_physical_devices(struct wine_instance *instance)
{
    VkPhysicalDevice *host_handles;
    uint32_t phys_dev_count;
    unsigned int i;
    VkResult res;

    res = instance->funcs.p_vkEnumeratePhysicalDevices(instance->host_instance, &phys_dev_count, NULL);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to enumerate physical devices, res=%d\n", res);
        return res;
    }
    if (!phys_dev_count)
        return res;

    if (phys_dev_count > instance->handle->phys_dev_count)
    {
        instance->handle->phys_dev_count = phys_dev_count;
        return VK_ERROR_OUT_OF_POOL_MEMORY;
    }
    instance->handle->phys_dev_count = phys_dev_count;

    if (!(host_handles = calloc(phys_dev_count, sizeof(*host_handles))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = instance->funcs.p_vkEnumeratePhysicalDevices(instance->host_instance, &phys_dev_count, host_handles);
    if (res != VK_SUCCESS)
    {
        free(host_handles);
        return res;
    }

    /* Wrap each host physical device handle into a dispatchable object for the ICD loader. */
    for (i = 0; i < phys_dev_count; i++)
    {
        struct wine_phys_dev *phys_dev = instance->phys_devs + i;
        res = wine_vk_physical_device_init(phys_dev, host_handles[i], &instance->handle->phys_devs[i], instance);
        if (res != VK_SUCCESS)
            goto err;
    }
    instance->phys_dev_count = phys_dev_count;

    free(host_handles);
    return VK_SUCCESS;

err:
    while (i) wine_phys_dev_cleanup(&instance->phys_devs[--i]);
    free(host_handles);
    return res;
}

static struct wine_phys_dev *wine_vk_instance_wrap_physical_device(struct wine_instance *instance,
        VkPhysicalDevice host_handle)
{
    unsigned int i;

    for (i = 0; i < instance->phys_dev_count; ++i)
    {
        struct wine_phys_dev *current = instance->phys_devs + i;
        if (current->host_physical_device == host_handle) return current;
    }

    ERR("Unrecognized physical device %p.\n", host_handle);
    return NULL;
}
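
/* Command buffers are dispatchable objects: each client handle passed in by the
 * loader gets backed by a wine_cmd_buffer holding the host handle. */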
VkResult wine_vkAllocateCommandBuffers(VkDevice handle, const VkCommandBufferAllocateInfo *allocate_info,
                                       VkCommandBuffer *buffers)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_cmd_buffer *buffer;
    struct wine_cmd_pool *pool;
    VkResult res = VK_SUCCESS;
    unsigned int i;

    pool = wine_cmd_pool_from_handle(allocate_info->commandPool);

    for (i = 0; i < allocate_info->commandBufferCount; i++)
    {
        VkCommandBufferAllocateInfo allocate_info_host;

        /* TODO: future extensions (none yet) may require pNext conversion. */
        allocate_info_host.pNext = allocate_info->pNext;
        allocate_info_host.sType = allocate_info->sType;
        allocate_info_host.commandPool = pool->host_command_pool;
        allocate_info_host.level = allocate_info->level;
        allocate_info_host.commandBufferCount = 1;

        TRACE("Allocating command buffer %u from pool 0x%s.\n",
              i, wine_dbgstr_longlong(allocate_info_host.commandPool));

        if (!(buffer = calloc(1, sizeof(*buffer))))
        {
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            break;
        }

        buffer->handle = buffers[i];
        buffer->device = device;
        res = device->funcs.p_vkAllocateCommandBuffers(device->host_device, &allocate_info_host,
                                                       &buffer->host_command_buffer);
        buffer->handle->base.unix_handle = (uintptr_t)buffer;
        add_handle_mapping_ptr(device->phys_dev->instance, buffer->handle, buffer->host_command_buffer, &buffer->wrapper_entry);
        if (res != VK_SUCCESS)
        {
            ERR("Failed to allocate command buffer, res=%d.\n", res);
            buffer->host_command_buffer = VK_NULL_HANDLE;
            break;
        }
    }

    if (res != VK_SUCCESS)
        wine_vk_free_command_buffers(device, pool, i + 1, buffers);

    return res;
}

VkResult wine_vkCreateDevice(VkPhysicalDevice phys_dev_handle, const VkDeviceCreateInfo *create_info,
                             const VkAllocationCallbacks *allocator, VkDevice *ret_device,
                             void *client_ptr)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    struct wine_instance *instance = phys_dev->instance;
    VkDevice device_handle = client_ptr;
    VkDeviceCreateInfo create_info_host;
    struct VkQueue_T *queue_handles;
    struct conversion_context ctx;
    struct wine_device *object;
    unsigned int queue_count, i;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (TRACE_ON(vulkan))
    {
        VkPhysicalDeviceProperties properties;

        instance->funcs.p_vkGetPhysicalDeviceProperties(phys_dev->host_physical_device, &properties);

        TRACE("Device name: %s.\n", debugstr_a(properties.deviceName));
        TRACE("Vendor ID: %#x, Device ID: %#x.\n", properties.vendorID, properties.deviceID);
        TRACE("Driver version: %#x.\n", properties.driverVersion);
    }

    /* We need to cache all queues within the device as each requires wrapping since queues are dispatchable objects. */
    for (queue_count = 0, i = 0; i < create_info->queueCreateInfoCount; i++)
        queue_count += create_info->pQueueCreateInfos[i].queueCount;

    if (!(object = calloc(1, offsetof(struct wine_device, queues[queue_count]))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    object->phys_dev = phys_dev;

    init_conversion_context(&ctx);
    res = wine_vk_device_convert_create_info(phys_dev, &ctx, create_info, &create_info_host);
    if (res == VK_SUCCESS)
        res = instance->funcs.p_vkCreateDevice(phys_dev->host_physical_device, &create_info_host,
                                               NULL /* allocator */, &object->host_device);
    free_conversion_context(&ctx);
    if (res != VK_SUCCESS)
    {
        WARN("Failed to create device, res=%d.\n", res);
        free(object);
        return res;
    }

    /* Just load all function pointers we are aware of. The loader takes care of filtering.
     * We use vkGetDeviceProcAddr as opposed to vkGetInstanceProcAddr for efficiency reasons
     * as functions pass through fewer dispatch tables within the loader.
     */
#define USE_VK_FUNC(name) \
    object->funcs.p_##name = (void *)vk_funcs->p_vkGetDeviceProcAddr(object->host_device, #name); \
    if (object->funcs.p_##name == NULL) TRACE("Not found '%s'.\n", #name);
    ALL_VK_DEVICE_FUNCS()
#undef USE_VK_FUNC

    queue_handles = device_handle->queues;
    for (i = 0; i < create_info_host.queueCreateInfoCount; i++)
        wine_vk_device_init_queues(object, create_info_host.pQueueCreateInfos + i, &queue_handles);

    device_handle->quirks = instance->quirks;
    device_handle->base.unix_handle = (uintptr_t)object;

    TRACE("Created device %p, host_device %p.\n", object, object->host_device);
    for (i = 0; i < object->queue_count; i++)
    {
        struct wine_queue *queue = object->queues + i;
        add_handle_mapping_ptr(instance, queue->handle, queue->host_queue, &queue->wrapper_entry);
    }

    *ret_device = device_handle;
    add_handle_mapping_ptr(instance, *ret_device, object->host_device, &object->wrapper_entry);
    return VK_SUCCESS;
}

VkResult wine_vkCreateInstance(const VkInstanceCreateInfo *create_info,
                               const VkAllocationCallbacks *allocator, VkInstance *instance,
                               void *client_ptr)
{
    VkInstance client_instance = client_ptr;
    VkInstanceCreateInfo create_info_host;
    const VkApplicationInfo *app_info;
    struct conversion_context ctx;
    struct wine_instance *object;
    unsigned int i;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, offsetof(struct wine_instance, phys_devs[client_instance->phys_dev_count]))))
    {
        ERR("Failed to allocate memory for instance\n");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    init_conversion_context(&ctx);
    res = wine_vk_instance_convert_create_info(&ctx, create_info, &create_info_host, object);
    if (res == VK_SUCCESS)
        res = p_vkCreateInstance(&create_info_host, NULL /* allocator */, &object->host_instance);
    free_conversion_context(&ctx);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to create instance, res=%d\n", res);
        free(object->utils_messengers);
        free(object);
        return res;
    }

    object->handle = client_instance;

    /* Load all instance functions we are aware of. Note the loader takes care
     * of any filtering for extensions which were not requested, but which the
     * ICD may support.
     */
#define USE_VK_FUNC(name) \
    object->funcs.p_##name = (void *)vk_funcs->p_vkGetInstanceProcAddr(object->host_instance, #name);
    ALL_VK_INSTANCE_FUNCS()
#undef USE_VK_FUNC

    /* Cache physical devices for vkEnumeratePhysicalDevices within the instance as
     * each vkPhysicalDevice is a dispatchable object, which means we need to wrap
     * the host physical devices and present those to the application.
     * Cleanup happens as part of wine_vkDestroyInstance.
     */
    res = wine_vk_instance_init_physical_devices(object);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to load physical devices, res=%d\n", res);
        object->funcs.p_vkDestroyInstance(object->host_instance, NULL /* allocator */);
        free(object->utils_messengers);
        free(object);
        return res;
    }

    if ((app_info = create_info->pApplicationInfo))
    {
        TRACE("Application name %s, application version %#x.\n",
              debugstr_a(app_info->pApplicationName), app_info->applicationVersion);
        TRACE("Engine name %s, engine version %#x.\n", debugstr_a(app_info->pEngineName),
              app_info->engineVersion);
        TRACE("API version %#x.\n", app_info->apiVersion);

        if (app_info->pEngineName && !strcmp(app_info->pEngineName, "idTech"))
            object->quirks |= WINEVULKAN_QUIRK_GET_DEVICE_PROC_ADDR;
    }

    client_instance->base.unix_handle = (uintptr_t)object;

    TRACE("Created instance %p, host_instance %p.\n", object, object->host_instance);

    rb_init(&object->wrappers, wrapper_entry_compare);
    pthread_rwlock_init(&object->wrapper_lock, NULL);

    for (i = 0; i < object->phys_dev_count; i++)
    {
        struct wine_phys_dev *phys_dev = &object->phys_devs[i];
        add_handle_mapping_ptr(object, phys_dev->handle, phys_dev->host_physical_device, &phys_dev->wrapper_entry);
    }

    *instance = client_instance;
    add_handle_mapping_ptr(object, *instance, object->host_instance, &object->wrapper_entry);
    return VK_SUCCESS;
}

void wine_vkDestroyDevice(VkDevice handle, const VkAllocationCallbacks *allocator)
{
    struct wine_device *device = wine_device_from_handle(handle);
    unsigned int i;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");
    if (!device)
        return;

    device->funcs.p_vkDestroyDevice(device->host_device, NULL /* pAllocator */);
    for (i = 0; i < device->queue_count; i++)
        remove_handle_mapping(device->phys_dev->instance, &device->queues[i].wrapper_entry);
    remove_handle_mapping(device->phys_dev->instance, &device->wrapper_entry);

    free(device);
}

void wine_vkDestroyInstance(VkInstance handle, const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    unsigned int i;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");
    if (!instance)
        return;

    instance->funcs.p_vkDestroyInstance(instance->host_instance, NULL /* allocator */);
    for (i = 0; i < instance->phys_dev_count; i++)
    {
        remove_handle_mapping(instance, &instance->phys_devs[i].wrapper_entry);
        wine_phys_dev_cleanup(&instance->phys_devs[i]);
    }
    remove_handle_mapping(instance, &instance->wrapper_entry);

    pthread_rwlock_destroy(&instance->wrapper_lock);
    free(instance->utils_messengers);
    free(instance);
}

VkResult wine_vkEnumerateDeviceExtensionProperties(VkPhysicalDevice phys_dev_handle, const char *layer_name,
                                                   uint32_t *count, VkExtensionProperties *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);

    /* This shouldn't get called with layer_name set, the ICD loader prevents it. */
    if (layer_name)
    {
        ERR("Layer enumeration not supported from ICD.\n");
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    if (!properties)
    {
        *count = phys_dev->extension_count;
        return VK_SUCCESS;
    }

    *count = min(*count, phys_dev->extension_count);
    memcpy(properties, phys_dev->extensions, *count * sizeof(*properties));

    TRACE("Returning %u extensions.\n", *count);
    return *count < phys_dev->extension_count ? VK_INCOMPLETE : VK_SUCCESS;
}

VkResult wine_vkEnumerateInstanceExtensionProperties(const char *name, uint32_t *count,
                                                     VkExtensionProperties *properties)
{
    uint32_t num_properties = 0, num_host_properties;
    VkExtensionProperties *host_properties;
    unsigned int i, j, surface;
    VkResult res;

    res = p_vkEnumerateInstanceExtensionProperties(NULL, &num_host_properties, NULL);
    if (res != VK_SUCCESS)
        return res;

    if (!(host_properties = calloc(num_host_properties, sizeof(*host_properties))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = p_vkEnumerateInstanceExtensionProperties(NULL, &num_host_properties, host_properties);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to retrieve host properties, res=%d.\n", res);
        free(host_properties);
        return res;
    }

    /* The Wine graphics driver provides us with all extensions supported by the host side
     * including extension fixup (e.g. VK_KHR_xlib_surface -> VK_KHR_win32_surface). It is
     * up to us here to filter the list down to extensions for which we have thunks.
     */
    for (i = 0, surface = 0; i < num_host_properties; i++)
    {
        if (wine_vk_instance_extension_supported(host_properties[i].extensionName)
            || (wine_vk_is_host_surface_extension(host_properties[i].extensionName) && !surface++))
            num_properties++;
        else
            TRACE("Instance extension '%s' is not supported.\n", host_properties[i].extensionName);
    }

    if (!properties)
    {
        TRACE("Returning %u extensions.\n", num_properties);
        *count = num_properties;
        free(host_properties);
        return VK_SUCCESS;
    }

    for (i = 0, j = 0, surface = 0; i < num_host_properties && j < *count; i++)
    {
        if (wine_vk_instance_extension_supported(host_properties[i].extensionName))
        {
            TRACE("Enabling extension '%s'.\n", host_properties[i].extensionName);
            properties[j++] = host_properties[i];
        }
        else if (wine_vk_is_host_surface_extension(host_properties[i].extensionName) && !surface++)
        {
            VkExtensionProperties win32_surface = {VK_KHR_WIN32_SURFACE_EXTENSION_NAME, VK_KHR_WIN32_SURFACE_SPEC_VERSION};
            TRACE("Enabling VK_KHR_win32_surface.\n");
            properties[j++] = win32_surface;
        }
    }
    *count = min(*count, num_properties);

    free(host_properties);
    return *count < num_properties ? VK_INCOMPLETE : VK_SUCCESS;
}

VkResult wine_vkEnumerateDeviceLayerProperties(VkPhysicalDevice phys_dev, uint32_t *count,
                                               VkLayerProperties *properties)
{
    *count = 0;
    return VK_SUCCESS;
}

VkResult wine_vkEnumerateInstanceVersion(uint32_t *version)
{
    VkResult res;

    if (p_vkEnumerateInstanceVersion)
    {
        res = p_vkEnumerateInstanceVersion(version);
    }
    else
    {
        *version = VK_API_VERSION_1_0;
        res = VK_SUCCESS;
    }

    TRACE("API version %u.%u.%u.\n",
          VK_VERSION_MAJOR(*version), VK_VERSION_MINOR(*version), VK_VERSION_PATCH(*version));
    *version = min(WINE_VK_VERSION, *version);
    return res;
}

VkResult wine_vkEnumeratePhysicalDevices(VkInstance handle, uint32_t *count, VkPhysicalDevice *devices)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    unsigned int i;

    if (!devices)
    {
        *count = instance->phys_dev_count;
        return VK_SUCCESS;
    }

    *count = min(*count, instance->phys_dev_count);
    for (i = 0; i < *count; i++)
    {
        devices[i] = instance->phys_devs[i].handle;
    }

    TRACE("Returning %u devices.\n", *count);
    return *count < instance->phys_dev_count ? VK_INCOMPLETE : VK_SUCCESS;
}

void wine_vkFreeCommandBuffers(VkDevice handle, VkCommandPool command_pool, uint32_t count,
                               const VkCommandBuffer *buffers)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_cmd_pool *pool = wine_cmd_pool_from_handle(command_pool);

    wine_vk_free_command_buffers(device, pool, count, buffers);
}
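
/* Queues are created and wrapped once at device creation, so the vkGetDeviceQueue
 * entry points below only look up the cached wrapper. */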
static VkQueue wine_vk_device_find_queue(VkDevice handle, const VkDeviceQueueInfo2 *info)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_queue *queue;
    uint32_t i;

    for (i = 0; i < device->queue_count; i++)
    {
        queue = &device->queues[i];
        if (queue->family_index == info->queueFamilyIndex
            && queue->queue_index == info->queueIndex
            && queue->flags == info->flags)
        {
            return queue->handle;
        }
    }

    return VK_NULL_HANDLE;
}

void wine_vkGetDeviceQueue(VkDevice device, uint32_t family_index, uint32_t queue_index, VkQueue *queue)
{
    VkDeviceQueueInfo2 queue_info;

    queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2;
    queue_info.pNext = NULL;
    queue_info.flags = 0;
    queue_info.queueFamilyIndex = family_index;
    queue_info.queueIndex = queue_index;

    *queue = wine_vk_device_find_queue(device, &queue_info);
}

void wine_vkGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *info, VkQueue *queue)
{
    const VkBaseInStructure *chain;

    if ((chain = info->pNext))
        FIXME("Ignoring a linked structure of type %u.\n", chain->sType);

    *queue = wine_vk_device_find_queue(device, info);
}

VkResult wine_vkCreateCommandPool(VkDevice device_handle, const VkCommandPoolCreateInfo *info,
                                  const VkAllocationCallbacks *allocator, VkCommandPool *command_pool,
                                  void *client_ptr)
{
    struct wine_device *device = wine_device_from_handle(device_handle);
    struct vk_command_pool *handle = client_ptr;
    struct wine_cmd_pool *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = device->funcs.p_vkCreateCommandPool(device->host_device, info, NULL, &object->host_command_pool);
    if (res != VK_SUCCESS)
    {
        free(object);
        return res;
    }

    object->handle = (uintptr_t)handle;
    handle->unix_handle = (uintptr_t)object;

    *command_pool = object->handle;
    add_handle_mapping(device->phys_dev->instance, *command_pool, object->host_command_pool, &object->wrapper_entry);
    return VK_SUCCESS;
}

void wine_vkDestroyCommandPool(VkDevice device_handle, VkCommandPool handle,
                               const VkAllocationCallbacks *allocator)
{
    struct wine_device *device = wine_device_from_handle(device_handle);
    struct wine_cmd_pool *pool = wine_cmd_pool_from_handle(handle);

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    device->funcs.p_vkDestroyCommandPool(device->host_device, pool->host_command_pool, NULL);
    remove_handle_mapping(device->phys_dev->instance, &pool->wrapper_entry);
    free(pool);
}
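
/* Physical device group queries return host physical device handles, which are
 * replaced below with the wrappers handed out at enumeration time. */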
static VkResult wine_vk_enumerate_physical_device_groups(struct wine_instance *instance,
        VkResult (*p_vkEnumeratePhysicalDeviceGroups)(VkInstance, uint32_t *, VkPhysicalDeviceGroupProperties *),
        uint32_t *count, VkPhysicalDeviceGroupProperties *properties)
{
    unsigned int i, j;
    VkResult res;

    res = p_vkEnumeratePhysicalDeviceGroups(instance->host_instance, count, properties);
    if (res < 0 || !properties)
        return res;

    for (i = 0; i < *count; ++i)
    {
        VkPhysicalDeviceGroupProperties *current = &properties[i];
        for (j = 0; j < current->physicalDeviceCount; ++j)
        {
            VkPhysicalDevice host_handle = current->physicalDevices[j];
            struct wine_phys_dev *phys_dev = wine_vk_instance_wrap_physical_device(instance, host_handle);
            if (!phys_dev)
                return VK_ERROR_INITIALIZATION_FAILED;
            current->physicalDevices[j] = phys_dev->handle;
        }
    }

    return res;
}

VkResult wine_vkEnumeratePhysicalDeviceGroups(VkInstance handle, uint32_t *count,
                                              VkPhysicalDeviceGroupProperties *properties)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);

    return wine_vk_enumerate_physical_device_groups(instance,
            instance->funcs.p_vkEnumeratePhysicalDeviceGroups, count, properties);
}

VkResult wine_vkEnumeratePhysicalDeviceGroupsKHR(VkInstance handle, uint32_t *count,
                                                 VkPhysicalDeviceGroupProperties *properties)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);

    return wine_vk_enumerate_physical_device_groups(instance,
            instance->funcs.p_vkEnumeratePhysicalDeviceGroupsKHR, count, properties);
}

void wine_vkGetPhysicalDeviceExternalFenceProperties(VkPhysicalDevice phys_dev,
                                                     const VkPhysicalDeviceExternalFenceInfo *fence_info,
                                                     VkExternalFenceProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalFenceFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalFencePropertiesKHR(VkPhysicalDevice phys_dev,
                                                        const VkPhysicalDeviceExternalFenceInfo *fence_info,
                                                        VkExternalFenceProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalFenceFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalBufferProperties(VkPhysicalDevice phys_dev,
                                                      const VkPhysicalDeviceExternalBufferInfo *buffer_info,
                                                      VkExternalBufferProperties *properties)
{
    memset(&properties->externalMemoryProperties, 0, sizeof(properties->externalMemoryProperties));
}

void wine_vkGetPhysicalDeviceExternalBufferPropertiesKHR(VkPhysicalDevice phys_dev,
                                                         const VkPhysicalDeviceExternalBufferInfo *buffer_info,
                                                         VkExternalBufferProperties *properties)
{
    memset(&properties->externalMemoryProperties, 0, sizeof(properties->externalMemoryProperties));
}

VkResult wine_vkGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice phys_dev_handle,
                                                        const VkPhysicalDeviceImageFormatInfo2 *format_info,
                                                        VkImageFormatProperties2 *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    VkExternalImageFormatProperties *external_image_properties;
    VkResult res;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceImageFormatProperties2(phys_dev->host_physical_device,
            format_info, properties);

    if ((external_image_properties = find_next_struct(properties,
                                                      VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES)))
    {
        VkExternalMemoryProperties *p = &external_image_properties->externalMemoryProperties;
        p->externalMemoryFeatures = 0;
        p->exportFromImportedHandleTypes = 0;
        p->compatibleHandleTypes = 0;
    }

    return res;
}

VkResult wine_vkGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice phys_dev_handle,
                                                           const VkPhysicalDeviceImageFormatInfo2 *format_info,
                                                           VkImageFormatProperties2 *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    VkExternalImageFormatProperties *external_image_properties;
    VkResult res;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceImageFormatProperties2KHR(phys_dev->host_physical_device,
            format_info, properties);

    if ((external_image_properties = find_next_struct(properties,
                                                      VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES)))
    {
        VkExternalMemoryProperties *p = &external_image_properties->externalMemoryProperties;
        p->externalMemoryFeatures = 0;
        p->exportFromImportedHandleTypes = 0;
        p->compatibleHandleTypes = 0;
    }

    return res;
}

/* From ntdll/unix/sync.c */
#define NANOSECONDS_IN_A_SECOND 1000000000
#define TICKSPERSEC             10000000

static inline VkTimeDomainEXT get_performance_counter_time_domain(void)
{
#if !defined(__APPLE__) && defined(HAVE_CLOCK_GETTIME)
# ifdef CLOCK_MONOTONIC_RAW
    return VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT;
# else
    return VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT;
# endif
#else
    FIXME("No mapping for VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT on this platform.\n");
    return VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT;
#endif
}

static VkTimeDomainEXT map_to_host_time_domain(VkTimeDomainEXT domain)
{
    /* Matches ntdll/unix/sync.c's performance counter implementation. */
    if (domain == VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT)
        return get_performance_counter_time_domain();

    return domain;
}

static inline uint64_t convert_monotonic_timestamp(uint64_t value)
{
    return value / (NANOSECONDS_IN_A_SECOND / TICKSPERSEC);
}

static inline uint64_t convert_timestamp(VkTimeDomainEXT host_domain, VkTimeDomainEXT target_domain, uint64_t value)
{
    if (host_domain == target_domain)
        return value;

    /* Convert between MONOTONIC time in ns -> QueryPerformanceCounter */
    if ((host_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT || host_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT)
        && target_domain == VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT)
        return convert_monotonic_timestamp(value);

    FIXME("Couldn't translate between host domain %d and target domain %d\n", host_domain, target_domain);
    return value;
}
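
/* The application requests timestamps in Windows time domains (e.g. QPC); the
 * infos are copied so each domain can be rewritten to the matching host clock,
 * and the returned values are converted back afterwards. */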
static VkResult wine_vk_get_timestamps(struct wine_device *device, uint32_t timestamp_count,
                                       const VkCalibratedTimestampInfoEXT *timestamp_infos,
                                       uint64_t *timestamps, uint64_t *max_deviation,
                                       VkResult (*get_timestamps)(VkDevice, uint32_t, const VkCalibratedTimestampInfoEXT *, uint64_t *, uint64_t *))
{
    VkCalibratedTimestampInfoEXT *host_timestamp_infos;
    unsigned int i;
    VkResult res;

    if (timestamp_count == 0)
        return VK_SUCCESS;

    if (!(host_timestamp_infos = calloc(timestamp_count, sizeof(VkCalibratedTimestampInfoEXT))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    for (i = 0; i < timestamp_count; i++)
    {
        host_timestamp_infos[i].sType = timestamp_infos[i].sType;
        host_timestamp_infos[i].pNext = timestamp_infos[i].pNext;
        host_timestamp_infos[i].timeDomain = map_to_host_time_domain(timestamp_infos[i].timeDomain);
    }

    res = get_timestamps(device->host_device, timestamp_count, host_timestamp_infos, timestamps, max_deviation);
    if (res == VK_SUCCESS)
    {
        for (i = 0; i < timestamp_count; i++)
            timestamps[i] = convert_timestamp(host_timestamp_infos[i].timeDomain, timestamp_infos[i].timeDomain, timestamps[i]);
    }

    free(host_timestamp_infos);

    return res;
}

static VkResult wine_vk_get_time_domains(struct wine_phys_dev *phys_dev,
                                         uint32_t *time_domain_count,
                                         VkTimeDomainEXT *time_domains,
                                         VkResult (*get_domains)(VkPhysicalDevice, uint32_t *, VkTimeDomainEXT *))
{
    BOOL supports_device = FALSE, supports_monotonic = FALSE, supports_monotonic_raw = FALSE;
    const VkTimeDomainEXT performance_counter_domain = get_performance_counter_time_domain();
    VkTimeDomainEXT *host_time_domains;
    uint32_t host_time_domain_count;
    VkTimeDomainEXT out_time_domains[2];
    uint32_t out_time_domain_count;
    unsigned int i;
    VkResult res;

    /* Find out the time domains supported on the host */
    res = get_domains(phys_dev->host_physical_device, &host_time_domain_count, NULL);
    if (res != VK_SUCCESS)
        return res;

    if (!(host_time_domains = malloc(sizeof(VkTimeDomainEXT) * host_time_domain_count)))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = get_domains(phys_dev->host_physical_device, &host_time_domain_count, host_time_domains);
    if (res != VK_SUCCESS)
    {
        free(host_time_domains);
        return res;
    }

    for (i = 0; i < host_time_domain_count; i++)
    {
        if (host_time_domains[i] == VK_TIME_DOMAIN_DEVICE_EXT)
            supports_device = TRUE;
        else if (host_time_domains[i] == VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT)
            supports_monotonic = TRUE;
        else if (host_time_domains[i] == VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT)
            supports_monotonic_raw = TRUE;
        else
            FIXME("Unknown time domain %d\n", host_time_domains[i]);
    }

    free(host_time_domains);

    out_time_domain_count = 0;

    /* Map our monotonic times -> QPC */
    if (supports_monotonic_raw && performance_counter_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT)
        out_time_domains[out_time_domain_count++] = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT;
    else if (supports_monotonic && performance_counter_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT)
        out_time_domains[out_time_domain_count++] = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT;
    else
        FIXME("VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT not supported on this platform.\n");

    /* Forward the device domain time */
    if (supports_device)
        out_time_domains[out_time_domain_count++] = VK_TIME_DOMAIN_DEVICE_EXT;

    /* Send the count/domains back to the app */
    if (!time_domains)
    {
        *time_domain_count = out_time_domain_count;
        return VK_SUCCESS;
    }

    for (i = 0; i < min(*time_domain_count, out_time_domain_count); i++)
        time_domains[i] = out_time_domains[i];

    res = *time_domain_count < out_time_domain_count ? VK_INCOMPLETE : VK_SUCCESS;
    *time_domain_count = out_time_domain_count;
    return res;
}

VkResult wine_vkGetCalibratedTimestampsEXT(VkDevice handle, uint32_t timestamp_count,
                                           const VkCalibratedTimestampInfoEXT *timestamp_infos,
                                           uint64_t *timestamps, uint64_t *max_deviation)
{
    struct wine_device *device = wine_device_from_handle(handle);

    TRACE("%p, %u, %p, %p, %p\n", device, timestamp_count, timestamp_infos, timestamps, max_deviation);

    return wine_vk_get_timestamps(device, timestamp_count, timestamp_infos, timestamps, max_deviation,
                                  device->funcs.p_vkGetCalibratedTimestampsEXT);
}

VkResult wine_vkGetCalibratedTimestampsKHR(VkDevice handle, uint32_t timestamp_count,
                                           const VkCalibratedTimestampInfoKHR *timestamp_infos,
                                           uint64_t *timestamps, uint64_t *max_deviation)
{
    struct wine_device *device = wine_device_from_handle(handle);

    TRACE("%p, %u, %p, %p, %p\n", device, timestamp_count, timestamp_infos, timestamps, max_deviation);

    return wine_vk_get_timestamps(device, timestamp_count, timestamp_infos, timestamps, max_deviation,
                                  device->funcs.p_vkGetCalibratedTimestampsKHR);
}

VkResult wine_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(VkPhysicalDevice handle,
                                                             uint32_t *time_domain_count,
                                                             VkTimeDomainEXT *time_domains)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(handle);

    TRACE("%p, %p, %p\n", phys_dev, time_domain_count, time_domains);

    return wine_vk_get_time_domains(phys_dev, time_domain_count, time_domains,
                                    phys_dev->instance->funcs.p_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT);
}

VkResult wine_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR(VkPhysicalDevice handle,
                                                             uint32_t *time_domain_count,
                                                             VkTimeDomainKHR *time_domains)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(handle);

    TRACE("%p, %p, %p\n", phys_dev, time_domain_count, time_domains);

    return wine_vk_get_time_domains(phys_dev, time_domain_count, time_domains,
                                    phys_dev->instance->funcs.p_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR);
}

void wine_vkGetPhysicalDeviceExternalSemaphoreProperties(VkPhysicalDevice phys_dev,
                                                         const VkPhysicalDeviceExternalSemaphoreInfo *info,
                                                         VkExternalSemaphoreProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalSemaphoreFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(VkPhysicalDevice phys_dev,
                                                            const VkPhysicalDeviceExternalSemaphoreInfo *info,
                                                            VkExternalSemaphoreProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalSemaphoreFeatures = 0;
}
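
/* A win32 surface wraps two host objects: the surface created by the Wine graphics
 * driver and the host VkSurfaceKHR it maps to, which is what swapchain creation uses. */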
VkResult wine_vkCreateWin32SurfaceKHR(VkInstance handle, const VkWin32SurfaceCreateInfoKHR *create_info,
                                      const VkAllocationCallbacks *allocator, VkSurfaceKHR *surface)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    VkWin32SurfaceCreateInfoKHR create_info_host = *create_info;
    struct wine_surface *object;
    HWND dummy = NULL;
    VkResult res;

    if (allocator) FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object)))) return VK_ERROR_OUT_OF_HOST_MEMORY;

    /* Windows allows surfaces to be created with no HWND, they return VK_ERROR_SURFACE_LOST_KHR later */
    if (!(object->hwnd = create_info->hwnd))
    {
        static const WCHAR staticW[] = {'s','t','a','t','i','c',0};
        UNICODE_STRING static_us = RTL_CONSTANT_STRING(staticW);
        dummy = NtUserCreateWindowEx(0, &static_us, &static_us, &static_us, WS_POPUP,
                                     0, 0, 0, 0, NULL, NULL, NULL, NULL, 0, NULL, 0, FALSE);
        WARN("Created dummy window %p for null surface window\n", dummy);
        create_info_host.hwnd = object->hwnd = dummy;
    }

    res = instance->funcs.p_vkCreateWin32SurfaceKHR(instance->host_instance, &create_info_host,
                                                    NULL /* allocator */, &object->driver_surface);
    if (res != VK_SUCCESS)
    {
        if (dummy) NtUserDestroyWindow(dummy);
        free(object);
        return res;
    }

    object->host_surface = vk_funcs->p_wine_get_host_surface(object->driver_surface);
    if (dummy) NtUserDestroyWindow(dummy);
    window_surfaces_insert(object);

    *surface = wine_surface_to_handle(object);
    add_handle_mapping(instance, *surface, object->host_surface, &object->wrapper_entry);
    return VK_SUCCESS;
}

void wine_vkDestroySurfaceKHR(VkInstance handle, VkSurfaceKHR surface,
                              const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    struct wine_surface *object = wine_surface_from_handle(surface);

    if (!object)
        return;

    instance->funcs.p_vkDestroySurfaceKHR(instance->host_instance, object->driver_surface, NULL);
    remove_handle_mapping(instance, &object->wrapper_entry);
    window_surfaces_remove(object);

    free(object);
}
1637 static BOOL extents_equals(const VkExtent2D *extents, const RECT *rect)
1639 return extents->width == rect->right - rect->left &&
1640 extents->height == rect->bottom - rect->top;
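/* Note: both acquire paths below succeed on the host first and then compare the
 * swapchain extents against the window's current client rectangle; if the
 * window was resized since the swapchain was created, VK_SUCCESS is downgraded
 * to VK_SUBOPTIMAL_KHR so the application knows it should recreate the
 * swapchain. */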
1643 VkResult wine_vkAcquireNextImage2KHR(VkDevice device_handle, const VkAcquireNextImageInfoKHR *acquire_info,
1644 uint32_t *image_index)
1646 struct wine_swapchain *swapchain = wine_swapchain_from_handle(acquire_info->swapchain);
1647 struct wine_device *device = wine_device_from_handle(device_handle);
1648 VkAcquireNextImageInfoKHR acquire_info_host = *acquire_info;
1649 struct wine_surface *surface = swapchain->surface;
1650 RECT client_rect;
1651 VkResult res;
1653 acquire_info_host.swapchain = swapchain->host_swapchain;
1654 res = device->funcs.p_vkAcquireNextImage2KHR(device->host_device, &acquire_info_host, image_index);
1656 if (res == VK_SUCCESS && NtUserGetClientRect(surface->hwnd, &client_rect, get_win_monitor_dpi(surface->hwnd)) &&
1657 !extents_equals(&swapchain->extents, &client_rect))
1659 WARN("Swapchain size %dx%d does not match client rect %s, returning VK_SUBOPTIMAL_KHR\n",
1660 swapchain->extents.width, swapchain->extents.height, wine_dbgstr_rect(&client_rect));
1661 return VK_SUBOPTIMAL_KHR;
1664 return res;
1667 VkResult wine_vkAcquireNextImageKHR(VkDevice device_handle, VkSwapchainKHR swapchain_handle, uint64_t timeout,
1668 VkSemaphore semaphore, VkFence fence, uint32_t *image_index)
1670 struct wine_swapchain *swapchain = wine_swapchain_from_handle(swapchain_handle);
1671 struct wine_device *device = wine_device_from_handle(device_handle);
1672 struct wine_surface *surface = swapchain->surface;
1673 RECT client_rect;
1674 VkResult res;
1676 res = device->funcs.p_vkAcquireNextImageKHR(device->host_device, swapchain->host_swapchain, timeout,
1677 semaphore, fence, image_index);
1679 if (res == VK_SUCCESS && NtUserGetClientRect(surface->hwnd, &client_rect, get_win_monitor_dpi(surface->hwnd)) &&
1680 !extents_equals(&swapchain->extents, &client_rect))
1682 WARN("Swapchain size %dx%d does not match client rect %s, returning VK_SUBOPTIMAL_KHR\n",
1683 swapchain->extents.width, swapchain->extents.height, wine_dbgstr_rect(&client_rect));
1684 return VK_SUBOPTIMAL_KHR;
1687 return res;
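/* Note on wine_vkCreateSwapchainKHR() below: the surface and oldSwapchain
 * handles are translated to their host counterparts, and the requested image
 * extent is clamped up to the host surface's minImageExtent, since an empty
 * Win32 client rect (e.g. a minimized window) is often rejected by host
 * implementations. The extent originally requested by the application is
 * remembered in the wrapper so the acquire paths above and the present path
 * below can detect stale sizes. */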
1690 VkResult wine_vkCreateSwapchainKHR(VkDevice device_handle, const VkSwapchainCreateInfoKHR *create_info,
1691 const VkAllocationCallbacks *allocator, VkSwapchainKHR *swapchain_handle)
1693 struct wine_swapchain *object, *old_swapchain = wine_swapchain_from_handle(create_info->oldSwapchain);
1694 struct wine_surface *surface = wine_surface_from_handle(create_info->surface);
1695 struct wine_device *device = wine_device_from_handle(device_handle);
1696 struct wine_phys_dev *physical_device = device->phys_dev;
1697 struct wine_instance *instance = physical_device->instance;
1698 VkSwapchainCreateInfoKHR create_info_host = *create_info;
1699 VkSurfaceCapabilitiesKHR capabilities;
1700 VkResult res;
1702 if (!NtUserIsWindow(surface->hwnd))
1704 ERR("surface %p, hwnd %p is invalid!\n", surface, surface->hwnd);
1705 return VK_ERROR_INITIALIZATION_FAILED;
1708 if (surface) create_info_host.surface = surface->host_surface;
1709 if (old_swapchain) create_info_host.oldSwapchain = old_swapchain->host_swapchain;
1711 /* Windows allows the client rect to be empty, but host Vulkan often doesn't; clamp the extents up to the host's minimum image extent */
1712 res = instance->funcs.p_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device->host_physical_device,
1713 surface->host_surface, &capabilities);
1714 if (res != VK_SUCCESS) return res;
1716 create_info_host.imageExtent.width = max(create_info_host.imageExtent.width, capabilities.minImageExtent.width);
1717 create_info_host.imageExtent.height = max(create_info_host.imageExtent.height, capabilities.minImageExtent.height);
1719 if (!(object = calloc(1, sizeof(*object)))) return VK_ERROR_OUT_OF_HOST_MEMORY;
1720 res = device->funcs.p_vkCreateSwapchainKHR(device->host_device, &create_info_host, NULL, &object->host_swapchain);
1721 if (res != VK_SUCCESS)
1723 free(object);
1724 return res;
1727 object->surface = surface;
1728 object->extents = create_info->imageExtent;
1730 *swapchain_handle = wine_swapchain_to_handle(object);
1731 add_handle_mapping(instance, *swapchain_handle, object->host_swapchain, &object->wrapper_entry);
1732 return VK_SUCCESS;
1735 void wine_vkDestroySwapchainKHR(VkDevice device_handle, VkSwapchainKHR swapchain_handle,
1736 const VkAllocationCallbacks *allocator)
1738 struct wine_device *device = wine_device_from_handle(device_handle);
1739 struct wine_swapchain *swapchain = wine_swapchain_from_handle(swapchain_handle);
1741 if (allocator) FIXME("Support for allocation callbacks not implemented yet\n");
1742 if (!swapchain) return;
1744 device->funcs.p_vkDestroySwapchainKHR(device->host_device, swapchain->host_swapchain, NULL);
1745 remove_handle_mapping(device->phys_dev->instance, &swapchain->wrapper_entry);
1747 free(swapchain);
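/* Note on wine_vkQueuePresentKHR() below: the swapchain array is rewritten to
 * host handles (using a small stack buffer, falling back to the heap for more
 * than 16 swapchains), and the corresponding driver surfaces are handed to the
 * Wine driver's present hook. After the host present, each swapchain's window
 * is re-checked: a destroyed window turns the per-swapchain result into
 * VK_ERROR_OUT_OF_DATE_KHR, and a resized client area into VK_SUBOPTIMAL_KHR.
 * When the fps debug channel is enabled, an approximate per-queue frame rate
 * is traced. */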
1750 VkResult wine_vkQueuePresentKHR(VkQueue queue_handle, const VkPresentInfoKHR *present_info)
1752 VkSwapchainKHR swapchains_buffer[16], *swapchains = swapchains_buffer;
1753 VkSurfaceKHR surfaces_buffer[ARRAY_SIZE(swapchains_buffer)], *surfaces = surfaces_buffer;
1754 struct wine_queue *queue = wine_queue_from_handle(queue_handle);
1755 VkPresentInfoKHR present_info_host = *present_info;
1756 VkResult res;
1757 UINT i;
1759 if (present_info->swapchainCount > ARRAY_SIZE(swapchains_buffer) &&
1760 (!(swapchains = malloc(present_info->swapchainCount * sizeof(*swapchains))) ||
1761 !(surfaces = malloc(present_info->swapchainCount * sizeof(*surfaces)))))
1763 free(swapchains);
1764 return VK_ERROR_OUT_OF_HOST_MEMORY;
1767 for (i = 0; i < present_info->swapchainCount; i++)
1769 struct wine_swapchain *swapchain = wine_swapchain_from_handle(present_info->pSwapchains[i]);
1770 struct wine_surface *surface = swapchain->surface;
1771 swapchains[i] = swapchain->host_swapchain;
1772 surfaces[i] = surface->driver_surface;
1775 present_info_host.pSwapchains = swapchains;
1777 res = vk_funcs->p_vkQueuePresentKHR(queue->host_queue, &present_info_host, surfaces);
1779 for (i = 0; i < present_info->swapchainCount; i++)
1781 struct wine_swapchain *swapchain = wine_swapchain_from_handle(present_info->pSwapchains[i]);
1782 VkResult swapchain_res = present_info->pResults ? present_info->pResults[i] : res;
1783 struct wine_surface *surface = swapchain->surface;
1784 RECT client_rect;
1786 if (swapchain_res < VK_SUCCESS) continue;
1787 if (!NtUserGetClientRect(surface->hwnd, &client_rect, get_win_monitor_dpi(surface->hwnd)))
1789 WARN("Swapchain window %p is invalid, returning VK_ERROR_OUT_OF_DATE_KHR\n", surface->hwnd);
1790 if (present_info->pResults) present_info->pResults[i] = VK_ERROR_OUT_OF_DATE_KHR;
1791 if (res >= VK_SUCCESS) res = VK_ERROR_OUT_OF_DATE_KHR;
1793 else if (swapchain_res != VK_SUCCESS)
1794 WARN("Present returned status %d for swapchain %p\n", swapchain_res, swapchain);
1795 else if (!extents_equals(&swapchain->extents, &client_rect))
1797 WARN("Swapchain size %dx%d does not match client rect %s, returning VK_SUBOPTIMAL_KHR\n",
1798 swapchain->extents.width, swapchain->extents.height, wine_dbgstr_rect(&client_rect));
1799 if (present_info->pResults) present_info->pResults[i] = VK_SUBOPTIMAL_KHR;
1800 if (res == VK_SUCCESS) res = VK_SUBOPTIMAL_KHR;
1804 if (swapchains != swapchains_buffer) free(swapchains);
1805 if (surfaces != surfaces_buffer) free(surfaces);
1807 if (TRACE_ON(fps))
1809 static unsigned long frames, frames_total;
1810 static long prev_time, start_time;
1811 DWORD time;
1813 time = NtGetTickCount();
1814 frames++;
1815 frames_total++;
1817 if (time - prev_time > 1500)
1819 TRACE_(fps)("%p @ approx %.2ffps, total %.2ffps\n", queue,
1820 1000.0 * frames / (time - prev_time),
1821 1000.0 * frames_total / (time - start_time));
1822 prev_time = time;
1823 frames = 0;
1825 if (!start_time) start_time = time;
1829 return res;
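/* Note on the wow64 path in wine_vkAllocateMemory() below: when the device
 * supports VK_EXT_external_memory_host, host-visible allocations are backed by
 * memory obtained through NtAllocateVirtualMemory() with zero_bits, so the
 * eventual mapping fits in the 32-bit client address space, and that memory is
 * imported via VkImportMemoryHostPointerInfoEXT. The imported allocation's
 * size is rounded up to the import alignment cached in external_memory_align;
 * for example, with a 0x1000-byte alignment a request of 0x1234 bytes becomes
 * (0x1234 + 0xfff) & ~0xfff = 0x2000 bytes. */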
1832 VkResult wine_vkAllocateMemory(VkDevice handle, const VkMemoryAllocateInfo *alloc_info,
1833 const VkAllocationCallbacks *allocator, VkDeviceMemory *ret)
1835 struct wine_device *device = wine_device_from_handle(handle);
1836 struct wine_device_memory *memory;
1837 VkMemoryAllocateInfo info = *alloc_info;
1838 VkImportMemoryHostPointerInfoEXT host_pointer_info;
1839 uint32_t mem_flags;
1840 void *mapping = NULL;
1841 VkResult result;
1843 /* For host visible memory, we try to use VK_EXT_external_memory_host on wow64
1844  * to ensure that the mapped pointer fits in 32 bits. */
1845 mem_flags = device->phys_dev->memory_properties.memoryTypes[alloc_info->memoryTypeIndex].propertyFlags;
1846 if (device->phys_dev->external_memory_align && (mem_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
1847 !find_next_struct(alloc_info->pNext, VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT))
1849 VkMemoryHostPointerPropertiesEXT props =
1851 .sType = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT,
1853 uint32_t i, align = device->phys_dev->external_memory_align - 1;
1854 SIZE_T alloc_size = info.allocationSize;
1855 static int once;
1857 if (!once++)
1858 FIXME("Using VK_EXT_external_memory_host\n");
1860 if (NtAllocateVirtualMemory(GetCurrentProcess(), &mapping, zero_bits, &alloc_size,
1861 MEM_COMMIT, PAGE_READWRITE))
1863 ERR("NtAllocateVirtualMemory failed\n");
1864 return VK_ERROR_OUT_OF_HOST_MEMORY;
1867 result = device->funcs.p_vkGetMemoryHostPointerPropertiesEXT(device->host_device,
1868 VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, mapping, &props);
1869 if (result != VK_SUCCESS)
1871 ERR("vkGetMemoryHostPointerPropertiesEXT failed: %d\n", result);
1872 return result;
1875 if (!(props.memoryTypeBits & (1u << info.memoryTypeIndex)))
1877 /* If the requested memory type cannot be used with external memory,
1878  * try to find a supported compatible type. */
1879 uint32_t mask = mem_flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1880 for (i = 0; i < device->phys_dev->memory_properties.memoryTypeCount; i++)
1882 if (!(props.memoryTypeBits & (1u << i)))
1883 continue;
1884 if ((device->phys_dev->memory_properties.memoryTypes[i].propertyFlags & mask) != mask)
1885 continue;
1887 TRACE("Memory type not compatible with host memory, using %u instead\n", i);
1888 info.memoryTypeIndex = i;
1889 break;
1891 if (i == device->phys_dev->memory_properties.memoryTypeCount)
1893 FIXME("Could not find a compatible memory type\n");
1894 alloc_size = 0;
1895 NtFreeVirtualMemory(GetCurrentProcess(), &mapping, &alloc_size, MEM_RELEASE);
1899 if (props.memoryTypeBits & (1u << info.memoryTypeIndex))
1901 host_pointer_info.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT;
1902 host_pointer_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
1903 host_pointer_info.pHostPointer = mapping;
1904 host_pointer_info.pNext = info.pNext;
1905 info.pNext = &host_pointer_info;
1907 info.allocationSize = (info.allocationSize + align) & ~align;
1911 if (!(memory = malloc(sizeof(*memory))))
1912 return VK_ERROR_OUT_OF_HOST_MEMORY;
1914 result = device->funcs.p_vkAllocateMemory(device->host_device, &info, NULL, &memory->host_memory);
1915 if (result != VK_SUCCESS)
1917 free(memory);
1918 return result;
1921 memory->size = info.allocationSize;
1922 memory->vm_map = mapping;
1924 *ret = (VkDeviceMemory)(uintptr_t)memory;
1925 add_handle_mapping(device->phys_dev->instance, *ret, memory->host_memory, &memory->wrapper_entry);
1926 return VK_SUCCESS;
1929 void wine_vkFreeMemory(VkDevice handle, VkDeviceMemory memory_handle, const VkAllocationCallbacks *allocator)
1931 struct wine_device *device = wine_device_from_handle(handle);
1932 struct wine_device_memory *memory;
1934 if (!memory_handle)
1935 return;
1936 memory = wine_device_memory_from_handle(memory_handle);
1938 if (memory->vm_map && !device->phys_dev->external_memory_align)
1940 const VkMemoryUnmapInfoKHR info =
1942 .sType = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR,
1943 .memory = memory->host_memory,
1944 .flags = VK_MEMORY_UNMAP_RESERVE_BIT_EXT,
1946 device->funcs.p_vkUnmapMemory2KHR(device->host_device, &info);
1949 device->funcs.p_vkFreeMemory(device->host_device, memory->host_memory, NULL);
1950 remove_handle_mapping(device->phys_dev->instance, &memory->wrapper_entry);
1952 if (memory->vm_map)
1954 SIZE_T alloc_size = 0;
1955 NtFreeVirtualMemory(GetCurrentProcess(), &memory->vm_map, &alloc_size, MEM_RELEASE);
1958 free(memory);
1961 VkResult wine_vkMapMemory(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset,
1962 VkDeviceSize size, VkMemoryMapFlags flags, void **data)
1964 const VkMemoryMapInfoKHR info =
1966 .sType = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR,
1967 .flags = flags,
1968 .memory = memory,
1969 .offset = offset,
1970 .size = size,
1973 return wine_vkMapMemory2KHR(device, &info, data);
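/* Note on wine_vkMapMemory2KHR() below: memory that went through the
 * external-memory-host path is already mapped, so the cached vm_map pointer is
 * returned directly. If the host supports placed mappings, address space is
 * first reserved with NtAllocateVirtualMemory() (respecting zero_bits) and the
 * host maps the whole allocation at that address via
 * VK_MEMORY_MAP_PLACED_BIT_EXT; otherwise the plain vkMapMemory path is used,
 * and on wow64 a resulting pointer above 4GB is rejected with
 * VK_ERROR_OUT_OF_HOST_MEMORY because it cannot be returned to a 32-bit
 * client. */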
1976 VkResult wine_vkMapMemory2KHR(VkDevice handle, const VkMemoryMapInfoKHR *map_info, void **data)
1978 struct wine_device *device = wine_device_from_handle(handle);
1979 struct wine_device_memory *memory = wine_device_memory_from_handle(map_info->memory);
1980 VkMemoryMapInfoKHR info = *map_info;
1981 VkMemoryMapPlacedInfoEXT placed_info =
1983 .sType = VK_STRUCTURE_TYPE_MEMORY_MAP_PLACED_INFO_EXT,
1985 VkResult result;
1987 info.memory = memory->host_memory;
1988 if (memory->vm_map)
1990 *data = (char *)memory->vm_map + info.offset;
1991 TRACE("returning %p\n", *data);
1992 return VK_SUCCESS;
1995 if (device->phys_dev->map_placed_align)
1997 SIZE_T alloc_size = memory->size;
1999 placed_info.pNext = info.pNext;
2000 info.pNext = &placed_info;
2001 info.offset = 0;
2002 info.size = VK_WHOLE_SIZE;
2003 info.flags |= VK_MEMORY_MAP_PLACED_BIT_EXT;
2005 if (NtAllocateVirtualMemory(GetCurrentProcess(), &placed_info.pPlacedAddress, zero_bits, &alloc_size,
2006 MEM_COMMIT, PAGE_READWRITE))
2008 ERR("NtAllocateVirtualMemory failed\n");
2009 return VK_ERROR_OUT_OF_HOST_MEMORY;
2013 if (device->funcs.p_vkMapMemory2KHR)
2015 result = device->funcs.p_vkMapMemory2KHR(device->host_device, &info, data);
2017 else
2019 assert(!info.pNext);
2020 result = device->funcs.p_vkMapMemory(device->host_device, info.memory, info.offset,
2021 info.size, info.flags, data);
2024 if (placed_info.pPlacedAddress)
2026 if (result != VK_SUCCESS)
2028 SIZE_T alloc_size = 0;
2029 ERR("vkMapMemory2KHR failed: %d\n", result);
2030 NtFreeVirtualMemory(GetCurrentProcess(), &placed_info.pPlacedAddress, &alloc_size, MEM_RELEASE);
2031 return result;
2033 memory->vm_map = placed_info.pPlacedAddress;
2034 *data = (char *)memory->vm_map + map_info->offset;
2035 TRACE("Using placed mapping %p\n", memory->vm_map);
2038 #ifdef _WIN64
2039 if (NtCurrentTeb()->WowTebOffset && result == VK_SUCCESS && (UINT_PTR)*data >> 32)
2041 FIXME("returned mapping %p does not fit in a 32-bit pointer\n", *data);
2042 device->funcs.p_vkUnmapMemory(device->host_device, memory->host_memory);
2043 *data = NULL;
2044 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2046 #endif
2048 return result;
2051 void wine_vkUnmapMemory(VkDevice device, VkDeviceMemory memory)
2053 const VkMemoryUnmapInfoKHR info =
2055 .sType = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR,
2056 .memory = memory,
2059 wine_vkUnmapMemory2KHR(device, &info);
2062 VkResult wine_vkUnmapMemory2KHR(VkDevice handle, const VkMemoryUnmapInfoKHR *unmap_info)
2064 struct wine_device *device = wine_device_from_handle(handle);
2065 struct wine_device_memory *memory = wine_device_memory_from_handle(unmap_info->memory);
2066 VkMemoryUnmapInfoKHR info;
2067 VkResult result;
2069 if (memory->vm_map && device->phys_dev->external_memory_align)
2070 return VK_SUCCESS;
2072 if (!device->funcs.p_vkUnmapMemory2KHR)
2074 assert(!unmap_info->pNext && !memory->vm_map);
2075 device->funcs.p_vkUnmapMemory(device->host_device, memory->host_memory);
2076 return VK_SUCCESS;
2079 info = *unmap_info;
2080 info.memory = memory->host_memory;
2081 if (memory->vm_map)
2082 info.flags |= VK_MEMORY_UNMAP_RESERVE_BIT_EXT;
2084 result = device->funcs.p_vkUnmapMemory2KHR(device->host_device, &info);
2086 if (result == VK_SUCCESS && memory->vm_map)
2088 SIZE_T size = 0;
2089 NtFreeVirtualMemory(GetCurrentProcess(), &memory->vm_map, &size, MEM_RELEASE);
2090 memory->vm_map = NULL;
2092 return result;
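/* Note: when the external-memory-host path is in use, buffers and images are
 * created with a matching VkExternalMemoryBufferCreateInfo /
 * VkExternalMemoryImageCreateInfo so they can later be bound to memory imported
 * from a host pointer; the two wrappers below chain that structure in unless
 * the application already provided one. */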
2095 VkResult wine_vkCreateBuffer(VkDevice handle, const VkBufferCreateInfo *create_info,
2096 const VkAllocationCallbacks *allocator, VkBuffer *buffer)
2098 struct wine_device *device = wine_device_from_handle(handle);
2099 VkExternalMemoryBufferCreateInfo external_memory_info;
2100 VkBufferCreateInfo info = *create_info;
2102 if (device->phys_dev->external_memory_align &&
2103 !find_next_struct(info.pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO))
2105 external_memory_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO;
2106 external_memory_info.pNext = info.pNext;
2107 external_memory_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
2108 info.pNext = &external_memory_info;
2111 return device->funcs.p_vkCreateBuffer(device->host_device, &info, NULL, buffer);
2114 VkResult wine_vkCreateImage(VkDevice handle, const VkImageCreateInfo *create_info,
2115 const VkAllocationCallbacks *allocator, VkImage *image)
2117 struct wine_device *device = wine_device_from_handle(handle);
2118 VkExternalMemoryImageCreateInfo external_memory_info;
2119 VkImageCreateInfo info = *create_info;
2121 if (device->phys_dev->external_memory_align &&
2122 !find_next_struct(info.pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO))
2124 external_memory_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
2125 external_memory_info.pNext = info.pNext;
2126 external_memory_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
2127 info.pNext = &external_memory_info;
2130 return device->funcs.p_vkCreateImage(device->host_device, &info, NULL, image);
2133 static void adjust_surface_capabilities(struct wine_instance *instance, struct wine_surface *surface,
2134 VkSurfaceCapabilitiesKHR *capabilities)
2136 RECT client_rect;
2138 /* Many Windows games, for example Strange Brigade, No Man's Sky, Path of Exile
2139 * and World War Z, do not expect that maxImageCount can be set to 0.
2140 * A value of 0 means that there is no limit on the number of images.
2141 * Nvidia reports 8 on Windows, AMD 16.
2142 * https://vulkan.gpuinfo.org/displayreport.php?id=9122#surface
2143 * https://vulkan.gpuinfo.org/displayreport.php?id=9121#surface
2145 if (!capabilities->maxImageCount)
2146 capabilities->maxImageCount = max(capabilities->minImageCount, 16);
2148 /* Update the image extents to match what the Win32 WSI would provide. */
2149 /* FIXME: handle DPI scaling, somehow */
2150 NtUserGetClientRect(surface->hwnd, &client_rect, get_win_monitor_dpi(surface->hwnd));
2151 capabilities->minImageExtent.width = client_rect.right - client_rect.left;
2152 capabilities->minImageExtent.height = client_rect.bottom - client_rect.top;
2153 capabilities->maxImageExtent.width = client_rect.right - client_rect.left;
2154 capabilities->maxImageExtent.height = client_rect.bottom - client_rect.top;
2155 capabilities->currentExtent.width = client_rect.right - client_rect.left;
2156 capabilities->currentExtent.height = client_rect.bottom - client_rect.top;
2159 VkResult wine_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice device_handle, VkSurfaceKHR surface_handle,
2160 VkSurfaceCapabilitiesKHR *capabilities)
2162 struct wine_phys_dev *physical_device = wine_phys_dev_from_handle(device_handle);
2163 struct wine_surface *surface = wine_surface_from_handle(surface_handle);
2164 struct wine_instance *instance = physical_device->instance;
2165 VkResult res;
2167 if (!NtUserIsWindow(surface->hwnd)) return VK_ERROR_SURFACE_LOST_KHR;
2168 res = instance->funcs.p_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device->host_physical_device,
2169 surface->host_surface, capabilities);
2170 if (res == VK_SUCCESS) adjust_surface_capabilities(instance, surface, capabilities);
2171 return res;
2174 VkResult wine_vkGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice device_handle, const VkPhysicalDeviceSurfaceInfo2KHR *surface_info,
2175 VkSurfaceCapabilities2KHR *capabilities)
2177 struct wine_phys_dev *physical_device = wine_phys_dev_from_handle(device_handle);
2178 struct wine_surface *surface = wine_surface_from_handle(surface_info->surface);
2179 VkPhysicalDeviceSurfaceInfo2KHR surface_info_host = *surface_info;
2180 struct wine_instance *instance = physical_device->instance;
2181 VkResult res;
2183 if (!instance->funcs.p_vkGetPhysicalDeviceSurfaceCapabilities2KHR)
2185 /* Until the loader version exporting this function is common, emulate it using the older non-2 version. */
2186 if (surface_info->pNext || capabilities->pNext) FIXME("Emulating vkGetPhysicalDeviceSurfaceCapabilities2KHR, ignoring pNext.\n");
2187 return wine_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device_handle, surface_info->surface,
2188 &capabilities->surfaceCapabilities);
2191 surface_info_host.surface = surface->host_surface;
2193 if (!NtUserIsWindow(surface->hwnd)) return VK_ERROR_SURFACE_LOST_KHR;
2194 res = instance->funcs.p_vkGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device->host_physical_device,
2195 &surface_info_host, capabilities);
2196 if (res == VK_SUCCESS) adjust_surface_capabilities(instance, surface, &capabilities->surfaceCapabilities);
2197 return res;
2200 VkResult wine_vkGetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice device_handle, VkSurfaceKHR surface_handle,
2201 uint32_t *rect_count, VkRect2D *rects)
2203 struct wine_phys_dev *physical_device = wine_phys_dev_from_handle(device_handle);
2204 struct wine_surface *surface = wine_surface_from_handle(surface_handle);
2205 struct wine_instance *instance = physical_device->instance;
2207 if (!NtUserIsWindow(surface->hwnd))
2209 if (rects && !*rect_count) return VK_INCOMPLETE;
2210 if (rects) memset(rects, 0, sizeof(VkRect2D));
2211 *rect_count = 1;
2212 return VK_SUCCESS;
2215 return instance->funcs.p_vkGetPhysicalDevicePresentRectanglesKHR(physical_device->host_physical_device,
2216 surface->host_surface, rect_count, rects);
2219 VkResult wine_vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice device_handle, VkSurfaceKHR surface_handle,
2220 uint32_t *format_count, VkSurfaceFormatKHR *formats)
2222 struct wine_phys_dev *physical_device = wine_phys_dev_from_handle(device_handle);
2223 struct wine_surface *surface = wine_surface_from_handle(surface_handle);
2224 struct wine_instance *instance = physical_device->instance;
2226 return instance->funcs.p_vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device->host_physical_device, surface->host_surface,
2227 format_count, formats);
2230 VkResult wine_vkGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice device_handle, const VkPhysicalDeviceSurfaceInfo2KHR *surface_info,
2231 uint32_t *format_count, VkSurfaceFormat2KHR *formats)
2233 struct wine_phys_dev *physical_device = wine_phys_dev_from_handle(device_handle);
2234 struct wine_surface *surface = wine_surface_from_handle(surface_info->surface);
2235 VkPhysicalDeviceSurfaceInfo2KHR surface_info_host = *surface_info;
2236 struct wine_instance *instance = physical_device->instance;
2237 VkResult res;
2239 if (!physical_device->instance->funcs.p_vkGetPhysicalDeviceSurfaceFormats2KHR)
2241 VkSurfaceFormatKHR *surface_formats;
2242 UINT i;
2244 /* Until the loader version exporting this function is common, emulate it using the older non-2 version. */
2245 if (surface_info->pNext) FIXME("Emulating vkGetPhysicalDeviceSurfaceFormats2KHR, ignoring pNext.\n");
2247 if (!formats) return wine_vkGetPhysicalDeviceSurfaceFormatsKHR(device_handle, surface_info->surface, format_count, NULL);
2249 surface_formats = calloc(*format_count, sizeof(*surface_formats));
2250 if (!surface_formats) return VK_ERROR_OUT_OF_HOST_MEMORY;
2252 res = wine_vkGetPhysicalDeviceSurfaceFormatsKHR(device_handle, surface_info->surface, format_count, surface_formats);
2253 if (res == VK_SUCCESS || res == VK_INCOMPLETE)
2255 for (i = 0; i < *format_count; i++)
2256 formats[i].surfaceFormat = surface_formats[i];
2259 free(surface_formats);
2260 return res;
2263 surface_info_host.surface = surface->host_surface;
2265 return instance->funcs.p_vkGetPhysicalDeviceSurfaceFormats2KHR(physical_device->host_physical_device,
2266 &surface_info_host, format_count, formats);
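/* Note: the debug messenger and debug report wrappers below do not pass the
 * application's callback straight to the host driver. The user callback and
 * user data are stashed in the wrapper object, and the host is given the
 * conversion thunk (debug_utils_callback_conversion resp.
 * debug_report_callback_conversion) with the wrapper as its user data, so the
 * callback can be forwarded with arguments translated back to client
 * handles. */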
2269 VkResult wine_vkCreateDebugUtilsMessengerEXT(VkInstance handle,
2270 const VkDebugUtilsMessengerCreateInfoEXT *create_info,
2271 const VkAllocationCallbacks *allocator,
2272 VkDebugUtilsMessengerEXT *messenger)
2274 struct wine_instance *instance = wine_instance_from_handle(handle);
2275 VkDebugUtilsMessengerCreateInfoEXT wine_create_info;
2276 struct wine_debug_utils_messenger *object;
2277 VkResult res;
2279 if (allocator)
2280 FIXME("Support for allocation callbacks not implemented yet\n");
2282 if (!(object = calloc(1, sizeof(*object))))
2283 return VK_ERROR_OUT_OF_HOST_MEMORY;
2285 object->instance = instance;
2286 object->user_callback = create_info->pfnUserCallback;
2287 object->user_data = create_info->pUserData;
2289 wine_create_info = *create_info;
2291 wine_create_info.pfnUserCallback = (void *) &debug_utils_callback_conversion;
2292 wine_create_info.pUserData = object;
2294 res = instance->funcs.p_vkCreateDebugUtilsMessengerEXT(instance->host_instance, &wine_create_info,
2295 NULL, &object->host_debug_messenger);
2296 if (res != VK_SUCCESS)
2298 free(object);
2299 return res;
2302 *messenger = wine_debug_utils_messenger_to_handle(object);
2303 add_handle_mapping(instance, *messenger, object->host_debug_messenger, &object->wrapper_entry);
2304 return VK_SUCCESS;
2307 void wine_vkDestroyDebugUtilsMessengerEXT(VkInstance handle, VkDebugUtilsMessengerEXT messenger,
2308 const VkAllocationCallbacks *allocator)
2310 struct wine_instance *instance = wine_instance_from_handle(handle);
2311 struct wine_debug_utils_messenger *object;
2313 object = wine_debug_utils_messenger_from_handle(messenger);
2315 if (!object)
2316 return;
2318 instance->funcs.p_vkDestroyDebugUtilsMessengerEXT(instance->host_instance, object->host_debug_messenger, NULL);
2319 remove_handle_mapping(instance, &object->wrapper_entry);
2321 free(object);
2324 VkResult wine_vkCreateDebugReportCallbackEXT(VkInstance handle,
2325 const VkDebugReportCallbackCreateInfoEXT *create_info,
2326 const VkAllocationCallbacks *allocator,
2327 VkDebugReportCallbackEXT *callback)
2329 struct wine_instance *instance = wine_instance_from_handle(handle);
2330 VkDebugReportCallbackCreateInfoEXT wine_create_info;
2331 struct wine_debug_report_callback *object;
2332 VkResult res;
2334 if (allocator)
2335 FIXME("Support for allocation callbacks not implemented yet\n");
2337 if (!(object = calloc(1, sizeof(*object))))
2338 return VK_ERROR_OUT_OF_HOST_MEMORY;
2340 object->instance = instance;
2341 object->user_callback = create_info->pfnCallback;
2342 object->user_data = create_info->pUserData;
2344 wine_create_info = *create_info;
2346 wine_create_info.pfnCallback = (void *) debug_report_callback_conversion;
2347 wine_create_info.pUserData = object;
2349 res = instance->funcs.p_vkCreateDebugReportCallbackEXT(instance->host_instance, &wine_create_info,
2350 NULL, &object->host_debug_callback);
2351 if (res != VK_SUCCESS)
2353 free(object);
2354 return res;
2357 *callback = wine_debug_report_callback_to_handle(object);
2358 add_handle_mapping(instance, *callback, object->host_debug_callback, &object->wrapper_entry);
2359 return VK_SUCCESS;
2362 void wine_vkDestroyDebugReportCallbackEXT(VkInstance handle, VkDebugReportCallbackEXT callback,
2363 const VkAllocationCallbacks *allocator)
2365 struct wine_instance *instance = wine_instance_from_handle(handle);
2366 struct wine_debug_report_callback *object;
2368 object = wine_debug_report_callback_from_handle(callback);
2370 if (!object)
2371 return;
2373 instance->funcs.p_vkDestroyDebugReportCallbackEXT(instance->host_instance, object->host_debug_callback, NULL);
2374 remove_handle_mapping(instance, &object->wrapper_entry);
2376 free(object);
2379 VkResult wine_vkCreateDeferredOperationKHR(VkDevice handle,
2380 const VkAllocationCallbacks* allocator,
2381 VkDeferredOperationKHR* operation)
2383 struct wine_device *device = wine_device_from_handle(handle);
2384 struct wine_deferred_operation *object;
2385 VkResult res;
2387 if (allocator)
2388 FIXME("Support for allocation callbacks not implemented yet\n");
2390 if (!(object = calloc(1, sizeof(*object))))
2391 return VK_ERROR_OUT_OF_HOST_MEMORY;
2393 res = device->funcs.p_vkCreateDeferredOperationKHR(device->host_device, NULL, &object->host_deferred_operation);
2395 if (res != VK_SUCCESS)
2397 free(object);
2398 return res;
2401 init_conversion_context(&object->ctx);
2403 *operation = wine_deferred_operation_to_handle(object);
2404 add_handle_mapping(device->phys_dev->instance, *operation, object->host_deferred_operation, &object->wrapper_entry);
2405 return VK_SUCCESS;
2408 void wine_vkDestroyDeferredOperationKHR(VkDevice handle,
2409 VkDeferredOperationKHR operation,
2410 const VkAllocationCallbacks* allocator)
2412 struct wine_device *device = wine_device_from_handle(handle);
2413 struct wine_deferred_operation *object;
2415 object = wine_deferred_operation_from_handle(operation);
2417 if (!object)
2418 return;
2420 device->funcs.p_vkDestroyDeferredOperationKHR(device->host_device, object->host_deferred_operation, NULL);
2421 remove_handle_mapping(device->phys_dev->instance, &object->wrapper_entry);
2423 free_conversion_context(&object->ctx);
2424 free(object);
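/* Note: the entry points below answer availability queries from the PE side of
 * winevulkan. vkCreateWin32SurfaceKHR and
 * vkGetPhysicalDeviceWin32PresentationSupportKHR are only reported when the
 * instance enabled win32 surface support (enable_win32_surface); everything
 * else is forwarded to the host loader's vkGetInstanceProcAddr /
 * vkGetDeviceProcAddr. */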
2427 #ifdef _WIN64
2429 NTSTATUS vk_is_available_instance_function(void *arg)
2431 struct is_available_instance_function_params *params = arg;
2432 struct wine_instance *instance = wine_instance_from_handle(params->instance);
2434 if (!strcmp(params->name, "vkCreateWin32SurfaceKHR"))
2435 return instance->enable_win32_surface;
2436 if (!strcmp(params->name, "vkGetPhysicalDeviceWin32PresentationSupportKHR"))
2437 return instance->enable_win32_surface;
2439 return !!vk_funcs->p_vkGetInstanceProcAddr(instance->host_instance, params->name);
2442 NTSTATUS vk_is_available_device_function(void *arg)
2444 struct is_available_device_function_params *params = arg;
2445 struct wine_device *device = wine_device_from_handle(params->device);
2446 return !!vk_funcs->p_vkGetDeviceProcAddr(device->host_device, params->name);
2449 #endif /* _WIN64 */
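/* 32-bit (wow64) variants of the availability queries: the parameters arrive as
 * 32-bit client pointers packed into UINT32 fields and are widened with
 * UlongToPtr() before the same host lookups are performed. */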
2451 NTSTATUS vk_is_available_instance_function32(void *arg)
2453 struct
2455 UINT32 instance;
2456 UINT32 name;
2457 } *params = arg;
2458 struct wine_instance *instance = wine_instance_from_handle(UlongToPtr(params->instance));
2459 return !!vk_funcs->p_vkGetInstanceProcAddr(instance->host_instance, UlongToPtr(params->name));
2462 NTSTATUS vk_is_available_device_function32(void *arg)
2464 struct
2466 UINT32 device;
2467 UINT32 name;
2468 } *params = arg;
2469 struct wine_device *device = wine_device_from_handle(UlongToPtr(params->device));
2470 return !!vk_funcs->p_vkGetDeviceProcAddr(device->host_device, UlongToPtr(params->name));