/* contrib/vhost-user-gpu/vugbm.c */
1 /*
2 * Virtio vhost-user GPU Device
4 * DRM helpers
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 */
10 #include "vugbm.h"
12 static bool
13 mem_alloc_bo(struct vugbm_buffer *buf)
15 buf->mmap = g_malloc(buf->width * buf->height * 4);
16 buf->stride = buf->width * 4;
17 return true;
/* Release the heap buffer allocated by mem_alloc_bo(). */
static void
mem_free_bo(struct vugbm_buffer *buf)
{
    g_free(buf->mmap);
}
26 static bool
27 mem_map_bo(struct vugbm_buffer *buf)
29 return buf->mmap != NULL;
/* No-op: the fallback "mapping" is the g_malloc'd buffer itself. */
static void
mem_unmap_bo(struct vugbm_buffer *buf)
{
}
/* No-op: the mem fallback owns no device resources. */
static void
mem_device_destroy(struct vugbm_device *dev)
{
}
#ifdef CONFIG_MEMFD
/*
 * Local mirror of the kernel ABI struct from <linux/udmabuf.h>,
 * duplicated here so the build does not depend on new kernel headers.
 */
struct udmabuf_create {
    uint32_t memfd;   /* memfd whose pages back the dmabuf */
    uint32_t flags;   /* UDMABUF_FLAGS_*; left 0 by this code */
    uint64_t offset;  /* byte offset into the memfd (page aligned) */
    uint64_t size;    /* region size (page aligned) */
};

/* ioctl on /dev/udmabuf: wrap a memfd region into a dmabuf fd. */
#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
52 static size_t
53 udmabuf_get_size(struct vugbm_buffer *buf)
55 return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size);
58 static bool
59 udmabuf_alloc_bo(struct vugbm_buffer *buf)
61 int ret;
63 buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
64 if (buf->memfd < 0) {
65 return false;
68 ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
69 if (ret < 0) {
70 close(buf->memfd);
71 return false;
74 ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
75 if (ret < 0) {
76 close(buf->memfd);
77 return false;
80 buf->stride = buf->width * 4;
82 return true;
/* Close the backing memfd created by udmabuf_alloc_bo(). */
static void
udmabuf_free_bo(struct vugbm_buffer *buf)
{
    close(buf->memfd);
}
91 static bool
92 udmabuf_map_bo(struct vugbm_buffer *buf)
94 buf->mmap = mmap(NULL, udmabuf_get_size(buf),
95 PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
96 if (buf->mmap == MAP_FAILED) {
97 return false;
100 return true;
103 static bool
104 udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
106 struct udmabuf_create create = {
107 .memfd = buf->memfd,
108 .offset = 0,
109 .size = udmabuf_get_size(buf),
112 *fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);
114 return *fd >= 0;
/* Undo udmabuf_map_bo(): unmap the full page-rounded region. */
static void
udmabuf_unmap_bo(struct vugbm_buffer *buf)
{
    munmap(buf->mmap, udmabuf_get_size(buf));
}
/* Close the /dev/udmabuf fd opened in vugbm_device_init(). */
static void
udmabuf_device_destroy(struct vugbm_device *dev)
{
    close(dev->fd);
}
#endif
130 #ifdef CONFIG_GBM
131 static bool
132 alloc_bo(struct vugbm_buffer *buf)
134 struct gbm_device *dev = buf->dev->dev;
136 assert(!buf->bo);
138 buf->bo = gbm_bo_create(dev, buf->width, buf->height,
139 buf->format,
140 GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);
142 if (buf->bo) {
143 buf->stride = gbm_bo_get_stride(buf->bo);
144 return true;
147 return false;
/* Destroy the GBM buffer object created by alloc_bo(). */
static void
free_bo(struct vugbm_buffer *buf)
{
    gbm_bo_destroy(buf->bo);
}
156 static bool
157 map_bo(struct vugbm_buffer *buf)
159 uint32_t stride;
161 buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
162 GBM_BO_TRANSFER_READ_WRITE, &stride,
163 &buf->mmap_data);
165 assert(stride == buf->stride);
167 return buf->mmap != NULL;
/* Undo map_bo() using the opaque handle gbm_bo_map() returned. */
static void
unmap_bo(struct vugbm_buffer *buf)
{
    gbm_bo_unmap(buf->bo, buf->mmap_data);
}
176 static bool
177 get_fd(struct vugbm_buffer *buf, int *fd)
179 *fd = gbm_bo_get_fd(buf->bo);
181 return *fd >= 0;
/* Tear down the gbm_device created in vugbm_device_init(). */
static void
device_destroy(struct vugbm_device *dev)
{
    gbm_device_destroy(dev->dev);
}
#endif
191 void
192 vugbm_device_destroy(struct vugbm_device *dev)
194 if (!dev->inited) {
195 return;
198 dev->device_destroy(dev);
/*
 * Initialize the device, picking the best available backend:
 * GBM (if a gbm_device can be created on @fd), then the experimental
 * udmabuf backend (if /dev/udmabuf exists), then a plain-memory
 * fallback that cannot export dmabufs.
 *
 * Returns true when a dmabuf-capable backend was set up; false for the
 * mem fallback (callbacks are still installed and usable, but
 * dev->inited stays unset) or if opening /dev/udmabuf failed.
 */
bool
vugbm_device_init(struct vugbm_device *dev, int fd)
{
    dev->fd = fd;

#ifdef CONFIG_GBM
    dev->dev = gbm_create_device(fd);
#endif

    /*
     * The dead `if (0)` head lets every conditionally-compiled backend
     * below be a plain `else if`, regardless of which CONFIG_* options
     * are enabled.
     */
    if (0) {
        /* nothing */
    }
#ifdef CONFIG_GBM
    else if (dev->dev != NULL) {
        dev->alloc_bo = alloc_bo;
        dev->free_bo = free_bo;
        dev->get_fd = get_fd;
        dev->map_bo = map_bo;
        dev->unmap_bo = unmap_bo;
        dev->device_destroy = device_destroy;
    }
#endif
#ifdef CONFIG_MEMFD
    else if (g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
        /* Replace the caller's fd with the udmabuf device fd. */
        dev->fd = open("/dev/udmabuf", O_RDWR);
        if (dev->fd < 0) {
            return false;
        }
        g_debug("Using experimental udmabuf backend");
        dev->alloc_bo = udmabuf_alloc_bo;
        dev->free_bo = udmabuf_free_bo;
        dev->get_fd = udmabuf_get_fd;
        dev->map_bo = udmabuf_map_bo;
        dev->unmap_bo = udmabuf_unmap_bo;
        dev->device_destroy = udmabuf_device_destroy;
    }
#endif
    else {
        /*
         * Fallback: heap-backed buffers.  No get_fd hook is installed,
         * so vugbm_buffer_can_get_dmabuf_fd() will report false.
         * Returning false tells the caller dmabuf sharing is unavailable.
         */
        g_debug("Using mem fallback");
        dev->alloc_bo = mem_alloc_bo;
        dev->free_bo = mem_free_bo;
        dev->map_bo = mem_map_bo;
        dev->unmap_bo = mem_unmap_bo;
        dev->device_destroy = mem_device_destroy;
        return false;
    }

    dev->inited = true;
    return true;
}
252 static bool
253 vugbm_buffer_map(struct vugbm_buffer *buf)
255 struct vugbm_device *dev = buf->dev;
257 return dev->map_bo(buf);
260 static void
261 vugbm_buffer_unmap(struct vugbm_buffer *buf)
263 struct vugbm_device *dev = buf->dev;
265 dev->unmap_bo(buf);
268 bool
269 vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
271 if (!buffer->dev->get_fd) {
272 return false;
275 return true;
278 bool
279 vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
281 if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
282 !buffer->dev->get_fd(buffer, fd)) {
283 g_warning("Failed to get dmabuf");
284 return false;
287 if (*fd < 0) {
288 g_warning("error: dmabuf_fd < 0");
289 return false;
292 return true;
295 bool
296 vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
297 uint32_t width, uint32_t height)
299 buffer->dev = dev;
300 buffer->width = width;
301 buffer->height = height;
302 buffer->format = GBM_FORMAT_XRGB8888;
303 buffer->stride = 0; /* modified during alloc */
304 if (!dev->alloc_bo(buffer)) {
305 g_warning("alloc_bo failed");
306 return false;
309 if (!vugbm_buffer_map(buffer)) {
310 g_warning("map_bo failed");
311 goto err;
314 return true;
316 err:
317 dev->free_bo(buffer);
318 return false;
321 void
322 vugbm_buffer_destroy(struct vugbm_buffer *buffer)
324 struct vugbm_device *dev = buffer->dev;
326 vugbm_buffer_unmap(buffer);
327 dev->free_bo(buffer);