/*
 * Copyright 6WIND S.A., 2014
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version. See the COPYING file in the
 * top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/host-utils.h"
#include "qemu/sockets.h"

#include <sys/socket.h>
#include <sys/un.h>

#include "ivshmem-server.h"
/* log a message on stdout if verbose=1 */
#define IVSHMEM_SERVER_DEBUG(server, fmt, ...) do { \
        if ((server)->verbose) {         \
            printf(fmt, ## __VA_ARGS__); \
        }                                \
    } while (0)
/** maximum size of a huge page, used by ivshmem_server_ftruncate() */
#define IVSHMEM_SERVER_MAX_HUGEPAGE_SIZE (1024 * 1024 * 1024)

/** default listen backlog (number of sockets not accepted) */
#define IVSHMEM_SERVER_LISTEN_BACKLOG 10
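
/*
 * Wire format (server to client): every message is a single little-endian
 * int64 carrying a peer ID (or the protocol version, or -1), optionally
 * accompanied by exactly one file descriptor passed as SCM_RIGHTS
 * ancillary data. This is what ivshmem_server_send_one_msg() below emits.
 */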
/* send message to a client unix socket */
static int
ivshmem_server_send_one_msg(int sock_fd, int64_t peer_id, int fd)
{
    int ret;
    struct msghdr msg;
    struct iovec iov[1];
    union {
        struct cmsghdr cmsg;
        char control[CMSG_SPACE(sizeof(int))];
    } msg_control;
    struct cmsghdr *cmsg;

    peer_id = GINT64_TO_LE(peer_id);
    iov[0].iov_base = &peer_id;
    iov[0].iov_len = sizeof(peer_id);

    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = iov;
    msg.msg_iovlen = 1;

    /* if fd is specified, add it in a cmsg */
    if (fd >= 0) {
        memset(&msg_control, 0, sizeof(msg_control));
        msg.msg_control = &msg_control;
        msg.msg_controllen = sizeof(msg_control);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(fd));
    }

    ret = sendmsg(sock_fd, &msg, 0);
    if (ret <= 0) {
        return -1;
    }

    return 0;
}
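
/*
 * For reference, a minimal sketch (not part of this file) of how a client
 * can receive one such message with recvmsg(); the real receive path lives
 * in ivshmem-client.c:
 *
 *     int64_t peer_id;
 *     struct iovec iov = { .iov_base = &peer_id, .iov_len = sizeof(peer_id) };
 *     union {
 *         struct cmsghdr cmsg;
 *         char control[CMSG_SPACE(sizeof(int))];
 *     } msg_control;
 *     struct msghdr msg = {
 *         .msg_iov = &iov, .msg_iovlen = 1,
 *         .msg_control = &msg_control, .msg_controllen = sizeof(msg_control),
 *     };
 *     if (recvmsg(sock_fd, &msg, 0) == sizeof(peer_id)) {
 *         struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *         int fd = -1;
 *         if (c && c->cmsg_type == SCM_RIGHTS) {
 *             memcpy(&fd, CMSG_DATA(c), sizeof(fd));
 *         }
 *         peer_id = GINT64_FROM_LE(peer_id);
 *     }
 */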
/* free a peer when the server advertises a disconnection or when the
 * server is freed */
static void
ivshmem_server_free_peer(IvshmemServer *server, IvshmemServerPeer *peer)
{
    unsigned vector;
    IvshmemServerPeer *other_peer;

    IVSHMEM_SERVER_DEBUG(server, "free peer %" PRId64 "\n", peer->id);
    close(peer->sock_fd);
    QTAILQ_REMOVE(&server->peer_list, peer, next);

    /* advertise the deletion to other peers */
    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
        ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id, -1);
    }

    for (vector = 0; vector < peer->vectors_count; vector++) {
        event_notifier_cleanup(&peer->vectors[vector]);
    }

    g_free(peer);
}
/* send the peer id and the shm_fd just after a new client connection */
static int
ivshmem_server_send_initial_info(IvshmemServer *server, IvshmemServerPeer *peer)
{
    int ret;

    /* send our protocol version first */
    ret = ivshmem_server_send_one_msg(peer->sock_fd, IVSHMEM_PROTOCOL_VERSION,
                                      -1);
    if (ret < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot send version: %s\n",
                             strerror(errno));
        return -1;
    }

    /* send the peer id to the client */
    ret = ivshmem_server_send_one_msg(peer->sock_fd, peer->id, -1);
    if (ret < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot send peer id: %s\n",
                             strerror(errno));
        return -1;
    }

    /* send the shm_fd */
    ret = ivshmem_server_send_one_msg(peer->sock_fd, -1, server->shm_fd);
    if (ret < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot send shm fd: %s\n",
                             strerror(errno));
        return -1;
    }

    return 0;
}
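
/*
 * Handshake seen by a connecting client, in order:
 *   1. the protocol version (no fd)
 *   2. the client's own peer ID (no fd)
 *   3. -1 together with the shared-memory fd
 * ivshmem_server_handle_new_conn() then follows up with one message per
 * (peer, vector) pair carrying the corresponding eventfd.
 */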
/* handle message on listening unix socket (new client connection) */
static int
ivshmem_server_handle_new_conn(IvshmemServer *server)
{
    IvshmemServerPeer *peer, *other_peer;
    struct sockaddr_un unaddr;
    socklen_t unaddr_len;
    int newfd;
    unsigned i;

    /* accept the incoming connection */
    unaddr_len = sizeof(unaddr);
    newfd = qemu_accept(server->sock_fd,
                        (struct sockaddr *)&unaddr, &unaddr_len);

    if (newfd < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot accept() %s\n", strerror(errno));
        return -1;
    }

    qemu_set_nonblock(newfd);
    IVSHMEM_SERVER_DEBUG(server, "accept()=%d\n", newfd);

    /* allocate new structure for this peer */
    peer = g_malloc0(sizeof(*peer));
    peer->sock_fd = newfd;

    /* get an unused peer id */
    /* XXX: this could use id allocation such as Linux IDA, or simply
     * a free-list */
    for (i = 0; i < G_MAXUINT16; i++) {
        if (ivshmem_server_search_peer(server, server->cur_id) == NULL) {
            break;
        }
        server->cur_id++;
    }
    if (i == G_MAXUINT16) {
        IVSHMEM_SERVER_DEBUG(server, "cannot allocate new client id\n");
        close(newfd);
        g_free(peer);
        return -1;
    }
    peer->id = server->cur_id++;

    /* create eventfd, one per vector */
    peer->vectors_count = server->n_vectors;
    for (i = 0; i < peer->vectors_count; i++) {
        if (event_notifier_init(&peer->vectors[i], FALSE) < 0) {
            IVSHMEM_SERVER_DEBUG(server, "cannot create eventfd\n");
            goto fail;
        }
    }

    /* send peer id and shm fd */
    if (ivshmem_server_send_initial_info(server, peer) < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot send initial info\n");
        goto fail;
    }

    /* advertise the new peer to others */
    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
        for (i = 0; i < peer->vectors_count; i++) {
            ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id,
                                        peer->vectors[i].wfd);
        }
    }

    /* advertise the other peers to the new one */
    QTAILQ_FOREACH(other_peer, &server->peer_list, next) {
        for (i = 0; i < peer->vectors_count; i++) {
            ivshmem_server_send_one_msg(peer->sock_fd, other_peer->id,
                                        other_peer->vectors[i].wfd);
        }
    }

    /* advertise the new peer to itself */
    for (i = 0; i < peer->vectors_count; i++) {
        ivshmem_server_send_one_msg(peer->sock_fd, peer->id,
                                    event_notifier_get_fd(&peer->vectors[i]));
    }

    QTAILQ_INSERT_TAIL(&server->peer_list, peer, next);
    IVSHMEM_SERVER_DEBUG(server, "new peer id = %" PRId64 "\n",
                         peer->id);
    return 0;

fail:
    while (i--) {
        event_notifier_cleanup(&peer->vectors[i]);
    }
    close(newfd);
    g_free(peer);
    return -1;
}
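
/*
 * Note on fan-out: with N existing peers and V vectors each, a new
 * connection triggers N*V notification messages to the existing peers and
 * (N+1)*V messages to the newcomer (its own eventfds included), on top of
 * the three handshake messages. Every peer thus ends up holding an eventfd
 * for every (peer, vector) pair.
 */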
/* Try to ftruncate a file to next power of 2 of shmsize.
 * If it fails, all powers of 2 above shmsize are tested until
 * we reach the maximum huge page size. This is useful
 * if the shm file is in a hugetlbfs that cannot be truncated to the
 * shm_size value. */
static int
ivshmem_server_ftruncate(int fd, unsigned shmsize)
{
    int ret;
    struct stat mapstat;

    /* align shmsize to next power of 2 */
    shmsize = pow2ceil(shmsize);

    if (fstat(fd, &mapstat) != -1 && mapstat.st_size == shmsize) {
        return 0;
    }

    while (shmsize <= IVSHMEM_SERVER_MAX_HUGEPAGE_SIZE) {
        ret = ftruncate(fd, shmsize);
        if (ret == 0) {
            return ret;
        }
        shmsize *= 2;
    }

    return -1;
}
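
/*
 * Worked example of the probing above (sizes chosen for illustration): a
 * requested shm_size of 1000000 bytes is first rounded up by pow2ceil()
 * to 1048576 (1 MiB). On a hugetlbfs mount backed by 2 MiB pages,
 * ftruncate(fd, 1 MiB) fails because hugetlbfs only accepts multiples of
 * the page size, so the loop retries with 2 MiB, which succeeds.
 */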
/* Init a new ivshmem server */
int
ivshmem_server_init(IvshmemServer *server, const char *unix_sock_path,
                    const char *shm_path, bool use_shm_open,
                    size_t shm_size, unsigned n_vectors,
                    bool verbose)
{
    int ret;

    memset(server, 0, sizeof(*server));
    server->verbose = verbose;

    ret = snprintf(server->unix_sock_path, sizeof(server->unix_sock_path),
                   "%s", unix_sock_path);
    if (ret < 0 || ret >= sizeof(server->unix_sock_path)) {
        IVSHMEM_SERVER_DEBUG(server, "could not copy unix socket path\n");
        return -1;
    }

    ret = snprintf(server->shm_path, sizeof(server->shm_path),
                   "%s", shm_path);
    if (ret < 0 || ret >= sizeof(server->shm_path)) {
        IVSHMEM_SERVER_DEBUG(server, "could not copy shm path\n");
        return -1;
    }

    server->use_shm_open = use_shm_open;
    server->shm_size = shm_size;
    server->n_vectors = n_vectors;

    QTAILQ_INIT(&server->peer_list);

    return 0;
}
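
/*
 * Typical driver loop, as a simplified sketch of how a caller (such as the
 * companion main.c) might use this API; the socket path, shm name and
 * sizes below are illustrative:
 *
 *     IvshmemServer server;
 *     fd_set fds;
 *     int maxfd, ret;
 *
 *     ivshmem_server_init(&server, "/tmp/ivshmem_socket", "ivshmem",
 *                         true, 4 * 1024 * 1024, 1, true);
 *     ivshmem_server_start(&server);
 *     for (;;) {
 *         FD_ZERO(&fds);
 *         maxfd = 0;
 *         ivshmem_server_get_fds(&server, &fds, &maxfd);
 *         ret = select(maxfd, &fds, NULL, NULL, NULL);
 *         if (ret > 0 &&
 *             ivshmem_server_handle_fds(&server, &fds, maxfd) < 0) {
 *             break;
 *         }
 *     }
 *     ivshmem_server_close(&server);
 */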
/* open shm, create and bind to the unix socket */
int
ivshmem_server_start(IvshmemServer *server)
{
    struct sockaddr_un sun;
    int shm_fd, sock_fd, ret;

    /* open shm file */
    if (server->use_shm_open) {
        IVSHMEM_SERVER_DEBUG(server, "Using POSIX shared memory: %s\n",
                             server->shm_path);
        shm_fd = shm_open(server->shm_path, O_CREAT | O_RDWR, S_IRWXU);
    } else {
        gchar *filename = g_strdup_printf("%s/ivshmem.XXXXXX", server->shm_path);
        IVSHMEM_SERVER_DEBUG(server, "Using file-backed shared memory: %s\n",
                             server->shm_path);
        shm_fd = mkstemp(filename);
        unlink(filename);
        g_free(filename);
    }

    if (shm_fd < 0) {
        fprintf(stderr, "cannot open shm file %s: %s\n", server->shm_path,
                strerror(errno));
        return -1;
    }
    if (ivshmem_server_ftruncate(shm_fd, server->shm_size) < 0) {
        fprintf(stderr, "ftruncate(%s) failed: %s\n", server->shm_path,
                strerror(errno));
        goto err_close_shm;
    }

    IVSHMEM_SERVER_DEBUG(server, "create & bind socket %s\n",
                         server->unix_sock_path);

    /* create the unix listening socket */
    sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (sock_fd < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot create socket: %s\n",
                             strerror(errno));
        goto err_close_shm;
    }

    sun.sun_family = AF_UNIX;
    ret = snprintf(sun.sun_path, sizeof(sun.sun_path), "%s",
                   server->unix_sock_path);
    if (ret < 0 || ret >= sizeof(sun.sun_path)) {
        IVSHMEM_SERVER_DEBUG(server, "could not copy unix socket path\n");
        goto err_close_sock;
    }
    if (bind(sock_fd, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
        IVSHMEM_SERVER_DEBUG(server, "cannot bind to %s: %s\n", sun.sun_path,
                             strerror(errno));
        goto err_close_sock;
    }

    if (listen(sock_fd, IVSHMEM_SERVER_LISTEN_BACKLOG) < 0) {
        IVSHMEM_SERVER_DEBUG(server, "listen() failed: %s\n", strerror(errno));
        goto err_close_sock;
    }

    server->sock_fd = sock_fd;
    server->shm_fd = shm_fd;

    return 0;

err_close_sock:
    close(sock_fd);
err_close_shm:
    close(shm_fd);
    return -1;
}
/* close connections to clients, the unix socket and the shm fd */
void
ivshmem_server_close(IvshmemServer *server)
{
    IvshmemServerPeer *peer, *npeer;

    IVSHMEM_SERVER_DEBUG(server, "close server\n");

    QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, npeer) {
        ivshmem_server_free_peer(server, peer);
    }

    unlink(server->unix_sock_path);
    close(server->sock_fd);
    close(server->shm_fd);
    server->sock_fd = -1;
    server->shm_fd = -1;
}
/* get the fd_set according to the unix socket and the peer list */
void
ivshmem_server_get_fds(const IvshmemServer *server, fd_set *fds, int *maxfd)
{
    IvshmemServerPeer *peer;

    if (server->sock_fd == -1) {
        return;
    }

    FD_SET(server->sock_fd, fds);
    if (server->sock_fd >= *maxfd) {
        *maxfd = server->sock_fd + 1;
    }

    QTAILQ_FOREACH(peer, &server->peer_list, next) {
        FD_SET(peer->sock_fd, fds);
        if (peer->sock_fd >= *maxfd) {
            *maxfd = peer->sock_fd + 1;
        }
    }
}
/* process incoming messages on the sockets in fd_set */
int
ivshmem_server_handle_fds(IvshmemServer *server, fd_set *fds, int maxfd)
{
    IvshmemServerPeer *peer, *peer_next;

    if (server->sock_fd < maxfd && FD_ISSET(server->sock_fd, fds) &&
        ivshmem_server_handle_new_conn(server) < 0 && errno != EINTR) {
        IVSHMEM_SERVER_DEBUG(server, "ivshmem_server_handle_new_conn() "
                             "failed\n");
        return -1;
    }

    QTAILQ_FOREACH_SAFE(peer, &server->peer_list, next, peer_next) {
        /* any message from a peer socket results in a close() */
        IVSHMEM_SERVER_DEBUG(server, "peer->sock_fd=%d\n", peer->sock_fd);
        if (peer->sock_fd < maxfd && FD_ISSET(peer->sock_fd, fds)) {
            ivshmem_server_free_peer(server, peer);
        }
    }

    return 0;
}
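
/*
 * Rationale (informational): the ivshmem protocol is one-way, server to
 * client; clients are not expected to write anything on the socket. Any
 * readable event on a peer fd, whether data or EOF, is therefore treated
 * as a disconnection and the peer is torn down.
 */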
/* lookup peer from its id */
IvshmemServerPeer *
ivshmem_server_search_peer(IvshmemServer *server, int64_t peer_id)
{
    IvshmemServerPeer *peer;

    QTAILQ_FOREACH(peer, &server->peer_list, next) {
        if (peer->id == peer_id) {
            return peer;
        }
    }

    return NULL;
}
/* dump our info, the list of peers and their vectors on stdout */
void
ivshmem_server_dump(const IvshmemServer *server)
{
    const IvshmemServerPeer *peer;
    unsigned vector;

    /* dump peers */
    QTAILQ_FOREACH(peer, &server->peer_list, next) {
        printf("peer_id = %" PRId64 "\n", peer->id);

        for (vector = 0; vector < peer->vectors_count; vector++) {
            printf("  vector %d is enabled (fd=%d)\n", vector,
                   event_notifier_get_fd(&peer->vectors[vector]));
        }
    }
}