/*
 * vmnet-common.m - network client wrapper for Apple vmnet.framework
 *
 * Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
 * Copyright(c) 2021 Phillip Tennen <phillip@axleos.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "qapi/qapi-types-net.h"
#include "vmnet_int.h"
#include "clients.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "sysemu/runstate.h"

#include <vmnet/vmnet.h>
#include <dispatch/dispatch.h>


static void vmnet_send_completed(NetClientState *nc, ssize_t len);

const char *vmnet_status_map_str(vmnet_return_t status)
{
    switch (status) {
    case VMNET_SUCCESS:
        return "success";
    case VMNET_FAILURE:
        return "general failure (possibly not enough privileges)";
    case VMNET_MEM_FAILURE:
        return "memory allocation failure";
    case VMNET_INVALID_ARGUMENT:
        return "invalid argument specified";
    case VMNET_SETUP_INCOMPLETE:
        return "interface setup is not complete";
    case VMNET_INVALID_ACCESS:
        return "invalid access, permission denied";
    case VMNET_PACKET_TOO_BIG:
        return "packet size is larger than MTU";
    case VMNET_BUFFER_EXHAUSTED:
        return "buffers exhausted in kernel";
    case VMNET_TOO_MANY_PACKETS:
        return "packet count exceeds limit";
#if defined(MAC_OS_VERSION_11_0) && \
    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
    case VMNET_SHARING_SERVICE_BUSY:
        return "conflict, sharing service is in use";
#endif
    default:
        return "unknown vmnet error";
    }
}

/**
 * Write packets from QEMU to the vmnet interface.
 *
 * vmnet.framework supports iovs, but writing more than
 * one iov to the vmnet interface fails with
 * 'VMNET_INVALID_ARGUMENT'. Collecting the provided iovs into
 * one and passing it to vmnet works fine. That is why
 * receive_iov() is left unimplemented; performance is still
 * good with .receive() only.
 */
ssize_t vmnet_receive_common(NetClientState *nc,
                             const uint8_t *buf,
                             size_t size)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
    struct vmpktdesc packet;
    struct iovec iov;
    int pkt_cnt;
    vmnet_return_t if_status;

    if (size > s->max_packet_size) {
        warn_report("vmnet: packet is too big, %zu > %" PRIu64,
                    size,
                    s->max_packet_size);
        return -1;
    }

    iov.iov_base = (char *) buf;
    iov.iov_len = size;

    packet.vm_pkt_iovcnt = 1;
    packet.vm_flags = 0;
    packet.vm_pkt_size = size;
    packet.vm_pkt_iov = &iov;
    pkt_cnt = 1;

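    /*
     * pkt_cnt is an in/out argument: the number of packet descriptors
     * goes in, and vmnet_write() updates it with the number of packets
     * actually written (checked below).
     */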
    if_status = vmnet_write(s->vmnet_if, &packet, &pkt_cnt);
    if (if_status != VMNET_SUCCESS) {
        error_report("vmnet: write error: %s",
                     vmnet_status_map_str(if_status));
        return -1;
    }

    if (pkt_cnt) {
        return size;
    }
    return 0;
}

/**
 * Read packets from the vmnet interface and write them
 * to temporary buffers in VmnetState.
 *
 * Returns the number of packets read (may be 0) on success,
 * -1 on error.
 */
static int vmnet_read_packets(VmnetState *s)
{
    assert(s->packets_send_current_pos == s->packets_send_end_pos);

    struct vmpktdesc *packets = s->packets_buf;
    vmnet_return_t status;
    int i;

    /* Read as many packets as present */
    s->packets_send_current_pos = 0;
    s->packets_send_end_pos = VMNET_PACKETS_LIMIT;
    for (i = 0; i < s->packets_send_end_pos; ++i) {
        packets[i].vm_pkt_size = s->max_packet_size;
        packets[i].vm_pkt_iovcnt = 1;
        packets[i].vm_flags = 0;
    }

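    /*
     * packets_send_end_pos passes the capacity of packets_buf to
     * vmnet_read() and is updated with the number of packets that
     * were actually read.
     */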
    status = vmnet_read(s->vmnet_if, packets, &s->packets_send_end_pos);
    if (status != VMNET_SUCCESS) {
        error_printf("vmnet: read failed: %s\n",
                     vmnet_status_map_str(status));
        s->packets_send_current_pos = 0;
        s->packets_send_end_pos = 0;
        return -1;
    }
    return s->packets_send_end_pos;
}

/**
 * Write packets from temporary buffers in VmnetState
 * to QEMU.
 */
static void vmnet_write_packets_to_qemu(VmnetState *s)
{
    while (s->packets_send_current_pos < s->packets_send_end_pos) {
        ssize_t size = qemu_send_packet_async(&s->nc,
                                      s->iov_buf[s->packets_send_current_pos].iov_base,
                                      s->packets_buf[s->packets_send_current_pos].vm_pkt_size,
                                      vmnet_send_completed);

        if (size == 0) {
            /* QEMU is not ready to consume more packets -
             * stop and wait for completion callback call */
            return;
        }
        ++s->packets_send_current_pos;
    }
}

/**
 * Bottom half callback that transfers packets from the vmnet interface
 * to QEMU.
 *
 * The process of transferring packets has three stages:
 * 1. Handle the vmnet event;
 * 2. Read packets from the vmnet interface into a temporary buffer;
 * 3. Write packets from the temporary buffer to QEMU.
 *
 * QEMU may suspend this process at the last stage by returning 0 from
 * qemu_send_packet_async(). If this happens, we wait until it is ready
 * to consume more packets, flush the ones left in the temporary buffer,
 * and only then continue reading from the vmnet interface.
 *
 * Packets to be transferred are stored in packets_buf, in the window
 * [packets_send_current_pos..packets_send_end_pos), including
 * current_pos and excluding end_pos.
 *
 * Thus, if QEMU is not ready, the buffer is not refilled and
 * packets_send_current_pos < packets_send_end_pos.
 */
static void vmnet_send_bh(void *opaque)
{
    NetClientState *nc = (NetClientState *) opaque;
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);

    /*
     * Do nothing if QEMU is not ready - wait
     * for completion callback invocation
     */
    if (s->packets_send_current_pos < s->packets_send_end_pos) {
        return;
    }

    /* Read packets from vmnet interface */
    if (vmnet_read_packets(s) > 0) {
        /* Send them to QEMU */
        vmnet_write_packets_to_qemu(s);
    }
}

/**
 * Completion callback to be invoked by QEMU when it becomes
 * ready to consume more packets.
 */
static void vmnet_send_completed(NetClientState *nc, ssize_t len)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);

    /* The callback is invoked once a queued packet has been sent */
    ++s->packets_send_current_pos;

    /* Complete sending packets left in VmnetState buffers */
    vmnet_write_packets_to_qemu(s);

    /* And read new ones from vmnet if the VmnetState buffer is ready */
    if (s->packets_send_current_pos < s->packets_send_end_pos) {
        qemu_bh_schedule(s->send_bh);
    }
}

static void vmnet_bufs_init(VmnetState *s)
{
    struct vmpktdesc *packets = s->packets_buf;
    struct iovec *iov = s->iov_buf;
    int i;

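    /*
     * Preallocate VMNET_PACKETS_LIMIT receive buffers, each large enough
     * for one maximum-sized packet, and attach them to the packet
     * descriptors that vmnet_read() fills in.
     */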
    for (i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
        iov[i].iov_len = s->max_packet_size;
        iov[i].iov_base = g_malloc0(iov[i].iov_len);
        packets[i].vm_pkt_iov = iov + i;
    }
}

/**
 * Called on state change to un-register/re-register handlers
 */
static void vmnet_vm_state_change_cb(void *opaque, bool running, RunState state)
{
    VmnetState *s = opaque;

    if (running) {
        vmnet_interface_set_event_callback(
            s->vmnet_if,
            VMNET_INTERFACE_PACKETS_AVAILABLE,
            s->if_queue,
            ^(interface_event_t event_id, xpc_object_t event) {
                assert(event_id == VMNET_INTERFACE_PACKETS_AVAILABLE);
                /*
                 * This function is called from a non-QEMU thread, so
                 * we only schedule a BH, and do the rest of the I/O
                 * completion handling from vmnet_send_bh(), which runs
                 * in a QEMU context.
                 */
                qemu_bh_schedule(s->send_bh);
            });
    } else {
        vmnet_interface_set_event_callback(
            s->vmnet_if,
            VMNET_INTERFACE_PACKETS_AVAILABLE,
            NULL,
            NULL);
    }
}

int vmnet_if_create(NetClientState *nc,
                    xpc_object_t if_desc,
                    Error **errp)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
    dispatch_semaphore_t if_created_sem = dispatch_semaphore_create(0);
    __block vmnet_return_t if_status;
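
    /*
     * Create a serial dispatch queue on which all vmnet callbacks used in
     * this file (the interface event handler and the start/stop completion
     * handlers) are delivered.
     */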
    s->if_queue = dispatch_queue_create(
        "org.qemu.vmnet.if_queue",
        DISPATCH_QUEUE_SERIAL
    );

    xpc_dictionary_set_bool(
        if_desc,
        vmnet_allocate_mac_address_key,
        false
    );

#ifdef DEBUG
    qemu_log("vmnet.start.interface_desc:\n");
    xpc_dictionary_apply(if_desc,
                         ^bool(const char *k, xpc_object_t v) {
                             char *desc = xpc_copy_description(v);
                             qemu_log("  %s=%s\n", k, desc);
                             free(desc);
                             return true;
                         });
#endif /* DEBUG */

    s->vmnet_if = vmnet_start_interface(
        if_desc,
        s->if_queue,
        ^(vmnet_return_t status, xpc_object_t interface_param) {
            if_status = status;
            if (status != VMNET_SUCCESS || !interface_param) {
                dispatch_semaphore_signal(if_created_sem);
                return;
            }

#ifdef DEBUG
            qemu_log("vmnet.start.interface_param:\n");
            xpc_dictionary_apply(interface_param,
                                 ^bool(const char *k, xpc_object_t v) {
                                     char *desc = xpc_copy_description(v);
                                     qemu_log("  %s=%s\n", k, desc);
                                     free(desc);
                                     return true;
                                 });
#endif /* DEBUG */

            s->mtu = xpc_dictionary_get_uint64(
                interface_param,
                vmnet_mtu_key);
            s->max_packet_size = xpc_dictionary_get_uint64(
                interface_param,
                vmnet_max_packet_size_key);

            dispatch_semaphore_signal(if_created_sem);
        });

    if (s->vmnet_if == NULL) {
        dispatch_release(s->if_queue);
        dispatch_release(if_created_sem);
        error_setg(errp,
                   "unable to create interface with requested params");
        return -1;
    }

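    /*
     * The completion handler above runs on if_queue; block here until it
     * signals the semaphore, so that if_status, mtu and max_packet_size
     * are valid before we continue.
     */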
    dispatch_semaphore_wait(if_created_sem, DISPATCH_TIME_FOREVER);
    dispatch_release(if_created_sem);

    if (if_status != VMNET_SUCCESS) {
        dispatch_release(s->if_queue);
        error_setg(errp,
                   "cannot create vmnet interface: %s",
                   vmnet_status_map_str(if_status));
        return -1;
    }

    s->send_bh = aio_bh_new(qemu_get_aio_context(), vmnet_send_bh, nc);
    vmnet_bufs_init(s);

    s->packets_send_current_pos = 0;
    s->packets_send_end_pos = 0;

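    /*
     * Register the packets-available callback now, and keep it
     * registered/unregistered in sync with the VM run state from here on.
     */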
    vmnet_vm_state_change_cb(s, 1, RUN_STATE_RUNNING);

    s->change = qemu_add_vm_change_state_handler(vmnet_vm_state_change_cb, s);

    return 0;
}

void vmnet_cleanup_common(NetClientState *nc)
{
    VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
    dispatch_semaphore_t if_stopped_sem;

    if (s->vmnet_if == NULL) {
        return;
    }

    vmnet_vm_state_change_cb(s, 0, RUN_STATE_SHUTDOWN);
    qemu_del_vm_change_state_handler(s->change);
    if_stopped_sem = dispatch_semaphore_create(0);
    vmnet_stop_interface(
        s->vmnet_if,
        s->if_queue,
        ^(vmnet_return_t status) {
            assert(status == VMNET_SUCCESS);
            dispatch_semaphore_signal(if_stopped_sem);
        });
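    /*
     * vmnet_stop_interface() completes asynchronously on if_queue; block
     * until its completion handler has signalled the semaphore before
     * tearing down the rest of the state.
     */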
    dispatch_semaphore_wait(if_stopped_sem, DISPATCH_TIME_FOREVER);

    qemu_purge_queued_packets(nc);

    qemu_bh_delete(s->send_bh);
    dispatch_release(if_stopped_sem);
    dispatch_release(s->if_queue);

    for (int i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
        g_free(s->iov_buf[i].iov_base);
    }
}