tests/vhost-user-bridge: add scattering of incoming packets
[qemu/ar7.git] / tests / vhost-user-bridge.c
blobf5030b247b50f396022ceeec1a39cac3ac0a7d9a
1 /*
2 * Vhost User Bridge
4 * Copyright (c) 2015 Red Hat, Inc.
6 * Authors:
7 * Victor Kaplansky <victork@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or
10 * later. See the COPYING file in the top-level directory.
14 * TODO:
15 * - main should get parameters from the command line.
16 * - implement all request handlers. Still not implemented:
17 * vubr_get_queue_num_exec()
18 * vubr_send_rarp_exec()
19 * - test for broken requests and virtqueue.
20 * - implement features defined by Virtio 1.0 spec.
21 * - support mergeable buffers and indirect descriptors.
22 * - implement clean shutdown.
23 * - implement non-blocking writes to UDP backend.
24 * - implement polling strategy.
25 * - implement clean starting/stopping of vq processing
26 * - implement clean starting/stopping of used and buffers
27 * dirty page logging.
30 #define _FILE_OFFSET_BITS 64
32 #include <stddef.h>
33 #include <assert.h>
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <stdint.h>
37 #include <inttypes.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <errno.h>
41 #include <sys/types.h>
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/unistd.h>
45 #include <sys/mman.h>
46 #include <sys/eventfd.h>
47 #include <arpa/inet.h>
48 #include <ctype.h>
49 #include <netdb.h>
50 #include <qemu/osdep.h>
52 #include <linux/vhost.h>
54 #include "qemu/atomic.h"
55 #include "standard-headers/linux/virtio_net.h"
56 #include "standard-headers/linux/virtio_ring.h"
#define VHOST_USER_BRIDGE_DEBUG 1

/* Debug trace macro: compiles to a no-op when VHOST_USER_BRIDGE_DEBUG is 0.
 * Wrapped in do { } while (0) so it behaves as a single statement in
 * unbraced if/else bodies. */
#define DPRINT(...) \
    do { \
        if (VHOST_USER_BRIDGE_DEBUG) { \
            printf(__VA_ARGS__); \
        } \
    } while (0)
/* Callback invoked by the dispatcher when a watched socket is readable. */
typedef void (*CallbackFunc)(int sock, void *ctx);

/* A registered callback together with its opaque context pointer. */
typedef struct Event {
    void *ctx;
    CallbackFunc callback;
} Event;

/* Minimal select()-based event loop: one Event slot per possible fd,
 * indexed directly by file descriptor number. */
typedef struct Dispatcher {
    int max_sock;              /* highest watched fd, -1 when none */
    fd_set fdset;              /* set of watched fds */
    Event events[FD_SETSIZE];  /* indexed by fd */
} Dispatcher;
/* Report the failing call 's' via perror() and terminate the bridge. */
static void
vubr_die(const char *s)
{
    perror(s);
    exit(1);
}
87 static int
88 dispatcher_init(Dispatcher *dispr)
90 FD_ZERO(&dispr->fdset);
91 dispr->max_sock = -1;
92 return 0;
95 static int
96 dispatcher_add(Dispatcher *dispr, int sock, void *ctx, CallbackFunc cb)
98 if (sock >= FD_SETSIZE) {
99 fprintf(stderr,
100 "Error: Failed to add new event. sock %d should be less than %d\n",
101 sock, FD_SETSIZE);
102 return -1;
105 dispr->events[sock].ctx = ctx;
106 dispr->events[sock].callback = cb;
108 FD_SET(sock, &dispr->fdset);
109 if (sock > dispr->max_sock) {
110 dispr->max_sock = sock;
112 DPRINT("Added sock %d for watching. max_sock: %d\n",
113 sock, dispr->max_sock);
114 return 0;
117 /* dispatcher_remove() is not currently in use but may be useful
118 * in the future. */
119 static int
120 dispatcher_remove(Dispatcher *dispr, int sock)
122 if (sock >= FD_SETSIZE) {
123 fprintf(stderr,
124 "Error: Failed to remove event. sock %d should be less than %d\n",
125 sock, FD_SETSIZE);
126 return -1;
129 FD_CLR(sock, &dispr->fdset);
130 DPRINT("Sock %d removed from dispatcher watch.\n", sock);
131 return 0;
/* timeout in us */
/* Block for up to 'timeout' microseconds waiting for watched sockets to
 * become readable, then invoke each ready socket's callback.
 * Returns 0 in all non-fatal cases (select() failure exits the process). */
static int
dispatcher_wait(Dispatcher *dispr, uint32_t timeout)
{
    struct timeval tv;
    tv.tv_sec = timeout / 1000000;
    tv.tv_usec = timeout % 1000000;

    /* select() modifies the set in place, so operate on a copy. */
    fd_set fdset = dispr->fdset;

    /* wait until some of sockets become readable. */
    int rc = select(dispr->max_sock + 1, &fdset, 0, 0, &tv);

    if (rc == -1) {
        vubr_die("select");
    }

    /* Timeout */
    if (rc == 0) {
        return 0;
    }

    /* Now call callback for every ready socket. */

    int sock;
    for (sock = 0; sock < dispr->max_sock + 1; sock++) {
        /* The callback on a socket can remove other sockets from the
         * dispatcher, thus we have to check that the socket is
         * still not removed from dispatcher's list
         */
        if (FD_ISSET(sock, &fdset) && FD_ISSET(sock, &dispr->fdset)) {
            Event *e = &dispr->events[sock];
            e->callback(sock, e->ctx);
        }
    }

    return 0;
}
/* Per-virtqueue state: eventfds exchanged over vhost-user plus our local
 * view of the vring and its progress indices. */
typedef struct VubrVirtq {
    int call_fd;                 /* eventfd to interrupt the guest, -1 if unset */
    int kick_fd;                 /* eventfd the guest kicks us on, -1 if unset */
    uint32_t size;               /* number of descriptors in the ring */
    uint16_t last_avail_index;   /* next avail->ring entry to consume */
    uint16_t last_used_index;    /* next used->ring entry to fill */
    struct vring_desc *desc;     /* descriptor table (guest memory, mapped) */
    struct vring_avail *avail;   /* avail ring (guest memory, mapped) */
    struct vring_used *used;     /* used ring (guest memory, mapped) */
    uint64_t log_guest_addr;     /* guest-physical addr of used ring, for dirty log */
    int enable;                  /* set by VHOST_USER_SET_VRING_ENABLE */
} VubrVirtq;
/* Based on qemu/hw/virtio/vhost-user.c */

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
/* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 32

/* Dirty-log granularity: one bitmap bit covers this many guest bytes. */
#define VHOST_LOG_PAGE 4096

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,

    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
/* Request codes of the vhost-user protocol; values are part of the wire
 * format and must match QEMU's hw/virtio/vhost-user.c. */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_MAX
} VhostUserRequest;
/* One guest memory region announced via VHOST_USER_SET_MEM_TABLE. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;   /* address in QEMU's virtual address space */
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

/* Payload of VHOST_USER_SET_LOG_BASE: geometry of the shared dirty log. */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

/* On-the-wire vhost-user message.  Only the header (up to 'payload') plus
 * 'size' payload bytes travel on the socket; 'fds'/'fd_num' hold file
 * descriptors received as SCM_RIGHTS ancillary data and are local only
 * (vubr_message_write() sends VHOST_USER_HDR_SIZE + size bytes). */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK (0x3)
#define VHOST_USER_REPLY_MASK (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK (0xff)
#define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    int fd_num;
} QEMU_PACKED VhostUserMsg;

#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

/* The version of the protocol we support */
#define VHOST_USER_VERSION (0x1)

#define MAX_NR_VIRTQUEUE (8)
/* A guest memory region as we mapped it locally. */
typedef struct VubrDevRegion {
    /* Guest Physical address. */
    uint64_t gpa;
    /* Memory region size. */
    uint64_t size;
    /* QEMU virtual address (userspace). */
    uint64_t qva;
    /* Starting offset in our mmaped space. */
    uint64_t mmap_offset;
    /* Start address of mmaped space. */
    uint64_t mmap_addr;
} VubrDevRegion;

/* Whole bridge device: vhost-user control socket state, mapped guest
 * memory, virtqueues, dirty-log state and the UDP backend endpoint. */
typedef struct VubrDev {
    int sock;                     /* listening vhost-user UNIX socket */
    Dispatcher dispatcher;
    uint32_t nregions;
    VubrDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
    VubrVirtq vq[MAX_NR_VIRTQUEUE];  /* vq[0] = RX, vq[1] = TX */
    int log_call_fd;              /* eventfd to notify QEMU of log updates */
    uint64_t log_size;
    uint8_t *log_table;           /* mmap'ed dirty-page bitmap */
    int backend_udp_sock;
    struct sockaddr_in backend_udp_dest;
    int ready;                    /* both kick fds received; start processing */
    uint64_t features;            /* negotiated feature bits */
    int hdrlen;                   /* virtio-net header length: 10 or 12 bytes */
} VubrDev;
/* Human-readable names for VhostUserRequest codes, used in debug traces. */
static const char *vubr_request_str[] = {
    [VHOST_USER_NONE] = "VHOST_USER_NONE",
    [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
    [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
    [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
    [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
    [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
    [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
    [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
    [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
    [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
    [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
    [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
    [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
    [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
    [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
    [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
    [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
    [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
    [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
    [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
    [VHOST_USER_MAX] = "VHOST_USER_MAX",
};
/* Hex-dump 'len' bytes of 'buf' to stdout, 16 bytes per row with an extra
 * gap every 4 bytes.  Debug helper only. */
static void
print_buffer(uint8_t *buf, size_t len)
{
    /* size_t, not int: the original signed/unsigned comparison against
     * 'len' drew -Wsign-compare and misbehaves for len > INT_MAX. */
    size_t i;
    printf("Raw buffer:\n");
    for (i = 0; i < len; i++) {
        if (i % 16 == 0) {
            printf("\n");
        }
        if (i % 4 == 0) {
            printf(" ");
        }
        printf("%02x ", buf[i]);
    }
    printf("\n............................................................\n");
}
344 /* Translate guest physical address to our virtual address. */
345 static uint64_t
346 gpa_to_va(VubrDev *dev, uint64_t guest_addr)
348 int i;
350 /* Find matching memory region. */
351 for (i = 0; i < dev->nregions; i++) {
352 VubrDevRegion *r = &dev->regions[i];
354 if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
355 return guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
359 assert(!"address not found in regions");
360 return 0;
363 /* Translate qemu virtual address to our virtual address. */
364 static uint64_t
365 qva_to_va(VubrDev *dev, uint64_t qemu_addr)
367 int i;
369 /* Find matching memory region. */
370 for (i = 0; i < dev->nregions; i++) {
371 VubrDevRegion *r = &dev->regions[i];
373 if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
374 return qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
378 assert(!"address not found in regions");
379 return 0;
382 static void
383 vubr_message_read(int conn_fd, VhostUserMsg *vmsg)
385 char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
386 struct iovec iov = {
387 .iov_base = (char *)vmsg,
388 .iov_len = VHOST_USER_HDR_SIZE,
390 struct msghdr msg = {
391 .msg_iov = &iov,
392 .msg_iovlen = 1,
393 .msg_control = control,
394 .msg_controllen = sizeof(control),
396 size_t fd_size;
397 struct cmsghdr *cmsg;
398 int rc;
400 rc = recvmsg(conn_fd, &msg, 0);
402 if (rc == 0) {
403 vubr_die("recvmsg");
404 fprintf(stderr, "Peer disconnected.\n");
405 exit(1);
407 if (rc < 0) {
408 vubr_die("recvmsg");
411 vmsg->fd_num = 0;
412 for (cmsg = CMSG_FIRSTHDR(&msg);
413 cmsg != NULL;
414 cmsg = CMSG_NXTHDR(&msg, cmsg))
416 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
417 fd_size = cmsg->cmsg_len - CMSG_LEN(0);
418 vmsg->fd_num = fd_size / sizeof(int);
419 memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
420 break;
424 if (vmsg->size > sizeof(vmsg->payload)) {
425 fprintf(stderr,
426 "Error: too big message request: %d, size: vmsg->size: %u, "
427 "while sizeof(vmsg->payload) = %lu\n",
428 vmsg->request, vmsg->size, sizeof(vmsg->payload));
429 exit(1);
432 if (vmsg->size) {
433 rc = read(conn_fd, &vmsg->payload, vmsg->size);
434 if (rc == 0) {
435 vubr_die("recvmsg");
436 fprintf(stderr, "Peer disconnected.\n");
437 exit(1);
439 if (rc < 0) {
440 vubr_die("recvmsg");
443 assert(rc == vmsg->size);
447 static void
448 vubr_message_write(int conn_fd, VhostUserMsg *vmsg)
450 int rc;
452 do {
453 rc = write(conn_fd, vmsg, VHOST_USER_HDR_SIZE + vmsg->size);
454 } while (rc < 0 && errno == EINTR);
456 if (rc < 0) {
457 vubr_die("write");
461 static void
462 vubr_backend_udp_sendbuf(VubrDev *dev, uint8_t *buf, size_t len)
464 int slen = sizeof(struct sockaddr_in);
466 if (sendto(dev->backend_udp_sock, buf, len, 0,
467 (struct sockaddr *) &dev->backend_udp_dest, slen) == -1) {
468 vubr_die("sendto()");
472 static int
473 vubr_backend_udp_recvbuf(VubrDev *dev, uint8_t *buf, size_t buflen)
475 int slen = sizeof(struct sockaddr_in);
476 int rc;
478 rc = recvfrom(dev->backend_udp_sock, buf, buflen, 0,
479 (struct sockaddr *) &dev->backend_udp_dest,
480 (socklen_t *)&slen);
481 if (rc == -1) {
482 vubr_die("recvfrom()");
485 return rc;
488 static void
489 vubr_consume_raw_packet(VubrDev *dev, uint8_t *buf, uint32_t len)
491 int hdrlen = dev->hdrlen;
492 DPRINT(" hdrlen = %d\n", dev->hdrlen);
494 if (VHOST_USER_BRIDGE_DEBUG) {
495 print_buffer(buf, len);
497 vubr_backend_udp_sendbuf(dev, buf + hdrlen, len - hdrlen);
500 /* Kick the log_call_fd if required. */
501 static void
502 vubr_log_kick(VubrDev *dev)
504 if (dev->log_call_fd != -1) {
505 DPRINT("Kicking the QEMU's log...\n");
506 eventfd_write(dev->log_call_fd, 1);
510 /* Kick the guest if necessary. */
511 static void
512 vubr_virtqueue_kick(VubrVirtq *vq)
514 if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
515 DPRINT("Kicking the guest...\n");
516 eventfd_write(vq->call_fd, 1);
/* Set the dirty bit for guest page index 'page' in the shared log bitmap.
 * atomic_or because QEMU may read and clear the bitmap concurrently. */
static void
vubr_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    atomic_or(&log_table[page / 8], 1 << (page % 8));
}
527 static void
528 vubr_log_write(VubrDev *dev, uint64_t address, uint64_t length)
530 uint64_t page;
532 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
533 !dev->log_table || !length) {
534 return;
537 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
539 page = address / VHOST_LOG_PAGE;
540 while (page * VHOST_LOG_PAGE < address + length) {
541 vubr_log_page(dev->log_table, page);
542 page += VHOST_LOG_PAGE;
544 vubr_log_kick(dev);
/* Copy 'len' bytes of 'buf' into the guest's next available RX descriptor
 * chain, publish it on the used ring (with dirty logging) and kick the
 * guest.  The caller must have verified that a descriptor is available.
 * Packets longer than the chain are dropped with a diagnostic. */
static void
vubr_post_buffer(VubrDev *dev, VubrVirtq *vq, uint8_t *buf, int32_t len)
{
    struct vring_desc *desc = vq->desc;
    struct vring_avail *avail = vq->avail;
    struct vring_used *used = vq->used;
    uint64_t log_guest_addr = vq->log_guest_addr;
    int32_t remaining_len = len;

    unsigned int size = vq->size;

    uint16_t avail_index = atomic_mb_read(&avail->idx);

    /* We check the available descriptors before posting the
     * buffer, so here we assume that enough available
     * descriptors. */
    assert(vq->last_avail_index != avail_index);
    uint16_t a_index = vq->last_avail_index % size;
    uint16_t u_index = vq->last_used_index % size;
    uint16_t d_index = avail->ring[a_index];

    int i = d_index;              /* walks the descriptor chain */
    uint32_t written_len = 0;     /* bytes of 'buf' copied so far */

    do {
        DPRINT("Post packet to guest on vq:\n");
        DPRINT(" size = %d\n", vq->size);
        DPRINT(" last_avail_index = %d\n", vq->last_avail_index);
        DPRINT(" last_used_index = %d\n", vq->last_used_index);
        DPRINT(" a_index = %d\n", a_index);
        DPRINT(" u_index = %d\n", u_index);
        DPRINT(" d_index = %d\n", d_index);
        DPRINT(" desc[%d].addr = 0x%016"PRIx64"\n", i, desc[i].addr);
        DPRINT(" desc[%d].len = %d\n", i, desc[i].len);
        DPRINT(" desc[%d].flags = %d\n", i, desc[i].flags);
        DPRINT(" avail->idx = %d\n", avail_index);
        DPRINT(" used->idx = %d\n", used->idx);

        if (!(desc[i].flags & VRING_DESC_F_WRITE)) {
            /* FIXME: we should find writable descriptor. */
            fprintf(stderr, "Error: descriptor is not writable. Exiting.\n");
            exit(1);
        }

        /* Scatter the packet across the chain, one descriptor at a time. */
        void *chunk_start = (void *)gpa_to_va(dev, desc[i].addr);
        uint32_t chunk_len = desc[i].len;
        uint32_t chunk_write_len = MIN(remaining_len, chunk_len);

        memcpy(chunk_start, buf + written_len, chunk_write_len);
        vubr_log_write(dev, desc[i].addr, chunk_write_len);
        remaining_len -= chunk_write_len;
        written_len += chunk_write_len;

        if ((remaining_len == 0) || !(desc[i].flags & VRING_DESC_F_NEXT)) {
            break;
        }

        i = desc[i].next;
    } while (1);

    if (remaining_len > 0) {
        /* Ran out of chained descriptors before the packet was consumed. */
        fprintf(stderr,
                "Too long packet for RX, remaining_len = %d, Dropping...\n",
                remaining_len);
        return;
    }

    /* Add descriptor to the used ring. */
    used->ring[u_index].id = d_index;
    used->ring[u_index].len = len;
    vubr_log_write(dev,
                   log_guest_addr + offsetof(struct vring_used, ring[u_index]),
                   sizeof(used->ring[u_index]));

    vq->last_avail_index++;
    vq->last_used_index++;

    /* Barriered store: entries above must be visible before the index. */
    atomic_mb_set(&used->idx, vq->last_used_index);
    vubr_log_write(dev,
                   log_guest_addr + offsetof(struct vring_used, idx),
                   sizeof(used->idx));

    /* Kick the guest if necessary. */
    vubr_virtqueue_kick(vq);
}
/* Gather one guest TX descriptor chain into a local buffer, record it on
 * the used ring (index not yet published -- the caller bumps used->idx),
 * and forward the packet to the backend.
 * Returns 0 on success, -1 if the chain was empty.  Packets longer than
 * the 4 KiB scratch buffer are truncated at the last fitting chunk. */
static int
vubr_process_desc(VubrDev *dev, VubrVirtq *vq)
{
    struct vring_desc *desc = vq->desc;
    struct vring_avail *avail = vq->avail;
    struct vring_used *used = vq->used;
    uint64_t log_guest_addr = vq->log_guest_addr;

    unsigned int size = vq->size;

    uint16_t a_index = vq->last_avail_index % size;
    uint16_t u_index = vq->last_used_index % size;
    uint16_t d_index = avail->ring[a_index];

    uint32_t i, len = 0;
    size_t buf_size = 4096;
    uint8_t buf[4096];

    DPRINT("Chunks: ");
    i = d_index;
    do {
        void *chunk_start = (void *)gpa_to_va(dev, desc[i].addr);
        uint32_t chunk_len = desc[i].len;

        /* TX descriptors are read-only for the device. */
        assert(!(desc[i].flags & VRING_DESC_F_WRITE));

        if (len + chunk_len < buf_size) {
            memcpy(buf + len, chunk_start, chunk_len);
            DPRINT("%d ", chunk_len);
        } else {
            fprintf(stderr, "Error: too long packet. Dropping...\n");
            break;
        }

        len += chunk_len;

        if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
            break;
        }

        i = desc[i].next;
    } while (1);
    DPRINT("\n");

    if (!len) {
        return -1;
    }

    /* Add descriptor to the used ring. */
    used->ring[u_index].id = d_index;
    used->ring[u_index].len = len;
    vubr_log_write(dev,
                   log_guest_addr + offsetof(struct vring_used, ring[u_index]),
                   sizeof(used->ring[u_index]));

    vubr_consume_raw_packet(dev, buf, len);

    return 0;
}
/* Consume every pending TX descriptor chain on 'vq', then publish the new
 * used->idx once (with a barrier) and log the index write. */
static void
vubr_process_avail(VubrDev *dev, VubrVirtq *vq)
{
    struct vring_avail *avail = vq->avail;
    struct vring_used *used = vq->used;
    uint64_t log_guest_addr = vq->log_guest_addr;

    while (vq->last_avail_index != atomic_mb_read(&avail->idx)) {
        vubr_process_desc(dev, vq);
        vq->last_avail_index++;
        vq->last_used_index++;
    }

    atomic_mb_set(&used->idx, vq->last_used_index);
    vubr_log_write(dev,
                   log_guest_addr + offsetof(struct vring_used, idx),
                   sizeof(used->idx));
}
/* Event-loop callback for the backend UDP socket: receive one datagram,
 * prepend a zeroed virtio-net header and post the result on the guest's
 * RX virtqueue (vq[0]).  Drops nothing: if no RX descriptor is available
 * the datagram stays queued in the socket for the next callback. */
static void
vubr_backend_recv_cb(int sock, void *ctx)
{
    VubrDev *dev = (VubrDev *) ctx;
    VubrVirtq *rx_vq = &dev->vq[0];
    uint8_t buf[4096];
    struct virtio_net_hdr_v1 *hdr = (struct virtio_net_hdr_v1 *)buf;
    int hdrlen = dev->hdrlen;
    int buflen = sizeof(buf);
    int len;

    if (!dev->ready) {
        return;
    }

    DPRINT("\n\n *** IN UDP RECEIVE CALLBACK ***\n\n");
    DPRINT(" hdrlen = %d\n", hdrlen);

    uint16_t avail_index = atomic_mb_read(&rx_vq->avail->idx);

    /* If there is no available descriptors, just do nothing.
     * The buffer will be handled by next arrived UDP packet,
     * or next kick on receive virtq. */
    if (rx_vq->last_avail_index == avail_index) {
        DPRINT("Got UDP packet, but no available descriptors on RX virtq.\n");
        return;
    }

    memset(buf, 0, hdrlen);
    /* TODO: support mergeable buffers. */
    /* 12-byte header (VIRTIO 1.0 / mergeable RX) carries num_buffers. */
    if (hdrlen == 12)
        hdr->num_buffers = 1;
    len = vubr_backend_udp_recvbuf(dev, buf + hdrlen, buflen - hdrlen);

    vubr_post_buffer(dev, rx_vq, buf, len + hdrlen);
}
749 static void
750 vubr_kick_cb(int sock, void *ctx)
752 VubrDev *dev = (VubrDev *) ctx;
753 eventfd_t kick_data;
754 ssize_t rc;
756 rc = eventfd_read(sock, &kick_data);
757 if (rc == -1) {
758 vubr_die("eventfd_read()");
759 } else {
760 DPRINT("Got kick_data: %016"PRIx64"\n", kick_data);
761 vubr_process_avail(dev, &dev->vq[1]);
765 static int
766 vubr_none_exec(VubrDev *dev, VhostUserMsg *vmsg)
768 DPRINT("Function %s() not implemented yet.\n", __func__);
769 return 0;
772 static int
773 vubr_get_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
775 vmsg->payload.u64 =
776 ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
777 (1ULL << VHOST_F_LOG_ALL) |
778 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
779 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
781 vmsg->size = sizeof(vmsg->payload.u64);
783 DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
785 /* Reply */
786 return 1;
789 static int
790 vubr_set_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
792 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
794 dev->features = vmsg->payload.u64;
795 if ((dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
796 (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))) {
797 dev->hdrlen = 12;
798 } else {
799 dev->hdrlen = 10;
802 return 0;
805 static int
806 vubr_set_owner_exec(VubrDev *dev, VhostUserMsg *vmsg)
808 return 0;
811 static void
812 vubr_close_log(VubrDev *dev)
814 if (dev->log_table) {
815 if (munmap(dev->log_table, dev->log_size) != 0) {
816 vubr_die("munmap()");
819 dev->log_table = 0;
821 if (dev->log_call_fd != -1) {
822 close(dev->log_call_fd);
823 dev->log_call_fd = -1;
827 static int
828 vubr_reset_device_exec(VubrDev *dev, VhostUserMsg *vmsg)
830 vubr_close_log(dev);
831 dev->ready = 0;
832 dev->features = 0;
833 return 0;
/* VHOST_USER_SET_MEM_TABLE: mmap each announced guest memory region from
 * the fd passed alongside the message and record the translation info
 * used by gpa_to_va()/qva_to_va().
 * NOTE(review): previously-established mappings are not unmapped here --
 * repeated SET_MEM_TABLE messages leak the old mappings; verify against
 * expected QEMU usage. */
static int
vubr_set_mem_table_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory *memory = &vmsg->payload.memory;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VubrDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT(" memory_size: 0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT(" userspace_addr 0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT(" mmap_offset 0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages. */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vubr_die("mmap");
        }
        dev_region->mmap_addr = (uint64_t) mmap_addr;
        DPRINT(" mmap_addr: 0x%016"PRIx64"\n", dev_region->mmap_addr);

        /* The mapping keeps the memory alive; drop our fd reference. */
        close(vmsg->fds[i]);
    }

    return 0;
}
883 static int
884 vubr_set_log_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
886 int fd;
887 uint64_t log_mmap_size, log_mmap_offset;
888 void *rc;
890 assert(vmsg->fd_num == 1);
891 fd = vmsg->fds[0];
893 assert(vmsg->size == sizeof(vmsg->payload.log));
894 log_mmap_offset = vmsg->payload.log.mmap_offset;
895 log_mmap_size = vmsg->payload.log.mmap_size;
896 DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
897 DPRINT("Log mmap_size: %"PRId64"\n", log_mmap_size);
899 rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
900 log_mmap_offset);
901 if (rc == MAP_FAILED) {
902 vubr_die("mmap");
904 dev->log_table = rc;
905 dev->log_size = log_mmap_size;
907 vmsg->size = sizeof(vmsg->payload.u64);
908 /* Reply */
909 return 1;
912 static int
913 vubr_set_log_fd_exec(VubrDev *dev, VhostUserMsg *vmsg)
915 assert(vmsg->fd_num == 1);
916 dev->log_call_fd = vmsg->fds[0];
917 DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
918 return 0;
921 static int
922 vubr_set_vring_num_exec(VubrDev *dev, VhostUserMsg *vmsg)
924 unsigned int index = vmsg->payload.state.index;
925 unsigned int num = vmsg->payload.state.num;
927 DPRINT("State.index: %d\n", index);
928 DPRINT("State.num: %d\n", num);
929 dev->vq[index].size = num;
930 return 0;
/* VHOST_USER_SET_VRING_ADDR: translate the QEMU-virtual ring addresses
 * into our own address space and resynchronise last_used_index with the
 * index currently published in guest memory. */
static int
vubr_set_vring_addr_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr *vra = &vmsg->payload.addr;
    unsigned int index = vra->index;
    VubrVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT(" index: %d\n", vra->index);
    DPRINT(" flags: %d\n", vra->flags);
    DPRINT(" desc_user_addr: 0x%016llx\n", vra->desc_user_addr);
    DPRINT(" used_user_addr: 0x%016llx\n", vra->used_user_addr);
    DPRINT(" avail_user_addr: 0x%016llx\n", vra->avail_user_addr);
    DPRINT(" log_guest_addr: 0x%016llx\n", vra->log_guest_addr);

    vq->desc = (struct vring_desc *)qva_to_va(dev, vra->desc_user_addr);
    vq->used = (struct vring_used *)qva_to_va(dev, vra->used_user_addr);
    vq->avail = (struct vring_avail *)qva_to_va(dev, vra->avail_user_addr);
    /* Guest-physical address of the used ring, needed for dirty logging. */
    vq->log_guest_addr = vra->log_guest_addr;

    DPRINT("Setting virtq addresses:\n");
    DPRINT(" vring_desc at %p\n", vq->desc);
    DPRINT(" vring_used at %p\n", vq->used);
    DPRINT(" vring_avail at %p\n", vq->avail);

    /* Pick up where the guest left off (e.g. after migration). */
    vq->last_used_index = vq->used->idx;
    return 0;
}
962 static int
963 vubr_set_vring_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
965 unsigned int index = vmsg->payload.state.index;
966 unsigned int num = vmsg->payload.state.num;
968 DPRINT("State.index: %d\n", index);
969 DPRINT("State.num: %d\n", num);
970 dev->vq[index].last_avail_index = num;
972 return 0;
/* VHOST_USER_GET_VRING_BASE: report how far we consumed the avail ring
 * and quiesce the queue -- QEMU sends this when stopping a vring. */
static int
vubr_get_vring_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %d\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_index;
    vmsg->size = sizeof(vmsg->payload.state);
    /* FIXME: this is a work-around for a bug in QEMU enabling
     * too early vrings. When protocol features are enabled,
     * we have to respect * VHOST_USER_SET_VRING_ENABLE request. */
    dev->ready = 0;

    /* Release the queue's eventfds and stop watching them. */
    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dispatcher_remove(&dev->dispatcher, dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        close(dev->vq[index].kick_fd);
        dispatcher_remove(&dev->dispatcher, dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    /* Reply */
    return 1;
}
/* VHOST_USER_SET_VRING_KICK: install the eventfd the guest signals when it
 * adds buffers to the queue.  For TX queues (odd indices) the fd is also
 * registered with the dispatcher so kicks drive vubr_kick_cb(). */
static int
vubr_set_vring_kick_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    uint64_t u64_arg = vmsg->payload.u64;
    int index = u64_arg & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    /* We only support the fd-carrying form of this message. */
    assert((u64_arg & VHOST_USER_VRING_NOFD_MASK) == 0);
    assert(vmsg->fd_num == 1);

    if (dev->vq[index].kick_fd != -1) {
        close(dev->vq[index].kick_fd);
        dispatcher_remove(&dev->dispatcher, dev->vq[index].kick_fd);
    }
    dev->vq[index].kick_fd = vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);

    if (index % 2 == 1) {
        /* TX queue. */
        dispatcher_add(&dev->dispatcher, dev->vq[index].kick_fd,
                       dev, vubr_kick_cb);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }
    /* We temporarily use this hack to determine that both TX and RX
     * queues are set up and ready for processing.
     * FIXME: we need to rely in VHOST_USER_SET_VRING_ENABLE and
     * actual kicks. */
    if (dev->vq[0].kick_fd != -1 &&
        dev->vq[1].kick_fd != -1) {
        dev->ready = 1;
        DPRINT("vhost-user-bridge is ready for processing queues.\n");
    }
    return 0;
}
/* VHOST_USER_SET_VRING_CALL: install the eventfd we signal to interrupt
 * the guest (used by vubr_virtqueue_kick()). */
static int
vubr_set_vring_call_exec(VubrDev *dev, VhostUserMsg *vmsg)
{
    uint64_t u64_arg = vmsg->payload.u64;
    int index = u64_arg & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
    /* We only support the fd-carrying form of this message. */
    assert((u64_arg & VHOST_USER_VRING_NOFD_MASK) == 0);
    assert(vmsg->fd_num == 1);

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dispatcher_remove(&dev->dispatcher, dev->vq[index].call_fd);
    }
    dev->vq[index].call_fd = vmsg->fds[0];
    DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);

    return 0;
}
1062 static int
1063 vubr_set_vring_err_exec(VubrDev *dev, VhostUserMsg *vmsg)
1065 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1066 return 0;
1069 static int
1070 vubr_get_protocol_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
1072 vmsg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
1073 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1074 vmsg->size = sizeof(vmsg->payload.u64);
1076 /* Reply */
1077 return 1;
1080 static int
1081 vubr_set_protocol_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
1083 /* FIXME: unimplented */
1084 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1085 return 0;
1088 static int
1089 vubr_get_queue_num_exec(VubrDev *dev, VhostUserMsg *vmsg)
1091 DPRINT("Function %s() not implemented yet.\n", __func__);
1092 return 0;
1095 static int
1096 vubr_set_vring_enable_exec(VubrDev *dev, VhostUserMsg *vmsg)
1098 unsigned int index = vmsg->payload.state.index;
1099 unsigned int enable = vmsg->payload.state.num;
1101 DPRINT("State.index: %d\n", index);
1102 DPRINT("State.enable: %d\n", enable);
1103 dev->vq[index].enable = enable;
1104 return 0;
1107 static int
1108 vubr_send_rarp_exec(VubrDev *dev, VhostUserMsg *vmsg)
1110 DPRINT("Function %s() not implemented yet.\n", __func__);
1111 return 0;
/* Trace the incoming message and dispatch it to the per-request handler.
 * Returns the handler's value: non-zero means the (modified) vmsg must be
 * written back to QEMU as a reply. */
static int
vubr_execute_request(VubrDev *dev, VhostUserMsg *vmsg)
{
    /* Print out generic part of the request. */
    DPRINT(
        "================== Vhost user message from QEMU ==================\n");
    DPRINT("Request: %s (%d)\n", vubr_request_str[vmsg->request],
           vmsg->request);
    DPRINT("Flags: 0x%x\n", vmsg->flags);
    DPRINT("Size: %d\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    switch (vmsg->request) {
    case VHOST_USER_NONE:
        return vubr_none_exec(dev, vmsg);
    case VHOST_USER_GET_FEATURES:
        return vubr_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vubr_set_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vubr_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vubr_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vubr_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vubr_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vubr_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vubr_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vubr_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vubr_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vubr_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vubr_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vubr_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vubr_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vubr_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vubr_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vubr_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vubr_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SEND_RARP:
        return vubr_send_rarp_exec(dev, vmsg);

    case VHOST_USER_MAX:
        /* Not a real request: reaching here means a corrupt message. */
        assert(vmsg->request != VHOST_USER_MAX);
    }
    return 0;
}
/* Event-loop callback for a connected vhost-user socket: read one message,
 * execute it, and write the reply back if the handler requested one. */
static void
vubr_receive_cb(int sock, void *ctx)
{
    VubrDev *dev = (VubrDev *) ctx;
    VhostUserMsg vmsg;
    int reply_requested;

    vubr_message_read(sock, &vmsg);
    reply_requested = vubr_execute_request(dev, &vmsg);
    if (reply_requested) {
        /* Set the version in the flags when sending the reply */
        vmsg.flags &= ~VHOST_USER_VERSION_MASK;
        vmsg.flags |= VHOST_USER_VERSION;
        vmsg.flags |= VHOST_USER_REPLY_MASK;
        vubr_message_write(sock, &vmsg);
    }
}
1200 static void
1201 vubr_accept_cb(int sock, void *ctx)
1203 VubrDev *dev = (VubrDev *)ctx;
1204 int conn_fd;
1205 struct sockaddr_un un;
1206 socklen_t len = sizeof(un);
1208 conn_fd = accept(sock, (struct sockaddr *) &un, &len);
1209 if (conn_fd == -1) {
1210 vubr_die("accept()");
1212 DPRINT("Got connection from remote peer on sock %d\n", conn_fd);
1213 dispatcher_add(&dev->dispatcher, conn_fd, ctx, vubr_receive_cb);
1216 static VubrDev *
1217 vubr_new(const char *path)
1219 VubrDev *dev = (VubrDev *) calloc(1, sizeof(VubrDev));
1220 dev->nregions = 0;
1221 int i;
1222 struct sockaddr_un un;
1223 size_t len;
1225 for (i = 0; i < MAX_NR_VIRTQUEUE; i++) {
1226 dev->vq[i] = (VubrVirtq) {
1227 .call_fd = -1, .kick_fd = -1,
1228 .size = 0,
1229 .last_avail_index = 0, .last_used_index = 0,
1230 .desc = 0, .avail = 0, .used = 0,
1231 .enable = 0,
1235 /* Init log */
1236 dev->log_call_fd = -1;
1237 dev->log_size = 0;
1238 dev->log_table = 0;
1239 dev->ready = 0;
1240 dev->features = 0;
1242 /* Get a UNIX socket. */
1243 dev->sock = socket(AF_UNIX, SOCK_STREAM, 0);
1244 if (dev->sock == -1) {
1245 vubr_die("socket");
1248 un.sun_family = AF_UNIX;
1249 strcpy(un.sun_path, path);
1250 len = sizeof(un.sun_family) + strlen(path);
1251 unlink(path);
1253 if (bind(dev->sock, (struct sockaddr *) &un, len) == -1) {
1254 vubr_die("bind");
1257 if (listen(dev->sock, 1) == -1) {
1258 vubr_die("listen");
1261 dispatcher_init(&dev->dispatcher);
1262 dispatcher_add(&dev->dispatcher, dev->sock, (void *)dev,
1263 vubr_accept_cb);
1265 DPRINT("Waiting for connections on UNIX socket %s ...\n", path);
1266 return dev;
1269 static void
1270 vubr_set_host(struct sockaddr_in *saddr, const char *host)
1272 if (isdigit(host[0])) {
1273 if (!inet_aton(host, &saddr->sin_addr)) {
1274 fprintf(stderr, "inet_aton() failed.\n");
1275 exit(1);
1277 } else {
1278 struct hostent *he = gethostbyname(host);
1280 if (!he) {
1281 fprintf(stderr, "gethostbyname() failed.\n");
1282 exit(1);
1284 saddr->sin_addr = *(struct in_addr *)he->h_addr;
1288 static void
1289 vubr_backend_udp_setup(VubrDev *dev,
1290 const char *local_host,
1291 const char *local_port,
1292 const char *remote_host,
1293 const char *remote_port)
1295 int sock;
1296 const char *r;
1298 int lport, rport;
1300 lport = strtol(local_port, (char **)&r, 0);
1301 if (r == local_port) {
1302 fprintf(stderr, "lport parsing failed.\n");
1303 exit(1);
1306 rport = strtol(remote_port, (char **)&r, 0);
1307 if (r == remote_port) {
1308 fprintf(stderr, "rport parsing failed.\n");
1309 exit(1);
1312 struct sockaddr_in si_local = {
1313 .sin_family = AF_INET,
1314 .sin_port = htons(lport),
1317 vubr_set_host(&si_local, local_host);
1319 /* setup destination for sends */
1320 dev->backend_udp_dest = (struct sockaddr_in) {
1321 .sin_family = AF_INET,
1322 .sin_port = htons(rport),
1324 vubr_set_host(&dev->backend_udp_dest, remote_host);
1326 sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
1327 if (sock == -1) {
1328 vubr_die("socket");
1331 if (bind(sock, (struct sockaddr *)&si_local, sizeof(si_local)) == -1) {
1332 vubr_die("bind");
1335 dev->backend_udp_sock = sock;
1336 dispatcher_add(&dev->dispatcher, sock, dev, vubr_backend_recv_cb);
1337 DPRINT("Waiting for data from udp backend on %s:%d...\n",
1338 local_host, lport);
1341 static void
1342 vubr_run(VubrDev *dev)
1344 while (1) {
1345 /* timeout 200ms */
1346 dispatcher_wait(&dev->dispatcher, 200000);
1347 /* Here one can try polling strategy. */
/* Copy @len bytes of @s into a fresh NUL-terminated heap string.
 * Returns NULL on allocation failure. */
static char *
vubr_strndup_local(const char *s, size_t len)
{
    char *copy = malloc(len + 1);

    if (copy) {
        memcpy(copy, s, len);
        copy[len] = '\0';
    }
    return copy;
}

/*
 * Split "host:port" at the first ':' into two freshly allocated strings
 * stored in *@host and *@port (caller owns both).
 *
 * Returns 0 on success, -1 if @buf has no ':' or allocation fails.
 * Unlike a destructive split, @buf is left unmodified: the original
 * wrote '\0' through a pointer derived from the const-qualified
 * parameter, which is undefined behavior for genuinely const input.
 */
static int
vubr_parse_host_port(const char **host, const char **port, const char *buf)
{
    const char *sep = strchr(buf, ':');

    if (!sep) {
        return -1;
    }

    *host = vubr_strndup_local(buf, (size_t)(sep - buf));
    *port = vubr_strndup_local(sep + 1, strlen(sep + 1));
    if (!*host || !*port) {
        free((char *)*host);
        free((char *)*port);
        *host = NULL;
        *port = NULL;
        return -1;
    }
    return 0;
}
/* Built-in defaults: control-plane UNIX socket path and the local/remote
 * UDP endpoints used as the network backend. */
#define DEFAULT_UD_SOCKET "/tmp/vubr.sock"
#define DEFAULT_LHOST "127.0.0.1"
#define DEFAULT_LPORT "4444"
#define DEFAULT_RHOST "127.0.0.1"
#define DEFAULT_RPORT "5555"

/* Runtime settings; overridable via the -u/-l/-r options (see main()). */
static const char *ud_socket_path = DEFAULT_UD_SOCKET;
static const char *lhost = DEFAULT_LHOST;
static const char *lport = DEFAULT_LPORT;
static const char *rhost = DEFAULT_RHOST;
static const char *rport = DEFAULT_RPORT;
1378 main(int argc, char *argv[])
1380 VubrDev *dev;
1381 int opt;
1383 while ((opt = getopt(argc, argv, "l:r:u:")) != -1) {
1385 switch (opt) {
1386 case 'l':
1387 if (vubr_parse_host_port(&lhost, &lport, optarg) < 0) {
1388 goto out;
1390 break;
1391 case 'r':
1392 if (vubr_parse_host_port(&rhost, &rport, optarg) < 0) {
1393 goto out;
1395 break;
1396 case 'u':
1397 ud_socket_path = strdup(optarg);
1398 break;
1399 default:
1400 goto out;
1404 DPRINT("ud socket: %s\n", ud_socket_path);
1405 DPRINT("local: %s:%s\n", lhost, lport);
1406 DPRINT("remote: %s:%s\n", rhost, rport);
1408 dev = vubr_new(ud_socket_path);
1409 if (!dev) {
1410 return 1;
1413 vubr_backend_udp_setup(dev, lhost, lport, rhost, rport);
1414 vubr_run(dev);
1415 return 0;
1417 out:
1418 fprintf(stderr, "Usage: %s ", argv[0]);
1419 fprintf(stderr, "[-u ud_socket_path] [-l lhost:lport] [-r rhost:rport]\n");
1420 fprintf(stderr, "\t-u path to unix doman socket. default: %s\n",
1421 DEFAULT_UD_SOCKET);
1422 fprintf(stderr, "\t-l local host and port. default: %s:%s\n",
1423 DEFAULT_LHOST, DEFAULT_LPORT);
1424 fprintf(stderr, "\t-r remote host and port. default: %s:%s\n",
1425 DEFAULT_RHOST, DEFAULT_RPORT);
1427 return 1;