/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <net/sock.h>

#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

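/* Both handle_tx() and handle_rx() below accumulate the bytes they have
 * processed in total_len; once it reaches VHOST_NET_WEIGHT they requeue
 * themselves with vhost_poll_queue() and return, so one busy virtqueue
 * cannot monopolize the worker. */
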
enum {
        VHOST_NET_VQ_RX = 0,
        VHOST_NET_VQ_TX = 1,
        VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
        VHOST_NET_POLL_DISABLED = 0,
        VHOST_NET_POLL_STARTED = 1,
        VHOST_NET_POLL_STOPPED = 2,
};

struct vhost_net {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
        struct vhost_poll poll[VHOST_NET_VQ_MAX];
        /* Tells us whether we are polling a socket for TX.
         * We only do this when socket buffer fills up.
         * Protected by tx vq lock. */
        enum vhost_net_poll_state tx_poll_state;
};

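/* Note: tx_poll_state only tracks the TX queue. The RX side always keeps a
 * vhost_poll registered on the backend socket while a backend is attached
 * (see vhost_net_enable_vq()); TX only polls the socket while its send
 * buffer is full. */
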
/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
                          size_t len, int iov_count)
{
        int seg = 0;
        size_t size;

        while (len && seg < iov_count) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                from->iov_len -= size;
                from->iov_base += size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
        return seg;
}

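/* Example (mirroring the TX path below): with hdr_size bytes of
 * struct virtio_net_hdr at the front of the guest buffers,
 *
 *      s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
 *
 * redirects the first hdr_size bytes worth of iovec entries into vq->hdr
 * (using s entries) and advances vq->iov past them; no data is copied, and
 * only the packet payload is left for sendmsg(). */
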
/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
        if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
                return;
        vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
        net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
        if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
                return;
        vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
        net->tx_poll_state = VHOST_NET_POLL_STARTED;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
        unsigned head, out, in, s;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL,
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        size_t len, total_len = 0;
        int err, wmem;
        size_t hdr_size;
        struct socket *sock = rcu_dereference(vq->private_data);
        if (!sock)
                return;

        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
        if (wmem >= sock->sk->sk_sndbuf)
                return;

        use_mm(net->dev.mm);
        mutex_lock(&vq->mutex);
        vhost_disable_notify(vq);

        if (wmem < sock->sk->sk_sndbuf * 2)
                tx_poll_stop(net);
        hdr_size = vq->hdr_size;

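        /* Main TX loop: pull available descriptors from the TX ring, strip
         * the virtio_net_hdr and hand the payload to the backend socket with
         * sendmsg(); used entries are reported back with a length of 0. */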
        for (;;) {
                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         NULL, NULL);
                /* Nothing new? Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
                        if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
                                tx_poll_start(net, sock);
                                set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
                                break;
                        }
                        if (unlikely(vhost_enable_notify(vq))) {
                                vhost_disable_notify(vq);
                                continue;
                        }
                        break;
                }
                if (in) {
                        vq_err(vq, "Unexpected descriptor format for TX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                /* Skip header. TODO: support TSO. */
                s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
                msg.msg_iovlen = out;
                len = iov_length(vq->iov, out);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
                               iov_length(vq->hdr, s), hdr_size);
                        break;
                }
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
                        vhost_discard_vq_desc(vq);
                        tx_poll_start(net, sock);
                        break;
                }
                if (err != len)
                        pr_err("Truncated TX packet: "
                               " len %d != %zd\n", err, len);
                vhost_add_used_and_signal(&net->dev, vq, head, 0);
                total_len += len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
        unuse_mm(net->dev.mm);
}

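/* Note the asymmetry with handle_rx() below: TX buffers are consumed by the
 * host, so they are returned to the guest with a used length of 0, while RX
 * buffers are returned with the number of bytes actually written (header
 * plus packet). */
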
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
        unsigned head, out, in, log, s;
        struct vhost_log *vq_log;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL, /* FIXME: get and handle RX aux data. */
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };

        struct virtio_net_hdr hdr = {
                .flags = 0,
                .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };

        size_t len, total_len = 0;
        int err;
        size_t hdr_size;
        struct socket *sock = rcu_dereference(vq->private_data);
        if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
                return;

        use_mm(net->dev.mm);
        mutex_lock(&vq->mutex);
        vhost_disable_notify(vq);
        hdr_size = vq->hdr_size;

        vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
                vq->log : NULL;

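        /* vq_log is only non-NULL when VHOST_F_LOG_ALL has been negotiated;
         * in that case every write into guest memory is recorded via
         * vhost_log_write() so userspace can track dirty pages (e.g. during
         * live migration). */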
        for (;;) {
                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         vq_log, &log);
                /* OK, now we need to know about added descriptors. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(vq))) {
                                /* They have slipped one in as we were
                                 * doing that: check again. */
                                vhost_disable_notify(vq);
                                continue;
                        }
                        /* Nothing new? Wait for eventfd to tell us
                         * they refilled. */
                        break;
                }
                /* We don't need to be notified again. */
                if (out) {
                        vq_err(vq, "Unexpected descriptor format for RX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                /* Skip header. TODO: support TSO/mergeable rx buffers. */
                s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
                msg.msg_iovlen = in;
                len = iov_length(vq->iov, in);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected header len for RX: "
                               "%zd expected %zd\n",
                               iov_length(vq->hdr, s), hdr_size);
                        break;
                }
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         len, MSG_DONTWAIT | MSG_TRUNC);
                /* TODO: Check specific error and bomb out unless EAGAIN? */
                if (err < 0) {
                        vhost_discard_vq_desc(vq);
                        break;
                }
                /* TODO: Should check and handle checksum. */
                if (err > len) {
                        pr_err("Discarded truncated rx packet: "
                               " len %d > %zd\n", err, len);
                        vhost_discard_vq_desc(vq);
                        continue;
                }
                len = err;
                err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
                if (err) {
                        vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
                               vq->iov->iov_base, err);
                        break;
                }
                len += hdr_size;
                vhost_add_used_and_signal(&net->dev, vq, head, len);
                if (unlikely(vq_log))
                        vhost_log_write(vq, vq_log, log, len);
                total_len += len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
        unuse_mm(net->dev.mm);
}

static void handle_tx_kick(struct work_struct *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_net *net;

        vq = container_of(work, struct vhost_virtqueue, poll.work);
        net = container_of(vq->dev, struct vhost_net, dev);
        handle_tx(net);
}

static void handle_rx_kick(struct work_struct *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_net *net;

        vq = container_of(work, struct vhost_virtqueue, poll.work);
        net = container_of(vq->dev, struct vhost_net, dev);
        handle_rx(net);
}

static void handle_tx_net(struct work_struct *work)
{
        struct vhost_net *net;

        net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
        handle_tx(net);
}

static void handle_rx_net(struct work_struct *work)
{
        struct vhost_net *net;

        net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
        handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
        struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
        int r;

        if (!n)
                return -ENOMEM;
        n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
        n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
        r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
        if (r < 0) {
                kfree(n);
                return r;
        }

        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
        n->tx_poll_state = VHOST_NET_POLL_DISABLED;

        f->private_data = n;

        return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
                                 struct vhost_virtqueue *vq)
{
        if (!vq->private_data)
                return;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                tx_poll_stop(n);
                n->tx_poll_state = VHOST_NET_POLL_DISABLED;
        } else
                vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
{
        struct socket *sock = vq->private_data;

        if (!sock)
                return;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                n->tx_poll_state = VHOST_NET_POLL_STOPPED;
                tx_poll_start(n, sock);
        } else
                vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
                                        struct vhost_virtqueue *vq)
{
        struct socket *sock;

        mutex_lock(&vq->mutex);
        sock = vq->private_data;
        vhost_net_disable_vq(n, vq);
        rcu_assign_pointer(vq->private_data, NULL);
        mutex_unlock(&vq->mutex);
        return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
                           struct socket **rx_sock)
{
        *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
        *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
        vhost_poll_flush(n->poll + index);
        vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
        vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
        vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
        struct vhost_net *n = f->private_data;
        struct socket *tx_sock;
        struct socket *rx_sock;

        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        vhost_dev_cleanup(&n->dev);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n);
        return 0;
}

static struct socket *get_raw_socket(int fd)
{
        struct {
                struct sockaddr_ll sa;
                char buf[MAX_ADDR_LEN];
        } uaddr;
        int uaddr_len = sizeof uaddr, r;
        struct socket *sock = sockfd_lookup(fd, &r);

        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        /* Parameter checking */
        if (sock->sk->sk_type != SOCK_RAW) {
                r = -ESOCKTNOSUPPORT;
                goto err;
        }

        r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
                               &uaddr_len, 0);
        if (r)
                goto err;

        if (uaddr.sa.sll_family != AF_PACKET) {
                r = -EPFNOSUPPORT;
                goto err;
        }
        return sock;
err:
        fput(sock->file);
        return ERR_PTR(r);
}

static struct socket *get_tun_socket(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);
        sock = tun_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
}

static struct socket *get_socket(int fd)
{
        struct socket *sock;

        /* special case to disable backend */
        if (fd == -1)
                return NULL;
        sock = get_raw_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        sock = get_tun_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        return ERR_PTR(-ENOTSOCK);
}

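/* The backend handed to VHOST_NET_SET_BACKEND may therefore be either a
 * packet (AF_PACKET/SOCK_RAW) socket or a tun/tap file descriptor; passing
 * fd == -1 detaches the current backend. */
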
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
        struct socket *sock, *oldsock;
        struct vhost_virtqueue *vq;
        int r;

        mutex_lock(&n->dev.mutex);
        r = vhost_dev_check_owner(&n->dev);
        if (r)
                goto err;

        if (index >= VHOST_NET_VQ_MAX) {
                r = -ENOBUFS;
                goto err;
        }
        vq = n->vqs + index;
        mutex_lock(&vq->mutex);

        /* Verify that ring has been setup correctly. */
        if (!vhost_vq_access_ok(vq)) {
                r = -EFAULT;
                goto err_vq;
        }
        sock = get_socket(fd);
        if (IS_ERR(sock)) {
                r = PTR_ERR(sock);
                goto err_vq;
        }

        /* start polling new socket */
        oldsock = vq->private_data;
        if (sock != oldsock) {
                vhost_net_disable_vq(n, vq);
                rcu_assign_pointer(vq->private_data, sock);
                vhost_net_enable_vq(n, vq);
        }
        mutex_unlock(&vq->mutex);

        if (oldsock) {
                vhost_net_flush_vq(n, index);
                fput(oldsock->file);
        }
        mutex_unlock(&n->dev.mutex);
        return 0;

err_vq:
        mutex_unlock(&vq->mutex);
err:
        mutex_unlock(&n->dev.mutex);
        return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
        struct socket *tx_sock = NULL;
        struct socket *rx_sock = NULL;
        long err;

        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        err = vhost_dev_reset_owner(&n->dev);
done:
        mutex_unlock(&n->dev.mutex);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
        size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
                sizeof(struct virtio_net_hdr) : 0;
        int i;

        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
                mutex_unlock(&n->dev.mutex);
                return -EFAULT;
        }
        n->dev.acked_features = features;
        smp_wmb();
        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                mutex_lock(&n->vqs[i].mutex);
                n->vqs[i].hdr_size = hdr_size;
                mutex_unlock(&n->vqs[i].mutex);
        }
        vhost_net_flush(n);
        mutex_unlock(&n->dev.mutex);
        return 0;
}

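/* When VHOST_NET_F_VIRTIO_NET_HDR is negotiated, vhost itself strips the
 * virtio_net_hdr on TX and writes one on RX (hdr_size != 0). Otherwise
 * hdr_size is 0 and the header is expected to be produced and consumed by
 * the backend, e.g. a tap device opened with IFF_VNET_HDR. */
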
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                            unsigned long arg)
{
        struct vhost_net *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        struct vhost_vring_file backend;
        u64 features;
        long r;

        switch (ioctl) {
        case VHOST_NET_SET_BACKEND:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                return vhost_net_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                if (features & ~VHOST_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_net_set_features(n, features);
        case VHOST_RESET_OWNER:
                return vhost_net_reset_owner(n);
        default:
                mutex_lock(&n->dev.mutex);
                r = vhost_dev_ioctl(&n->dev, ioctl, arg);
                vhost_net_flush(n);
                mutex_unlock(&n->dev.mutex);
                return r;
        }
}

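/* Userspace usage sketch (not part of this driver): a VMM such as qemu would
 * typically drive these ioctls roughly as follows. This is illustrative only;
 * error handling, memory-table setup (VHOST_SET_MEM_TABLE) and the per-ring
 * VHOST_SET_VRING_* / kick / call eventfd configuration are omitted, and
 * "tap_fd" stands for an already-open tun/tap descriptor.
 *
 *      int vhost_fd = open("/dev/vhost-net", O_RDWR);
 *      uint64_t features;
 *      struct vhost_vring_file backend;
 *
 *      ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *      ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *      // mask to the subset the VMM supports, then acknowledge
 *      ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 *
 *      backend.index = 0;      // RX queue
 *      backend.fd = tap_fd;
 *      ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
 *      backend.index = 1;      // TX queue
 *      ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
 */
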
#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
                                   unsigned long arg)
{
        return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_net_release,
        .unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_net_compat_ioctl,
#endif
        .open           = vhost_net_open,
};

static struct miscdevice vhost_net_misc = {
        MISC_DYNAMIC_MINOR,
        "vhost-net",
        &vhost_net_fops,
};

int vhost_net_init(void)
{
        int r = vhost_init();
        if (r)
                goto err_init;
        r = misc_register(&vhost_net_misc);
        if (r)
                goto err_reg;
        return 0;

err_reg:
        vhost_cleanup();
err_init:
        return r;
}
module_init(vhost_net_init);

void vhost_net_exit(void)
{
        misc_deregister(&vhost_net_misc);
        vhost_cleanup();
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");