/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/socket.h> /* memcpy_fromiovec */
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>

#include "vhost.h"
enum {
	VHOST_MEMORY_MAX_NREGIONS = 64,
	VHOST_MEMORY_F_LOG = 0x1,
};
#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
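
/* With VIRTIO_RING_F_EVENT_IDX negotiated, the guest publishes its used_event
 * value in the slot just past the avail ring, and the host keeps avail_event
 * just past the used ring, which is why both macros index ring[vq->num]. */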
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}
static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	INIT_LIST_HEAD(&work->node);
	work->fn = fn;
	init_waitqueue_head(&work->done);
	work->flushing = 0;
	work->queue_seq = work->done_seq = 0;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	int ret = 0;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
	if (mask & POLLERR) {
		if (poll->wqh)
			remove_wait_queue(poll->wqh, &poll->wait);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
				unsigned seq)
{
	int left;

	spin_lock_irq(&dev->work_lock);
	left = seq - work->done_seq;
	spin_unlock_irq(&dev->work_lock);
	return left <= 0;
}
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned seq;
	int flushing;

	spin_lock_irq(&dev->work_lock);
	seq = work->queue_seq;
	work->flushing++;
	spin_unlock_irq(&dev->work_lock);
	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
	spin_lock_irq(&dev->work_lock);
	flushing = --work->flushing;
	spin_unlock_irq(&dev->work_lock);
	BUG_ON(flushing < 0);
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->work_lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &dev->work_list);
		work->queue_seq++;
		wake_up_process(dev->worker);
	}
	spin_unlock_irqrestore(&dev->work_lock, flags);
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->last_avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->error_ctx = NULL;
}
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work = NULL;
	unsigned uninitialized_var(seq);
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&dev->work_lock);
		if (work) {
			work->done_seq = seq;
			if (work->flushing)
				wake_up_all(&work->done);
		}

		if (kthread_should_stop()) {
			spin_unlock_irq(&dev->work_lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		if (!list_empty(&dev->work_list)) {
			work = list_first_entry(&dev->work_list,
						struct vhost_work, node);
			list_del_init(&work->node);
			seq = work->queue_seq;
		} else
			work = NULL;
		spin_unlock_irq(&dev->work_lock);

		if (work) {
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		} else
			schedule();
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	kfree(vq->log);
	kfree(vq->heads);
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
				       GFP_KERNEL);
		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}
long vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->memory = NULL;
	dev->mm = NULL;
	spin_lock_init(&dev->work_lock);
	INIT_LIST_HEAD(&dev->work_list);
	dev->worker = NULL;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					POLLIN, dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}
static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}
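
/* The cgroup attach is deliberately run as a work item: queueing and then
 * flushing it guarantees vhost_attach_cgroups_work() has executed on the
 * worker thread itself, which is the task that must join the owner's
 * cgroups. */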
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
struct vhost_memory *vhost_dev_reset_owner_prepare(void)
{
	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
{
	vhost_dev_cleanup(dev, true);

	/* Restore memory to default empty mapping. */
	memory->nregions = 0;
	RCU_INIT_POINTER(dev->memory, memory);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->error)
			fput(dev->vqs[i]->error);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		if (dev->vqs[i]->call)
			fput(dev->vqs[i]->call);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kfree(rcu_dereference_protected(dev->memory,
					locked ==
						lockdep_is_held(&dev->mutex)));
	RCU_INIT_POINTER(dev->memory, NULL);
	WARN_ON(!list_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
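
/* The dirty log is a userspace bitmap with one bit per VHOST_PAGE_SIZE page
 * of guest memory: address 'addr' lands in bitmap byte addr/PAGE/8, and 'sz'
 * bytes of data touch at most (sz + PAGE*8 - 1)/(PAGE*8) bitmap bytes, which
 * is what the access_ok() length above computes. */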
/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
			       int log_all)
{
	int i;

	if (!mem)
		return 0;

	for (i = 0; i < mem->nregions; ++i) {
		struct vhost_memory_region *m = mem->regions + i;
		unsigned long a = m->userspace_addr;
		if (m->memory_size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    m->memory_size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   m->guest_phys_addr,
						   m->memory_size))
			return 0;
	}
	return 1;
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		mutex_lock(&d->vqs[i]->mutex);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
						 log_all);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return 0;
	}
	return 1;
}
static int vq_access_ok(struct vhost_dev *d, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	struct vhost_memory *mp;

	mp = rcu_dereference_protected(dev->memory,
				       lockdep_is_held(&dev->mutex));
	return memory_access_ok(dev, mp, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	struct vhost_memory *mp;
	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	mp = rcu_dereference_protected(vq->dev->memory,
				       lockdep_is_held(&vq->mutex));
	return vq_memory_access_ok(log_base, mp,
				   vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
						sizeof *vq->used +
						vq->num * sizeof *vq->used->ring + s));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq->dev, vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem, *oldmem;
	unsigned long size = offsetof(struct vhost_memory, regions);

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
		return -E2BIG;
	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kfree(newmem);
		return -EFAULT;
	}

	if (!memory_access_ok(d, newmem,
			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
		kfree(newmem);
		return -EFAULT;
	}
	oldmem = rcu_dereference_protected(d->memory,
					   lockdep_is_held(&d->mutex));
	rcu_assign_pointer(d->memory, newmem);
	synchronize_rcu();
	kfree(oldmem);
	return 0;
}
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs[idx];

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		 * data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}
		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(d, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(d, vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			d->log_file = eventfp;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
						     __u64 addr, __u32 len)
{
	struct vhost_memory_region *reg;
	int i;

	/* linear search is not brilliant, but we really have on the order of 6
	 * regions in practice */
	for (i = 0; i < mem->nregions; ++i) {
		reg = mem->regions + i;
		if (reg->guest_phys_addr <= addr &&
		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
			return reg;
	}
	return NULL;
}
/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt. */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;

	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}
*vq
, u16 avail_event
)
1016 if (__put_user(vq
->avail_idx
, vhost_avail_event(vq
)))
1018 if (unlikely(vq
->log_used
)) {
1020 /* Make sure the event is seen before log. */
1022 /* Log avail event write */
1023 used
= vhost_avail_event(vq
);
1024 log_write(vq
->log_base
, vq
->log_addr
+
1025 (used
- (void __user
*)vq
->used
),
1026 sizeof *vhost_avail_event(vq
));
1028 eventfd_signal(vq
->log_ctx
, 1);
int vhost_init_used(struct vhost_virtqueue *vq)
{
	int r;

	if (!vq->private_data)
		return 0;

	r = vhost_update_used_flags(vq);
	if (r)
		return r;
	vq->signalled_used_valid = false;
	return get_user(vq->last_used_idx, &vq->used->idx);
}
EXPORT_SYMBOL_GPL(vhost_init_used);
static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
			  struct iovec iov[], int iov_size)
{
	const struct vhost_memory_region *reg;
	struct vhost_memory *mem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	mem = rcu_dereference(dev->memory);
	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}
		reg = find_region(mem, addr, len);
		if (unlikely(!reg)) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		size = reg->memory_size - addr + reg->guest_phys_addr;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(reg->userspace_addr + addr - reg->guest_phys_addr);
		s += _iov->iov_len;
		addr += _iov->iov_len;
		++ret;
	}

	return ret;
}
/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & VRING_DESC_F_NEXT))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = desc->next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}
static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	int ret;

	/* Sanity check */
	if (unlikely(indirect->len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)indirect->len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
			     UIO_MAXIOV);
	if (unlikely(ret < 0)) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = indirect->len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
					      vq->indirect, sizeof desc))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & VRING_DESC_F_WRITE) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);
	return 0;
}
/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return -EFAULT;
	}

	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return -EFAULT;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(__get_user(head,
				&vq->avail->ring[last_avail_idx % vq->num]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			ret = get_indirect(dev, vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return ret;
		}
		if (desc.flags & VRING_DESC_F_WRITE) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem __user *used;

	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring. */
	used = &vq->used->ring[vq->last_used_idx % vq->num];
	if (__put_user(head, &used->id)) {
		vq_err(vq, "Failed to write used id");
		return -EFAULT;
	}
	if (__put_user(len, &used->len)) {
		vq_err(vq, "Failed to write used len");
		return -EFAULT;
	}
	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  sizeof *used);
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	vq->last_used_idx++;
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely(vq->last_used_idx == vq->signalled_used))
		vq->signalled_used_valid = false;
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_add_used);
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx % vq->num;
	used = vq->used->ring + start;
	if (__copy_to_user(used, heads, count * sizeof *used)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx % vq->num;
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (put_user(vq->last_used_idx, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new, event;
	bool v;

	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
		__u16 flags;
		if (__get_user(flags, &vq->avail->flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (get_user(event, vhost_used_event(vq))) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(event, new, old);
}
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest, telling them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);
/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	u16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = __get_user(avail_idx, &vq->avail->idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return avail_idx != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");