/* drivers/virtio/virtio_ring.c */

/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out_num: the number of sg readable by other side
 * @in_num: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->vq.num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

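/*
 * A minimal usage sketch: how a driver might expose one outgoing buffer with
 * virtqueue_add_buf() and then kick the host.  "my_vq" and "req" are
 * hypothetical driver-side names, not defined in this file.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, req, sizeof(*req));
 *	if (virtqueue_add_buf(my_vq, &sg, 1, 0, req, GFP_ATOMIC) < 0)
 *		return -ENOSPC;	(ring full: reclaim used buffers, then retry)
 *	virtqueue_kick(my_vq);
 */
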
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

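/*
 * A minimal usage sketch: the usual driver pattern for draining used buffers
 * while closing the race that virtqueue_enable_cb() reports.  "my_vq", "buf",
 * "len" and process() are hypothetical driver-side names.
 *
 *	do {
 *		virtqueue_disable_cb(my_vq);
 *		while ((buf = virtqueue_get_buf(my_vq, &len)) != NULL)
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(my_vq));
 */
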
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

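/*
 * A minimal usage sketch: a transport can pass this handler straight to
 * request_irq(), with the virtqueue as the cookie (virtio_pci does this for
 * its per-virtqueue MSI-X vectors).  "irq" and "name" are hypothetical here.
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED, name, vq);
 */
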
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

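/*
 * A minimal setup sketch: how a transport might allocate ring memory and
 * create a queue with vring_new_virtqueue().  "index", "num", "my_notify",
 * "my_callback" and the queue name are hypothetical; the page allocation
 * mirrors what virtio_mmio-style transports do.
 *
 *	size_t bytes = PAGE_ALIGN(vring_size(num, PAGE_SIZE));
 *	void *queue = alloc_pages_exact(bytes, GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq;
 *
 *	vq = vring_new_virtqueue(index, num, PAGE_SIZE, vdev, true, queue,
 *				 my_notify, my_callback, "my-vq");
 *	if (!vq)
 *		free_pages_exact(queue, bytes);
 */
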
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");