qemu.git: linux-headers/linux/virtio_ring.h
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM
 * and lguest, but hopefully others soon. Do NOT change this since it will
 * break existing servers and clients.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright Rusty Russell IBM Corporation 2007. */
#include <linux/types.h>
/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4

/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
 * will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer. It's unreliable, so it's simply an
 * optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
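/* Illustrative sketch, not part of the ABI: how each side might honour the
 * two flags above.  notify_host() and interrupt_guest() are hypothetical
 * helpers, and "vr" is a struct vring (defined below) mapped by both sides.
 *
 *      // Guest side, after publishing a buffer in the avail ring:
 *      if (!(vr.used->flags & VRING_USED_F_NO_NOTIFY))
 *              notify_host();          // kick the device
 *
 *      // Host side, after filling in a used ring entry:
 *      if (!(vr.avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
 *              interrupt_guest();      // inject the virtqueue interrupt
 *
 * Both flags are only hints: the side that sets one must still behave
 * correctly if a notification arrives anyway. */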
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29
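/* Sketch only: these two values are feature *bit numbers*, so a driver would
 * typically test them against the feature word offered by the device before
 * relying on either mechanism ("host_features" is an assumed variable, not
 * part of this header):
 *
 *      if (host_features & (1u << VIRTIO_RING_F_EVENT_IDX)) {
 *              // safe to use vring_used_event()/vring_avail_event() and
 *              // vring_need_event(), defined further down
 *      }
 */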
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
        /* Address (guest-physical). */
        __u64 addr;
        /* Length. */
        __u32 len;
        /* The flags as indicated above. */
        __u16 flags;
        /* We chain unused descriptors via this, too */
        __u16 next;
};
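/* Illustrative sketch, not part of the ABI: a two-descriptor chain as a
 * driver might build it, with a device-readable request header followed by a
 * device-writable data buffer.  req_pa, buf_pa, buf_len and struct my_req are
 * assumed names; "desc" is the queue's descriptor table.
 *
 *      desc[0].addr  = req_pa;                 // guest-physical address
 *      desc[0].len   = sizeof(struct my_req);
 *      desc[0].flags = VRING_DESC_F_NEXT;      // read-only, chain continues
 *      desc[0].next  = 1;
 *
 *      desc[1].addr  = buf_pa;
 *      desc[1].len   = buf_len;
 *      desc[1].flags = VRING_DESC_F_WRITE;     // device writes here; end of chain
 *      desc[1].next  = 0;                      // ignored without F_NEXT
 *
 * Only the index of the chain head (desc[0]) is then placed in the avail ring. */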
struct vring_avail {
        __u16 flags;
        __u16 idx;
        __u16 ring[];
};

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
        /* Index of start of used descriptor chain. */
        __u32 id;
        /* Total length of the descriptor chain which was used (written to) */
        __u32 len;
};

struct vring_used {
        __u16 flags;
        __u16 idx;
        struct vring_used_elem ring[];
};

struct vring {
        unsigned int num;

        struct vring_desc *desc;

        struct vring_avail *avail;

        struct vring_used *used;
};
/* The standard layout for the ring is a continuous chunk of memory which looks
 * like this. We assume num is a power of 2.
 *
 * struct vring
 * {
 *      // The actual descriptors (16 bytes each)
 *      struct vring_desc desc[num];
 *
 *      // A ring of available descriptor heads with free-running index.
 *      __u16 avail_flags;
 *      __u16 avail_idx;
 *      __u16 available[num];
 *      __u16 used_event_idx;
 *
 *      // Padding to the next align boundary.
 *      char pad[];
 *
 *      // A ring of used descriptor heads with free-running index.
 *      __u16 used_flags;
 *      __u16 used_idx;
 *      struct vring_used_elem used[num];
 *      __u16 avail_event_idx;
 * };
 */

/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num])
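/* Sketch only (assumes VIRTIO_RING_F_EVENT_IDX was negotiated): the guest
 * writes, through vring_used_event(), the used index at which it wants the
 * next interrupt, e.g. to be told about the next completion after the last
 * one it has seen:
 *
 *      vring_used_event(&vr) = last_seen_used;   // guest-written, host-read
 *
 * Symmetrically the host publishes, via vring_avail_event(&vr), the avail
 * index at which it next wants a kick.  "vr" and "last_seen_used" are assumed
 * variables; see vring_need_event() below for how the writer of a ring
 * decides whether to actually notify. */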
static __inline__ void vring_init(struct vring *vr, unsigned int num, void *p,
                                  unsigned long align)
{
        vr->num = num;
        vr->desc = p;
        vr->avail = p + num*sizeof(struct vring_desc);
        vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16)
                + align-1) & ~(align - 1));
}
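/* Worked example (illustrative, not normative): for num = 256 and
 * align = 4096, vring_init() lays the ring out as
 *
 *      desc  = p            (256 descriptors * 16 bytes        = 4096 bytes)
 *      avail = p + 4096     (2 + 2 + 2*256 + 2 for used_event  =  518 bytes)
 *      used  = p + 8192     (end of avail at 4614, rounded up to align)
 *
 * so the used ring always starts on an "align"-byte boundary. */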
static __inline__ unsigned vring_size(unsigned int num, unsigned long align)
{
        return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
                 + align - 1) & ~(align - 1))
                + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
}
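/* Sketch only: how an implementation might size and set up a ring.
 * alloc_pages_aligned() is a hypothetical allocator returning zeroed,
 * "align"-aligned memory; it is not part of this header.
 *
 *      struct vring vr;
 *      unsigned int num = 256;                 // must be a power of 2
 *      unsigned long align = 4096;
 *      void *p = alloc_pages_aligned(vring_size(num, align), align);
 *
 *      vring_init(&vr, num, p, align);
 *      // vr.desc, vr.avail and vr.used now point into p as laid out above;
 *      // continuing the num = 256 example, vring_size() here is
 *      // 8192 + 6 + 2048 = 10246 bytes before any page rounding by the caller.
 */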
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static __inline__ int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
        /* Note: Xen has similar logic for notification hold-off
         * in include/xen/interface/io/ring.h with req_event and req_prod
         * corresponding to event_idx + 1 and new_idx respectively.
         * Note also that req_event and req_prod in Xen start at 1,
         * event indexes in virtio start at 0. */
        return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
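/* Sketch only: with VIRTIO_RING_F_EVENT_IDX negotiated, the side that just
 * advanced a ring index uses vring_need_event() to decide whether to notify.
 * For example, a device that moved the used index from old_used to
 * vr.used->idx might do (interrupt_guest() and the index variables are
 * assumptions, not part of this header):
 *
 *      if (vring_need_event(vring_used_event(&vr), vr.used->idx, old_used))
 *              interrupt_guest();
 *
 * The unsigned wrap-around arithmetic in the return expression keeps this
 * correct even when the free-running 16-bit indexes overflow. */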
#endif /* _LINUX_VIRTIO_RING_H */