/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_VHOST_H
#define _LINUX_VHOST_H
/* Userspace interface for in-kernel virtio accelerators. */

/* vhost is used to reduce the number of system calls involved in virtio.
 *
 * Existing virtio net code is used in the guest without modification.
 *
 * This header includes interface used by userspace hypervisor for
 * device configuration.
 */

#include <linux/vhost_types.h>
#include <linux/types.h>
#include <linux/ioctl.h>

#define VHOST_FILE_UNBIND -1

/* ioctls */

#define VHOST_VIRTIO 0xAF

/* Features bitmask for forward compatibility. Transport bits are used for
 * vhost specific features. */
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
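
/* Example (illustrative sketch, not part of the UAPI): feature negotiation
 * reads the backend's bitmask, intersects it with the bits the application
 * supports (app_features below is a hypothetical value), and writes the
 * result back. Assumes vhost_fd is an open vhost device fd whose owner has
 * already been claimed with VHOST_SET_OWNER (defined below); error handling
 * is omitted.
 *
 *	__u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	features &= app_features;
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 */
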
/* Set current process as the (exclusive) owner of this file descriptor. This
 * must be called before any other vhost command. Further calls to
 * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
/* Give up ownership, and reset the device to default values.
 * Allows subsequent call to VHOST_SET_OWNER to succeed. */
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
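
/* Example (illustrative sketch, not part of the UAPI): the usual lifecycle is
 * to open one of the vhost character devices and claim ownership before any
 * other command, and to reset ownership when tearing the device down.
 *
 *	int vhost_fd = open("/dev/vhost-net", O_RDWR);
 *
 *	ioctl(vhost_fd, VHOST_SET_OWNER);
 *	... feature negotiation, memory and ring setup, run ...
 *	ioctl(vhost_fd, VHOST_RESET_OWNER);
 *	close(vhost_fd);
 */
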
/* Set up/modify memory layout */
#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory)
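
/* Example (illustrative sketch, not part of the UAPI): describe a single
 * guest memory region to the kernel. struct vhost_memory and struct
 * vhost_memory_region come from <linux/vhost_types.h>; guest_base,
 * region_size and mmap_ptr are hypothetical values owned by the application.
 *
 *	struct vhost_memory *mem;
 *
 *	mem = calloc(1, sizeof(*mem) + sizeof(mem->regions[0]));
 *	mem->nregions = 1;
 *	mem->regions[0].guest_phys_addr = guest_base;
 *	mem->regions[0].memory_size = region_size;
 *	mem->regions[0].userspace_addr = (__u64)(uintptr_t)mmap_ptr;
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
 *	free(mem);
 */
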
/* Write logging setup. */
/* Memory writes can optionally be logged by setting bit at an offset
 * (calculated from the physical address) from specified log base.
 * The bit is set using an atomic 32 bit operation. */
/* Set base address for logging. */
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
/* Specify an eventfd file descriptor to signal on log write. */
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
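
/* Example (illustrative sketch, not part of the UAPI): point the kernel at a
 * dirty bitmap and at an eventfd to signal on log writes. log_size is a
 * hypothetical size derived from the amount of guest memory being tracked;
 * the bitmap must stay mapped while logging is enabled.
 *
 *	void *log = mmap(NULL, log_size, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	__u64 log_base = (__u64)(uintptr_t)log;
 *	int log_efd = eventfd(0, EFD_CLOEXEC);
 *
 *	ioctl(vhost_fd, VHOST_SET_LOG_BASE, &log_base);
 *	ioctl(vhost_fd, VHOST_SET_LOG_FD, &log_efd);
 */
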
/* By default, a device gets one vhost_worker that its virtqueues share. This
 * command allows the owner of the device to create an additional vhost_worker
 * for the device. It can later be bound to 1 or more of its virtqueues using
 * the VHOST_ATTACH_VRING_WORKER command.
 *
 * This must be called after VHOST_SET_OWNER and the caller must be the owner
 * of the device. The new thread will inherit the caller's cgroups and
 * namespaces, and will share the caller's memory space. The new thread will
 * also be counted against the caller's RLIMIT_NPROC value.
 *
 * The worker's ID used in other commands will be returned in
 * vhost_worker_state.
 */
#define VHOST_NEW_WORKER _IOR(VHOST_VIRTIO, 0x8, struct vhost_worker_state)
/* Free a worker created with VHOST_NEW_WORKER if it's not attached to any
 * virtqueue. If userspace is not able to call this for workers it has created,
 * the kernel will free all the device's workers when the device is closed.
 */
#define VHOST_FREE_WORKER _IOW(VHOST_VIRTIO, 0x9, struct vhost_worker_state)

/* Ring setup. */
/* Set number of descriptors in ring. This parameter cannot
 * be modified while ring is running (bound to a device). */
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
/* Set addresses for the ring. */
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
/* Base value where queue looks for available descriptors */
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
/* Get accessor: reads index, writes value in num */
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
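
/* Example (illustrative sketch, not part of the UAPI): size and place one
 * virtqueue. queue_size and the desc/avail/used pointers are hypothetical
 * values; the rings must live in memory already described to the backend
 * (e.g. via VHOST_SET_MEM_TABLE).
 *
 *	struct vhost_vring_state num = { .index = 0, .num = queue_size };
 *	struct vhost_vring_addr addr = {
 *		.index = 0,
 *		.desc_user_addr = (__u64)(uintptr_t)desc,
 *		.avail_user_addr = (__u64)(uintptr_t)avail,
 *		.used_user_addr = (__u64)(uintptr_t)used,
 *	};
 *	struct vhost_vring_state base = { .index = 0, .num = 0 };
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_NUM, &num);
 *	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(vhost_fd, VHOST_SET_VRING_BASE, &base);
 */
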
/* Set the vring byte order in num. Valid values are VHOST_VRING_LITTLE_ENDIAN
 * or VHOST_VRING_BIG_ENDIAN (other values return -EINVAL).
 * The byte order cannot be changed while the device is active: trying to do so
 * returns -EBUSY.
 * This is a legacy only API that is simply ignored when VIRTIO_F_VERSION_1 is
 * set.
 * Not all kernel configurations support this ioctl, but all configurations that
 * support SET also support GET.
 */
#define VHOST_VRING_LITTLE_ENDIAN 0
#define VHOST_VRING_BIG_ENDIAN 1
#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
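
/* Example (illustrative sketch, not part of the UAPI): force legacy
 * big-endian layout on virtqueue 0 before the device is started, e.g. when a
 * big-endian legacy guest is driven from a little-endian host.
 *
 *	struct vhost_vring_state endian = {
 *		.index = 0,
 *		.num = VHOST_VRING_BIG_ENDIAN,
 *	};
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &endian);
 */
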
/* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
 * virtqueues.
 *
 * This will replace the virtqueue's existing worker. If the replaced worker
 * is no longer attached to any virtqueues, it can be freed with
 * VHOST_FREE_WORKER.
 */
#define VHOST_ATTACH_VRING_WORKER _IOW(VHOST_VIRTIO, 0x15, \
					struct vhost_vring_worker)
/* Return the vring worker's ID */
#define VHOST_GET_VRING_WORKER _IOWR(VHOST_VIRTIO, 0x16, \
					struct vhost_vring_worker)
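
/* Example (illustrative sketch, not part of the UAPI): create a dedicated
 * worker thread and bind virtqueue 1 to it, leaving the other virtqueues on
 * the device's default worker.
 *
 *	struct vhost_worker_state worker = {};
 *	struct vhost_vring_worker binding;
 *
 *	ioctl(vhost_fd, VHOST_NEW_WORKER, &worker);
 *	binding.index = 1;
 *	binding.worker_id = worker.worker_id;
 *	ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &binding);
 */
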
/* The following ioctls use eventfd file descriptors to signal and poll
 * for events. */

/* Set eventfd to poll for added buffers */
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
/* Set eventfd to signal when buffers have been used */
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
/* Set eventfd to signal an error */
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
/* Set busy loop timeout (in us) */
#define VHOST_SET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x23, \
					struct vhost_vring_state)
/* Get busy loop timeout (in us) */
#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
					struct vhost_vring_state)
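
/* Example (illustrative sketch, not part of the UAPI): wire virtqueue 0 to a
 * pair of eventfds. The kernel polls the kick fd for "buffers added"
 * notifications and signals the call fd once buffers have been used; with KVM
 * these fds are typically also registered as ioeventfd/irqfd. Passing fd = -1
 * detaches an eventfd again.
 *
 *	struct vhost_vring_file kick = { .index = 0, .fd = eventfd(0, 0) };
 *	struct vhost_vring_file call = { .index = 0, .fd = eventfd(0, 0) };
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);
 *	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);
 */
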
/* Set or get vhost backend capability */
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)

/* VHOST_NET specific defines */

/* Attach virtio net ring to a raw socket, or tap device.
 * The socket must already be bound to an ethernet device; this device will be
 * used for transmit. Pass fd -1 to unbind from the socket and the transmit
 * device. This can be used to stop the ring (e.g. for migration). */
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
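
/* Example (illustrative sketch, not part of the UAPI): attach both rings of a
 * vhost-net device to a tap fd obtained elsewhere (e.g. via TUNSETIFF on
 * /dev/net/tun), then unbind them again to stop the rings.
 *
 *	struct vhost_vring_file backend;
 *
 *	for (int i = 0; i < 2; i++) {
 *		backend.index = i;
 *		backend.fd = tap_fd;
 *		ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
 *	}
 *	... device runs ...
 *	for (int i = 0; i < 2; i++) {
 *		backend.index = i;
 *		backend.fd = -1;
 *		ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
 *	}
 */
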
/* VHOST_SCSI specific defines */

#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
/* Changing this breaks userspace. */
#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
/* Set and get the events missed flag */
#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)

/* VHOST_VSOCK specific defines */

#define VHOST_VSOCK_SET_GUEST_CID _IOW(VHOST_VIRTIO, 0x60, __u64)
#define VHOST_VSOCK_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
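
/* Example (illustrative sketch, not part of the UAPI): bring up a vhost-vsock
 * device by assigning the guest a context ID and then marking the device
 * running once the virtqueues are configured. The CID value here is arbitrary.
 *
 *	int vsock_fd = open("/dev/vhost-vsock", O_RDWR);
 *	__u64 guest_cid = 3;
 *	int running = 1;
 *
 *	ioctl(vsock_fd, VHOST_SET_OWNER);
 *	ioctl(vsock_fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
 *	... feature and virtqueue setup as above ...
 *	ioctl(vsock_fd, VHOST_VSOCK_SET_RUNNING, &running);
 */
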
/* VHOST_VDPA specific defines */

/* Get the device id. The device ids follow the same definition as
 * the device id in the virtio spec.
 */
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
/* Get and set the status. The status bits follow the same definition
 * as the device status in the virtio spec.
 */
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
/* Get and set the device config. The device config follows the same
 * definition as the device config space in the virtio spec.
 */
#define VHOST_VDPA_GET_CONFIG _IOR(VHOST_VIRTIO, 0x73, \
					struct vhost_vdpa_config)
#define VHOST_VDPA_SET_CONFIG _IOW(VHOST_VIRTIO, 0x74, \
					struct vhost_vdpa_config)
/* Enable/disable the ring. */
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, \
					struct vhost_vring_state)
/* Get the max ring size. */
#define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16)

/* Set event fd for config interrupt */
#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int)

/* Get the valid iova range */
#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
					struct vhost_vdpa_iova_range)
/* Get the config size */
#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
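
/* Example (illustrative sketch, not part of the UAPI): basic bring-up against
 * a vDPA device node (the path is an example; one node is created per vDPA
 * instance). Reads the virtio device id and config space, then starts the
 * usual virtio status handshake. The VIRTIO_CONFIG_S_* bits come from
 * <linux/virtio_config.h>.
 *
 *	int vdpa_fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *	__u32 device_id, cfg_size;
 *	__u8 status;
 *	struct vhost_vdpa_config *cfg;
 *
 *	ioctl(vdpa_fd, VHOST_SET_OWNER);
 *	ioctl(vdpa_fd, VHOST_VDPA_GET_DEVICE_ID, &device_id);
 *	ioctl(vdpa_fd, VHOST_VDPA_GET_CONFIG_SIZE, &cfg_size);
 *	cfg = calloc(1, sizeof(*cfg) + cfg_size);
 *	cfg->off = 0;
 *	cfg->len = cfg_size;
 *	ioctl(vdpa_fd, VHOST_VDPA_GET_CONFIG, cfg);
 *	status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
 *	ioctl(vdpa_fd, VHOST_VDPA_SET_STATUS, &status);
 */
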
/* Get the count of all virtqueues */
#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)

/* Get the number of virtqueue groups. */
#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)

/* Get the number of address spaces. */
#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)

/* Get the group for a virtqueue: read index, write group in num.
 * The virtqueue index is stored in the index field of
 * vhost_vring_state. The group for this specific virtqueue is
 * returned via the num field of vhost_vring_state.
 */
#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \
					struct vhost_vring_state)
/* Set the ASID for a virtqueue group. The group index is stored in
 * the index field of vhost_vring_state, and the ASID associated with
 * this group is stored in the num field of vhost_vring_state.
 */
#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
					struct vhost_vring_state)
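
/* Example (illustrative sketch, not part of the UAPI): place the group that
 * contains the control virtqueue into its own address space so its mappings
 * can be managed separately from the data virtqueues. cvq_index is a
 * hypothetical control queue index and ASID 1 is an arbitrary choice.
 *
 *	struct vhost_vring_state group = { .index = cvq_index };
 *	struct vhost_vring_state asid;
 *
 *	ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &group);
 *	asid.index = group.num;
 *	asid.num = 1;
 *	ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
 */
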
/* Suspend a device so it does not process virtqueue requests anymore
 *
 * After the return of this ioctl the device must preserve all the necessary
 * state (the virtqueue vring base plus the possible device specific states)
 * that is required for restoring in the future. The device must not change
 * its configuration after that point.
 */
#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)

/* Resume a device so it can resume processing virtqueue requests
 *
 * After the return of this ioctl the device will have restored all the
 * necessary states and it is fully operational to continue processing the
 * virtqueue descriptors.
 */
#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
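
/* Example (illustrative sketch, not part of the UAPI): quiesce a vDPA device,
 * read back the vring base of each virtqueue for later restore (as done
 * during live migration), then resume it. nvqs and save_vring_base() are
 * hypothetical.
 *
 *	ioctl(vdpa_fd, VHOST_VDPA_SUSPEND);
 *	for (unsigned int i = 0; i < nvqs; i++) {
 *		struct vhost_vring_state base = { .index = i };
 *
 *		ioctl(vdpa_fd, VHOST_GET_VRING_BASE, &base);
 *		save_vring_base(i, base.num);
 *	}
 *	ioctl(vdpa_fd, VHOST_VDPA_RESUME);
 */
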
#endif