qapi: Remove QMP events and commands from user-mode builds
[qemu/ar7.git] / include / qemu / iov.h
blob93307466809b6faf7727128edb2b64fa77fbacf2
/*
 * Helpers for using (partial) iovecs.
 *
 * Copyright (C) 2010 Red Hat, Inc.
 *
 * Author(s):
 *  Amit Shah <amit.shah@redhat.com>
 *  Michael Tokarev <mjt@tls.msk.ru>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef IOV_H
#define IOV_H
/**
 * iov_size:
 *
 * Count and return data size, in bytes, of an iovec
 * starting at `iov' of `iov_cnt' number of elements.
 */
size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt);
/**
 * iov_from_buf_full() / iov_to_buf_full():
 *
 * Copy from single continuous buffer to scatter-gather vector of buffers
 * (iovec) and back like memcpy() between two continuous memory regions.
 * Data in single continuous buffer starting at address `buf' and
 * `bytes' bytes long will be copied to/from an iovec `iov' with
 * `iov_cnt' number of elements, starting at byte position `offset'
 * within the iovec.  If the iovec does not contain enough space,
 * only part of data will be copied, up to the end of the iovec.
 * Number of bytes actually copied will be returned, which is
 *  min(bytes, iov_size(iov)-offset)
 * `Offset' must point to the inside of iovec.
 */
size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
                         size_t offset, const void *buf, size_t bytes);
size_t iov_to_buf_full(const struct iovec *iov, const unsigned int iov_cnt,
                       size_t offset, void *buf, size_t bytes);
/*
 * iov_from_buf: copy `bytes' bytes from `buf' into the iovec at `offset'.
 *
 * Inline fast path for iov_from_buf_full(): when the compiler can prove
 * `bytes' is a compile-time constant and the whole requested range fits
 * inside the first iovec element, a single memcpy() suffices.
 */
static inline size_t
iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
             size_t offset, const void *buf, size_t bytes)
{
    if (!__builtin_constant_p(bytes) || !iov_cnt ||
        offset > iov[0].iov_len || bytes > iov[0].iov_len - offset) {
        /* Slow path: size not known at compile time, or range spans
         * beyond the first element — defer to the out-of-line helper. */
        return iov_from_buf_full(iov, iov_cnt, offset, buf, bytes);
    }
    memcpy(iov[0].iov_base + offset, buf, bytes);
    return bytes;
}
/*
 * iov_to_buf: copy `bytes' bytes out of the iovec at `offset' into `buf'.
 *
 * Inline fast path for iov_to_buf_full(): when the compiler can prove
 * `bytes' is a compile-time constant and the whole requested range fits
 * inside the first iovec element, a single memcpy() suffices.
 */
static inline size_t
iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
           size_t offset, void *buf, size_t bytes)
{
    if (!__builtin_constant_p(bytes) || !iov_cnt ||
        offset > iov[0].iov_len || bytes > iov[0].iov_len - offset) {
        /* Slow path: size not known at compile time, or range spans
         * beyond the first element — defer to the out-of-line helper. */
        return iov_to_buf_full(iov, iov_cnt, offset, buf, bytes);
    }
    memcpy(buf, iov[0].iov_base + offset, bytes);
    return bytes;
}
/**
 * iov_memset:
 *
 * Set data bytes pointed out by iovec `iov' of size `iov_cnt' elements,
 * starting at byte offset `offset', to value `fillc', repeating it
 * `bytes' number of times.  `Offset' must point to the inside of iovec.
 * If `bytes' is large enough, only last bytes portion of iovec,
 * up to the end of it, will be filled with the specified value.
 * Function return actual number of bytes processed, which is
 * min(bytes, iov_size(iov) - offset).
 */
size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
                  size_t offset, int fillc, size_t bytes);
/**
 * iov_send_recv:
 *
 * Send/recv data from/to iovec buffers directly
 *
 * `offset' bytes in the beginning of iovec buffer are skipped and
 * next `bytes' bytes are used, which must be within data of iovec.
 *
 *   r = iov_send_recv(sockfd, iov, iovcnt, offset, bytes, true);
 *
 * is logically equivalent to
 *
 *   char *buf = malloc(bytes);
 *   iov_to_buf(iov, iovcnt, offset, buf, bytes);
 *   r = send(sockfd, buf, bytes, 0);
 *   free(buf);
 *
 * For iov_send_recv() _whole_ area being sent or received
 * should be within the iovec, not only beginning of it.
 */
ssize_t iov_send_recv(int sockfd, const struct iovec *iov, unsigned iov_cnt,
                      size_t offset, size_t bytes, bool do_send);
/* Convenience wrappers fixing iov_send_recv()'s direction flag. */
#define iov_recv(sockfd, iov, iov_cnt, offset, bytes) \
  iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, false)
#define iov_send(sockfd, iov, iov_cnt, offset, bytes) \
  iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, true)
/**
 * iov_hexdump:
 *
 * Produce a text hexdump of iovec `iov' with `iov_cnt' number of elements
 * in file `fp', prefixing each line with `prefix' and processing not more
 * than `limit' data bytes.
 */
void iov_hexdump(const struct iovec *iov, const unsigned int iov_cnt,
                 FILE *fp, const char *prefix, size_t limit);
/**
 * iov_copy:
 *
 * Partial copy of vector from iov to dst_iov (data is not copied).
 * dst_iov overlaps iov at a specified offset.
 * size of dst_iov is at most bytes. dst vector count is returned.
 */
unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt,
                  const struct iovec *iov, unsigned int iov_cnt,
                  size_t offset, size_t bytes);
/**
 * iov_discard_front() / iov_discard_back():
 *
 * Remove a given number of bytes from the front or back of a vector.
 * This may update iov and/or iov_cnt to exclude iovec elements that are
 * no longer required.
 *
 * The number of bytes actually discarded is returned. This number may be
 * smaller than requested if the vector is too small.
 */
size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
                         size_t bytes);
size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
                        size_t bytes);
/* Information needed to undo an iov_discard_*() operation */
typedef struct {
    struct iovec *modified_iov; /* element touched by the discard; NOTE(review):
                                 * presumably restored from @orig by
                                 * iov_discard_undo() -- confirm in iov.c */
    struct iovec orig;          /* saved original base/len of that element */
} IOVDiscardUndo;
/**
 * iov_discard_undo:
 *
 * Undo an iov_discard_front_undoable() or iov_discard_back_undoable()
 * operation. If multiple operations are made then each one needs a separate
 * IOVDiscardUndo and iov_discard_undo() must be called in the reverse order
 * that the operations were made.
 */
void iov_discard_undo(IOVDiscardUndo *undo);
/**
 * iov_discard_front_undoable() / iov_discard_back_undoable():
 *
 * Undoable versions of iov_discard_front() and iov_discard_back(). Use
 * iov_discard_undo() to reset to the state before the discard operations.
 */
size_t iov_discard_front_undoable(struct iovec **iov, unsigned int *iov_cnt,
                                  size_t bytes, IOVDiscardUndo *undo);
size_t iov_discard_back_undoable(struct iovec *iov, unsigned int *iov_cnt,
                                 size_t bytes, IOVDiscardUndo *undo);
/*
 * QEMUIOVector: a growable/wrappable scatter-gather vector.
 *
 * The visible source was missing the closing braces of the anonymous
 * struct/union members; they are restored here.
 */
typedef struct QEMUIOVector {
    struct iovec *iov;
    int niov;

    /*
     * For external @iov (qemu_iovec_init_external()) or allocated @iov
     * (qemu_iovec_init()), @size is the cumulative size of iovecs and
     * @local_iov is invalid and unused.
     *
     * For embedded @iov (QEMU_IOVEC_INIT_BUF() or qemu_iovec_init_buf()),
     * @iov is equal to &@local_iov, and @size is valid, as it has same
     * offset and type as @local_iov.iov_len, which is guaranteed by
     * static assertion below.
     *
     * @nalloc is always valid and is -1 both for embedded and external
     * cases. It is included in the union only to ensure the padding prior
     * to the @size field will not result in a 0-length array.
     */
    union {
        struct {
            int nalloc;
            struct iovec local_iov;
        };
        struct {
            /* Padding sized so that @size lands exactly on
             * @local_iov.iov_len in the sibling struct. */
            char __pad[sizeof(int) + offsetof(struct iovec, iov_len)];
            size_t size;
        };
    };
} QEMUIOVector;
/* Compile-time guarantee that @size aliases @local_iov.iov_len exactly,
 * which the embedded-buffer representation of QEMUIOVector relies on. */
QEMU_BUILD_BUG_ON(offsetof(QEMUIOVector, size) !=
                  offsetof(QEMUIOVector, local_iov.iov_len));
/*
 * QEMU_IOVEC_INIT_BUF:
 *
 * Initializer for an embedded QEMUIOVector wrapping a single (buf, len)
 * pair: @iov points at the structure's own @local_iov member.  The visible
 * source was missing the initializer's outer braces; restored here.
 */
#define QEMU_IOVEC_INIT_BUF(self, buf, len)              \
{                                                        \
    .iov = &(self).local_iov,                            \
    .niov = 1,                                           \
    .nalloc = -1,                                        \
    .local_iov = {                                       \
        .iov_base = (void *)(buf), /* cast away const */ \
        .iov_len = (len),                                \
    },                                                   \
}
201 * qemu_iovec_init_buf
203 * Initialize embedded QEMUIOVector.
205 * Note: "const" is used over @buf pointer to make it simple to pass
206 * const pointers, appearing in read functions. Then this "const" is
207 * cast away by QEMU_IOVEC_INIT_BUF().
209 static inline void qemu_iovec_init_buf(QEMUIOVector *qiov,
210 const void *buf, size_t len)
212 *qiov = (QEMUIOVector) QEMU_IOVEC_INIT_BUF(*qiov, buf, len);
215 static inline void *qemu_iovec_buf(QEMUIOVector *qiov)
217 /* Only supports embedded iov */
218 assert(qiov->nalloc == -1 && qiov->iov == &qiov->local_iov);
220 return qiov->local_iov.iov_base;
223 void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
224 void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
225 int qemu_iovec_init_extended(
226 QEMUIOVector *qiov,
227 void *head_buf, size_t head_len,
228 QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len,
229 void *tail_buf, size_t tail_len);
230 void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source,
231 size_t offset, size_t len);
232 int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len);
233 void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
234 void qemu_iovec_concat(QEMUIOVector *dst,
235 QEMUIOVector *src, size_t soffset, size_t sbytes);
236 size_t qemu_iovec_concat_iov(QEMUIOVector *dst,
237 struct iovec *src_iov, unsigned int src_cnt,
238 size_t soffset, size_t sbytes);
239 bool qemu_iovec_is_zero(QEMUIOVector *qiov, size_t qiov_offeset, size_t bytes);
240 void qemu_iovec_destroy(QEMUIOVector *qiov);
241 void qemu_iovec_reset(QEMUIOVector *qiov);
242 size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset,
243 void *buf, size_t bytes);
244 size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
245 const void *buf, size_t bytes);
246 size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
247 int fillc, size_t bytes);
248 ssize_t qemu_iovec_compare(QEMUIOVector *a, QEMUIOVector *b);
249 void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf);
250 void qemu_iovec_discard_back(QEMUIOVector *qiov, size_t bytes);
252 #endif