/*
 * QEMU I/O channels
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef QIO_CHANNEL_H
#define QIO_CHANNEL_H

#include "qom/object.h"
#include "qemu/coroutine-core.h"
#include "block/aio.h"

#define TYPE_QIO_CHANNEL "qio-channel"
OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass,
                    QIO_CHANNEL)


#define QIO_CHANNEL_ERR_BLOCK -2

#define QIO_CHANNEL_WRITE_FLAG_ZERO_COPY 0x1

typedef enum QIOChannelFeature QIOChannelFeature;

enum QIOChannelFeature {
    QIO_CHANNEL_FEATURE_FD_PASS,
    QIO_CHANNEL_FEATURE_SHUTDOWN,
    QIO_CHANNEL_FEATURE_LISTEN,
    QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY,
};


typedef enum QIOChannelShutdown QIOChannelShutdown;

enum QIOChannelShutdown {
    QIO_CHANNEL_SHUTDOWN_READ = 1,
    QIO_CHANNEL_SHUTDOWN_WRITE = 2,
    QIO_CHANNEL_SHUTDOWN_BOTH = 3,
};

typedef gboolean (*QIOChannelFunc)(QIOChannel *ioc,
                                   GIOCondition condition,
                                   gpointer data);

/**
 * QIOChannel:
 *
 * The QIOChannel defines the core API for a generic I/O channel
 * class hierarchy. It is inspired by GIOChannel, but has the
 * following differences:
 *
 *  - Use QOM to properly support arbitrary subclassing
 *  - Support use of iovecs for efficient I/O with multiple blocks
 *  - No character set translation; binary data exclusively
 *  - Direct support for QEMU Error object reporting
 *  - File descriptor passing
 *
 * This base class is abstract, so it cannot be instantiated. There
 * will be subclasses for dealing with sockets, files, and higher
 * level protocols such as TLS, WebSocket, etc.
 */

struct QIOChannel {
    Object parent;
    unsigned int features; /* bitmask of QIOChannelFeatures */
    char *name;
    AioContext *ctx;
    Coroutine *read_coroutine;
    Coroutine *write_coroutine;
#ifdef _WIN32
    HANDLE event; /* For use with GSource on Win32 */
#endif
};

/**
 * QIOChannelClass:
 *
 * This class defines the contract that all subclasses
 * must follow to provide specific channel implementations.
 * The first five callbacks are mandatory to support; the
 * others provide additional optional features.
 *
 * Consult the corresponding public API docs for a description
 * of the semantics of each callback. io_shutdown in particular
 * must be thread-safe, terminate quickly and must not block.
 */
struct QIOChannelClass {
    ObjectClass parent;

    /* Mandatory callbacks */
    ssize_t (*io_writev)(QIOChannel *ioc,
                         const struct iovec *iov,
                         size_t niov,
                         int *fds,
                         size_t nfds,
                         int flags,
                         Error **errp);
    ssize_t (*io_readv)(QIOChannel *ioc,
                        const struct iovec *iov,
                        size_t niov,
                        int **fds,
                        size_t *nfds,
                        Error **errp);
    int (*io_close)(QIOChannel *ioc,
                    Error **errp);
    GSource * (*io_create_watch)(QIOChannel *ioc,
                                 GIOCondition condition);
    int (*io_set_blocking)(QIOChannel *ioc,
                           bool enabled,
                           Error **errp);

    /* Optional callbacks */
    int (*io_shutdown)(QIOChannel *ioc,
                       QIOChannelShutdown how,
                       Error **errp);
    void (*io_set_cork)(QIOChannel *ioc,
                        bool enabled);
    void (*io_set_delay)(QIOChannel *ioc,
                         bool enabled);
    off_t (*io_seek)(QIOChannel *ioc,
                     off_t offset,
                     int whence,
                     Error **errp);
    void (*io_set_aio_fd_handler)(QIOChannel *ioc,
                                  AioContext *ctx,
                                  IOHandler *io_read,
                                  IOHandler *io_write,
                                  void *opaque);
    int (*io_flush)(QIOChannel *ioc,
                    Error **errp);
};
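
/*
 * Example (illustrative sketch, not part of this header's API): a
 * hypothetical channel implementation "qio_channel_foo" would declare
 * its own QOM type and wire up at least the five mandatory callbacks
 * in its class_init function, mirroring what the existing socket,
 * file and TLS channel implementations do:
 *
 *    static void qio_channel_foo_class_init(ObjectClass *klass,
 *                                           void *class_data)
 *    {
 *        QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);
 *
 *        // Mandatory callbacks
 *        ioc_klass->io_writev = qio_channel_foo_writev;
 *        ioc_klass->io_readv = qio_channel_foo_readv;
 *        ioc_klass->io_close = qio_channel_foo_close;
 *        ioc_klass->io_create_watch = qio_channel_foo_create_watch;
 *        ioc_klass->io_set_blocking = qio_channel_foo_set_blocking;
 *
 *        // Optional callbacks, only if the transport supports them
 *        ioc_klass->io_shutdown = qio_channel_foo_shutdown;
 *    }
 *
 * The qio_channel_foo_* functions are hypothetical; each must honour
 * the semantics documented for the corresponding public API below.
 */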

/* General I/O handling functions */

/**
 * qio_channel_has_feature:
 * @ioc: the channel object
 * @feature: the feature to check support of
 *
 * Determine whether the channel implementation supports
 * the optional feature named in @feature.
 *
 * Returns: true if supported, false otherwise.
 */
bool qio_channel_has_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_feature:
 * @ioc: the channel object
 * @feature: the feature to set support for
 *
 * Add channel support for the feature named in @feature.
 */
void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature);

/**
 * qio_channel_set_name:
 * @ioc: the channel object
 * @name: the name of the channel
 *
 * Sets the name of the channel, which serves as an aid
 * to debugging. The name is used when creating GSource
 * watches for this channel.
 */
void qio_channel_set_name(QIOChannel *ioc,
                          const char *name);

/**
 * qio_channel_readv_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive the file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * It is not required for all @iov to be filled with
 * data. If the channel is in blocking mode, at least
 * one byte of data will be read, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data is available, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If the channel has passed any file descriptors,
 * the @fds array pointer will be allocated and
 * the elements filled with the received file
 * descriptors. The @nfds pointer will be updated
 * to indicate the size of the @fds array that
 * was allocated. It is the caller's responsibility
 * to call close() on each file descriptor and to
 * call g_free() on the array pointer in @fds.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes read, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data is available
 * and the channel is non-blocking
 */
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               Error **errp);
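
/*
 * Example (illustrative sketch): receiving a buffer together with any
 * file descriptors passed over the channel, and releasing the fd array
 * as documented above. Assumes the channel advertises
 * QIO_CHANNEL_FEATURE_FD_PASS (e.g. a UNIX socket channel); the helper
 * name and its caller-supplied buffer are hypothetical.
 *
 *    static ssize_t read_with_fds(QIOChannel *ioc, void *buf, size_t len,
 *                                 Error **errp)
 *    {
 *        struct iovec iov = { .iov_base = buf, .iov_len = len };
 *        int *fds = NULL;
 *        size_t nfds = 0;
 *        ssize_t ret;
 *
 *        if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
 *            return qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, errp);
 *        }
 *
 *        ret = qio_channel_readv_full(ioc, &iov, 1, &fds, &nfds, errp);
 *        if (fds) {
 *            for (size_t i = 0; i < nfds; i++) {
 *                // use or stash fds[i]; here we simply close it
 *                close(fds[i]);
 *            }
 *            g_free(fds);
 *        }
 *        return ret;
 *    }
 */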

/**
 * qio_channel_writev_full:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * It is not required for all @iov data to be fully
 * sent. If the channel is in blocking mode, at least
 * one byte of data will be sent, but no more is
 * guaranteed. If the channel is non-blocking and no
 * data can be sent, it will return QIO_CHANNEL_ERR_BLOCK.
 *
 * If there are file descriptors to send, the @fds
 * array should be non-NULL and provide the handles.
 * All file descriptors will be sent if at least one
 * byte of data was sent.
 *
 * It is an error to pass a non-NULL @fds parameter
 * unless qio_channel_has_feature() returns a true
 * value for the QIO_CHANNEL_FEATURE_FD_PASS constant.
 *
 * Returns: the number of bytes sent, or -1 on error,
 * or QIO_CHANNEL_ERR_BLOCK if no data can be sent
 * and the channel is non-blocking
 */
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                int flags,
                                Error **errp);
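
/*
 * Example (illustrative sketch): sending a buffer along with a single
 * file descriptor. As above, this requires QIO_CHANNEL_FEATURE_FD_PASS;
 * the helper name is hypothetical. Note that a short write is possible,
 * so callers wanting "all or nothing" semantics would normally use
 * qio_channel_writev_full_all() instead (declared later in this header).
 *
 *    static ssize_t send_with_fd(QIOChannel *ioc, const void *buf,
 *                                size_t len, int fd, Error **errp)
 *    {
 *        struct iovec iov = {
 *            .iov_base = (void *)buf,
 *            .iov_len = len,
 *        };
 *
 *        return qio_channel_writev_full(ioc, &iov, 1, &fd, 1, 0, errp);
 *    }
 */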

/**
 * qio_channel_readv_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before any data is read,
 * no error is reported; otherwise, if it occurs
 * before all requested data has been read, an error
 * will be reported.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 * occurs without data, or -1 on error
 */
int qio_channel_readv_all_eof(QIOChannel *ioc,
                              const struct iovec *iov,
                              size_t niov,
                              Error **errp);
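
/*
 * Example (illustrative sketch): reading a fixed-size record while
 * treating a clean end-of-file as "no more records" rather than as an
 * error. The record type and helper name are hypothetical.
 *
 *    static int read_record(QIOChannel *ioc, MyRecord *rec, Error **errp)
 *    {
 *        struct iovec iov = {
 *            .iov_base = rec,
 *            .iov_len = sizeof(*rec),
 *        };
 *        int ret = qio_channel_readv_all_eof(ioc, &iov, 1, errp);
 *
 *        if (ret < 0) {
 *            return -1;   // I/O error, or EOF part-way through a record
 *        }
 *        return ret;      // 1: record populated, 0: clean EOF, no record
 *    }
 */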

/**
 * qio_channel_readv_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Read data from the IO channel, storing it in the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully populated with data
 * before the next one is used. The @niov parameter
 * specifies the total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be read, yielding from the current coroutine
 * if required.
 *
 * If end-of-file occurs before all requested data
 * has been read, an error will be reported.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_readv_all(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);

/**
 * qio_channel_writev_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Write data to the IO channel, reading it from the
 * memory regions referenced by @iov. Each element
 * in the @iov will be fully sent, before the next
 * one is used. The @niov parameter specifies the
 * total number of elements in @iov.
 *
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_writev_all(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_readv:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles.
 */
ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp);

/**
 * qio_channel_writev:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles.
 */
ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp);

/**
 * qio_channel_read:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_readv_full() but does not support
 * receiving of file handles, and only supports reading into
 * a single memory region.
 */
ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write:
 * @ioc: the channel object
 * @buf: the memory region to send data from
 * @buflen: the length of @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves as qio_channel_writev_full() but does not support
 * sending of file handles, and only supports writing from a
 * single memory region.
 */
ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);

/**
 * qio_channel_read_all_eof:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs immediately it is not an error, but if it occurs after
 * data has been read it will return an error rather than a
 * short-read. Otherwise behaves as qio_channel_read().
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file occurs
 * without data, or -1 on error
 */
int qio_channel_read_all_eof(QIOChannel *ioc,
                             char *buf,
                             size_t buflen,
                             Error **errp);

/**
 * qio_channel_read_all:
 * @ioc: the channel object
 * @buf: the memory region to read data into
 * @buflen: the number of bytes to read into @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Reads @buflen bytes into @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is read. If end-of-file
 * occurs it will return an error rather than a short-read. Otherwise
 * behaves as qio_channel_read().
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_read_all(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp);

/**
 * qio_channel_write_all:
 * @ioc: the channel object
 * @buf: the memory region to write data from
 * @buflen: the number of bytes to write from @buf
 * @errp: pointer to a NULL-initialized error object
 *
 * Writes @buflen bytes from @buf, possibly blocking or (if the
 * channel is non-blocking) yielding from the current coroutine
 * multiple times until the entire content is written. Otherwise
 * behaves as qio_channel_write().
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_write_all(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp);
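
/*
 * Example (illustrative sketch): exchanging a length-prefixed message
 * using the "all" helpers, which either transfer the full buffer or
 * fail. The framing format and helper names are hypothetical; the
 * length is carried big-endian via glib's GUINT32 macros, and
 * error_setg() from qapi/error.h is assumed to be available.
 *
 *    static int send_message(QIOChannel *ioc, const char *data,
 *                            uint32_t len, Error **errp)
 *    {
 *        uint32_t belen = GUINT32_TO_BE(len);
 *
 *        if (qio_channel_write_all(ioc, (char *)&belen, sizeof(belen),
 *                                  errp) < 0) {
 *            return -1;
 *        }
 *        return qio_channel_write_all(ioc, data, len, errp);
 *    }
 *
 *    static int recv_message(QIOChannel *ioc, char *buf, uint32_t maxlen,
 *                            uint32_t *len, Error **errp)
 *    {
 *        uint32_t belen;
 *
 *        if (qio_channel_read_all(ioc, (char *)&belen, sizeof(belen),
 *                                 errp) < 0) {
 *            return -1;
 *        }
 *        *len = GUINT32_FROM_BE(belen);
 *        if (*len > maxlen) {
 *            error_setg(errp, "message too large: %u bytes", *len);
 *            return -1;
 *        }
 *        return qio_channel_read_all(ioc, buf, *len, errp);
 *    }
 */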

/**
 * qio_channel_set_blocking:
 * @ioc: the channel object
 * @enabled: the blocking flag state
 * @errp: pointer to a NULL-initialized error object
 *
 * If @enabled is true, then the channel is put into
 * blocking mode, otherwise it will be non-blocking.
 *
 * In non-blocking mode, read/write operations may
 * return QIO_CHANNEL_ERR_BLOCK if they would otherwise
 * block on I/O.
 */
int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp);

/**
 * qio_channel_close:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Close the channel, flushing any pending I/O.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_close(QIOChannel *ioc,
                      Error **errp);

/**
 * qio_channel_shutdown:
 * @ioc: the channel object
 * @how: the direction to shutdown
 * @errp: pointer to a NULL-initialized error object
 *
 * Shuts down transmission and/or reception of data
 * without closing the underlying transport.
 *
 * Not all implementations will support this facility,
 * so they may report an error. To avoid errors, the
 * caller may check for the feature flag
 * QIO_CHANNEL_FEATURE_SHUTDOWN prior to calling
 * this method.
 *
 * This function is thread-safe, terminates quickly and does not block.
 *
 * Returns: 0 on success, -1 on error
 */
int qio_channel_shutdown(QIOChannel *ioc,
                         QIOChannelShutdown how,
                         Error **errp);
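
/*
 * Example (illustrative sketch): performing a half-close on a channel
 * that supports it, draining the peer's remaining data, then closing.
 * Assumes the channel is in blocking mode; the helper name is
 * hypothetical.
 *
 *    static int finish_and_close(QIOChannel *ioc, Error **errp)
 *    {
 *        if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_SHUTDOWN)) {
 *            // Tell the peer we will send no more data...
 *            if (qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_WRITE,
 *                                     errp) < 0) {
 *                return -1;
 *            }
 *        }
 *
 *        // ...drain whatever the peer still has to say...
 *        char scratch[4096];
 *        ssize_t ret;
 *        do {
 *            ret = qio_channel_read(ioc, scratch, sizeof(scratch), errp);
 *        } while (ret > 0);
 *        if (ret < 0) {
 *            return -1;
 *        }
 *
 *        // ...then tear the channel down completely.
 *        return qio_channel_close(ioc, errp);
 *    }
 */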

/**
 * qio_channel_set_delay:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to delay writes in order to merge
 * small packets. If @enabled is true, then the
 * writes may be delayed in order to opportunistically
 * merge small packets into larger ones. If @enabled
 * is false, writes are dispatched immediately with
 * no delay.
 *
 * When @enabled is false, applications may wish to
 * use the qio_channel_set_cork() method to explicitly
 * control write merging.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the inverse of the TCP_NODELAY flag,
 * controlling whether the Nagle algorithm is active.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_delay(QIOChannel *ioc,
                           bool enabled);

/**
 * qio_channel_set_cork:
 * @ioc: the channel object
 * @enabled: the new flag state
 *
 * Controls whether the underlying transport is
 * permitted to dispatch data that is written.
 * If @enabled is true, then any data written will
 * be queued in local buffers until @enabled is
 * set to false once again.
 *
 * This feature is typically used when the automatic
 * write coalescing facility is disabled via the
 * qio_channel_set_delay() method.
 *
 * On channels which are backed by a socket, this
 * API corresponds to the TCP_CORK flag.
 *
 * This setting is merely a hint, so implementations are
 * free to ignore this without it being considered an
 * error.
 */
void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled);
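
/*
 * Example (illustrative sketch): disabling Nagle-style delays and then
 * corking the channel around a header + payload pair so that both are
 * dispatched together. The helper name and message layout are
 * hypothetical.
 *
 *    static int send_framed(QIOChannel *ioc, const char *hdr, size_t hlen,
 *                           const char *body, size_t blen, Error **errp)
 *    {
 *        int ret = 0;
 *
 *        qio_channel_set_delay(ioc, false);
 *
 *        qio_channel_set_cork(ioc, true);
 *        if (qio_channel_write_all(ioc, hdr, hlen, errp) < 0 ||
 *            qio_channel_write_all(ioc, body, blen, errp) < 0) {
 *            ret = -1;
 *        }
 *        qio_channel_set_cork(ioc, false);
 *
 *        return ret;
 *    }
 */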

/**
 * qio_channel_io_seek:
 * @ioc: the channel object
 * @offset: the position to seek to, relative to @whence
 * @whence: one of the (POSIX) SEEK_* constants listed below
 * @errp: pointer to a NULL-initialized error object
 *
 * Moves the current I/O position within the channel
 * @ioc, to be @offset. The value of @offset is
 * interpreted relative to @whence:
 *
 * SEEK_SET - the position is set to @offset bytes
 * SEEK_CUR - the position is moved by @offset bytes
 * SEEK_END - the position is set to end of the file plus @offset bytes
 *
 * Not all implementations will support this facility,
 * so they may report an error.
 *
 * Returns: the new position on success, (off_t)-1 on failure
 */
off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp);

/**
 * qio_channel_create_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. Typically the
 * qio_channel_add_watch() method would be used instead
 * of this, since it directly attaches a callback to
 * the source.
 *
 * Returns: the new main loop source.
 */
GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition);

/**
 * qio_channel_add_watch:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 *
 * Create a new main loop source that is used to watch
 * for the I/O condition @condition. The callback @func
 * will be registered against the source, to be invoked
 * when the source becomes ready. The optional @user_data
 * will be passed to @func when it is invoked. The @notify
 * callback will be used to free @user_data when the
 * watch is deleted.
 *
 * The returned source ID can be used with g_source_remove()
 * to remove and free the source when no longer required.
 * Alternatively the @func callback can return a FALSE
 * value.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify);
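
/*
 * Example (illustrative sketch): watching a channel for readability
 * from the main loop. The callback, its MyClient context struct and
 * my_client_feed() are hypothetical; returning G_SOURCE_REMOVE (FALSE)
 * drops the watch, as described above.
 *
 *    static gboolean my_channel_readable(QIOChannel *ioc,
 *                                        GIOCondition condition,
 *                                        gpointer opaque)
 *    {
 *        MyClient *client = opaque;
 *        char buf[1024];
 *        ssize_t ret = qio_channel_read(ioc, buf, sizeof(buf), NULL);
 *
 *        if (ret <= 0 && ret != QIO_CHANNEL_ERR_BLOCK) {
 *            return G_SOURCE_REMOVE;   // error or EOF: stop watching
 *        }
 *        if (ret > 0) {
 *            my_client_feed(client, buf, ret);
 *        }
 *        return G_SOURCE_CONTINUE;
 *    }
 *
 *    // registration, e.g. when the client is created:
 *    //     guint tag = qio_channel_add_watch(ioc, G_IO_IN,
 *    //                                       my_channel_readable,
 *    //                                       client, NULL);
 */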

/**
 * qio_channel_add_watch_full:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: the context to run the watch source in
 *
 * Similar to qio_channel_add_watch(), but allows the caller to
 * specify the GMainContext in which to run the watch source.
 *
 * Returns: the source ID
 */
guint qio_channel_add_watch_full(QIOChannel *ioc,
                                 GIOCondition condition,
                                 QIOChannelFunc func,
                                 gpointer user_data,
                                 GDestroyNotify notify,
                                 GMainContext *context);

/**
 * qio_channel_add_watch_source:
 * @ioc: the channel object
 * @condition: the I/O condition to monitor
 * @func: callback to invoke when the source becomes ready
 * @user_data: opaque data to pass to @func
 * @notify: callback to free @user_data
 * @context: gcontext to bind the source to
 *
 * Similar to qio_channel_add_watch(), but allows the caller to
 * specify the GMainContext in which to run the watch source, and
 * returns the GSource object instead of a tag ID, with the GSource
 * already referenced.
 *
 * Note: the caller is responsible for unreffing the source when it
 * is no longer needed.
 *
 * Returns: the source pointer
 */
GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                      GIOCondition condition,
                                      QIOChannelFunc func,
                                      gpointer user_data,
                                      GDestroyNotify notify,
                                      GMainContext *context);

/**
 * qio_channel_attach_aio_context:
 * @ioc: the channel object
 * @ctx: the #AioContext to set the handlers on
 *
 * Request that qio_channel_yield() sets I/O handlers on
 * the given #AioContext. If @ctx is %NULL, qio_channel_yield()
 * uses QEMU's main thread event loop.
 *
 * You can move a #QIOChannel from one #AioContext to another even if
 * I/O handlers are set for a coroutine. However, #QIOChannel provides
 * no synchronization between the calls to qio_channel_yield() and
 * qio_channel_attach_aio_context().
 *
 * Therefore you should first call qio_channel_detach_aio_context()
 * to ensure that the coroutine is not entered concurrently. Then,
 * while the coroutine has yielded, call qio_channel_attach_aio_context(),
 * and then aio_co_schedule() to place the coroutine on the new
 * #AioContext. The calls to qio_channel_detach_aio_context()
 * and qio_channel_attach_aio_context() should be protected with
 * aio_context_acquire() and aio_context_release().
 */
void qio_channel_attach_aio_context(QIOChannel *ioc,
                                    AioContext *ctx);

/**
 * qio_channel_detach_aio_context:
 * @ioc: the channel object
 *
 * Disable any I/O handlers set by qio_channel_yield(). With the
 * help of aio_co_schedule(), this allows moving a coroutine that was
 * paused by qio_channel_yield() to another context.
 */
void qio_channel_detach_aio_context(QIOChannel *ioc);
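
/*
 * Example (illustrative sketch): moving a channel and the coroutine
 * blocked in qio_channel_yield() to a different #AioContext, following
 * the sequence documented above. The function and variable names are
 * hypothetical, and the exact locking discipline may need adapting to
 * the caller's situation; @co is the coroutine currently using @ioc.
 *
 *    static void move_channel(QIOChannel *ioc, Coroutine *co,
 *                             AioContext *old_ctx, AioContext *new_ctx)
 *    {
 *        aio_context_acquire(old_ctx);
 *        qio_channel_detach_aio_context(ioc);
 *        aio_context_release(old_ctx);
 *
 *        aio_context_acquire(new_ctx);
 *        qio_channel_attach_aio_context(ioc, new_ctx);
 *        aio_context_release(new_ctx);
 *
 *        // Resume the coroutine in its new home context
 *        aio_co_schedule(new_ctx, co);
 *    }
 */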

/**
 * qio_channel_yield:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Yields execution from the current coroutine until the condition
 * indicated by @condition becomes available. @condition must
 * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In
 * addition, no two coroutines can be waiting on the same condition
 * and channel at the same time.
 *
 * This must only be called from coroutine context. It is safe to
 * reenter the coroutine externally while it is waiting; in this
 * case the function will return even if @condition is not yet
 * available.
 */
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition);
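
/*
 * Example (illustrative sketch): a coroutine reading from a
 * non-blocking channel, yielding whenever no data is available. The
 * helper name is hypothetical.
 *
 *    static ssize_t coroutine_fn co_read(QIOChannel *ioc, char *buf,
 *                                        size_t len, Error **errp)
 *    {
 *        ssize_t ret;
 *
 *        for (;;) {
 *            ret = qio_channel_read(ioc, buf, len, errp);
 *            if (ret != QIO_CHANNEL_ERR_BLOCK) {
 *                return ret;
 *            }
 *            // Nothing to read yet: give up the thread until readable
 *            qio_channel_yield(ioc, G_IO_IN);
 *        }
 *    }
 */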

/**
 * qio_channel_wait:
 * @ioc: the channel object
 * @condition: the I/O condition to wait for
 *
 * Block execution from the current thread until
 * the condition indicated by @condition becomes
 * available.
 *
 * This will enter a nested event loop to perform
 * the wait.
 */
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition);
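
/*
 * Example (illustrative sketch): the non-coroutine counterpart of the
 * previous sketch, blocking the calling thread in a nested event loop
 * instead of yielding. The helper name is hypothetical.
 *
 *    static ssize_t blocking_read(QIOChannel *ioc, char *buf, size_t len,
 *                                 Error **errp)
 *    {
 *        ssize_t ret;
 *
 *        for (;;) {
 *            ret = qio_channel_read(ioc, buf, len, errp);
 *            if (ret != QIO_CHANNEL_ERR_BLOCK) {
 *                return ret;
 *            }
 *            qio_channel_wait(ioc, G_IO_IN);
 *        }
 *    }
 */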

/**
 * qio_channel_set_aio_fd_handler:
 * @ioc: the channel object
 * @ctx: the AioContext to set the handlers on
 * @io_read: the read handler
 * @io_write: the write handler
 * @opaque: the opaque value passed to the handler
 *
 * This is used internally by qio_channel_yield(). It can
 * be used by channel implementations to forward the handlers
 * to another channel (e.g. from #QIOChannelTLS to the
 * underlying socket).
 */
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *ctx,
                                    IOHandler *io_read,
                                    IOHandler *io_write,
                                    void *opaque);

/**
 * qio_channel_readv_full_all_eof:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive the file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Performs the same function as qio_channel_readv_all_eof().
 * Additionally, it attempts to read file descriptors shared
 * over the channel. The function will wait for all
 * requested data to be read, yielding from the current
 * coroutine if required. Here, "data" refers to both the
 * file descriptors and the iovs.
 *
 * Returns: 1 if all bytes were read, 0 if end-of-file
 * occurs without data, or -1 on error
 */
int qio_channel_readv_full_all_eof(QIOChannel *ioc,
                                   const struct iovec *iov,
                                   size_t niov,
                                   int **fds, size_t *nfds,
                                   Error **errp);

/**
 * qio_channel_readv_full_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to read data into
 * @niov: the length of the @iov array
 * @fds: pointer to an array that will receive the file handles
 * @nfds: pointer filled with number of elements in @fds on return
 * @errp: pointer to a NULL-initialized error object
 *
 * Performs the same function as qio_channel_readv_all().
 * Additionally, it attempts to read file descriptors shared
 * over the channel. The function will wait for all
 * requested data to be read, yielding from the current
 * coroutine if required. Here, "data" refers to both the
 * file descriptors and the iovs.
 *
 * Returns: 0 if all bytes were read, or -1 on error
 */
int qio_channel_readv_full_all(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds, size_t *nfds,
                               Error **errp);

/**
 * qio_channel_writev_full_all:
 * @ioc: the channel object
 * @iov: the array of memory regions to write data from
 * @niov: the length of the @iov array
 * @fds: an array of file handles to send
 * @nfds: number of file handles in @fds
 * @flags: write flags (QIO_CHANNEL_WRITE_FLAG_*)
 * @errp: pointer to a NULL-initialized error object
 *
 * Behaves like qio_channel_writev_full(), but will attempt
 * to send all data passed (file handles and memory regions).
 * The function will wait for all requested data
 * to be written, yielding from the current coroutine
 * if required.
 *
 * If QIO_CHANNEL_WRITE_FLAG_ZERO_COPY is passed in @flags,
 * instead of waiting for all requested data to be written,
 * this function will wait until it's all queued for writing.
 * In this case, if the buffer gets changed between queueing and
 * sending, the updated buffer will be sent. If this is not the
 * desired behavior, it's suggested to call qio_channel_flush()
 * before reusing the buffer.
 *
 * Returns: 0 if all bytes were written, or -1 on error
 */
int qio_channel_writev_full_all(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds, size_t nfds,
                                int flags, Error **errp);

/**
 * qio_channel_flush:
 * @ioc: the channel object
 * @errp: pointer to a NULL-initialized error object
 *
 * Will block until every packet queued with
 * qio_channel_writev_full() + QIO_CHANNEL_WRITE_FLAG_ZERO_COPY
 * is sent, or return in case of any error.
 *
 * If not implemented, acts as a no-op, and returns 0.
 *
 * Returns: -1 if any error is found,
 *           1 if every send failed to use zero copy,
 *           0 otherwise.
 */
int qio_channel_flush(QIOChannel *ioc,
                      Error **errp);
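
/*
 * Example (illustrative sketch): queueing buffers for zero-copy
 * transmission and then flushing so they can safely be reused. Falls
 * back to ordinary writes when QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY is
 * not advertised; the helper name is hypothetical. A return value of 1
 * from qio_channel_flush() merely means zero copy was not actually used.
 *
 *    static int send_zero_copy(QIOChannel *ioc, const struct iovec *iov,
 *                              size_t niov, Error **errp)
 *    {
 *        int flags = 0;
 *
 *        if (qio_channel_has_feature(ioc,
 *                                    QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
 *            flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
 *        }
 *
 *        if (qio_channel_writev_full_all(ioc, iov, niov, NULL, 0,
 *                                        flags, errp) < 0) {
 *            return -1;
 *        }
 *
 *        // Wait until the kernel is done with the buffers before the
 *        // caller reuses or frees them.
 *        return qio_channel_flush(ioc, errp) < 0 ? -1 : 0;
 *    }
 */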

#endif /* QIO_CHANNEL_H */