/*
 * QEMU I/O channels
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */
#include "qemu/osdep.h"
#include "io/channel.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/iov.h"
bool qio_channel_has_feature(QIOChannel *ioc,
                             QIOChannelFeature feature)
{
    return ioc->features & (1 << feature);
}
void qio_channel_set_feature(QIOChannel *ioc,
                             QIOChannelFeature feature)
{
    ioc->features |= (1 << feature);
}
void qio_channel_set_name(QIOChannel *ioc,
                          const char *name)
{
    g_free(ioc->name);
    ioc->name = g_strdup(name);
}
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if ((fds || nfds) &&
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
        error_setg_errno(errp, EINVAL,
                         "Channel does not support file descriptor passing");
        return -1;
    }

    return klass->io_readv(ioc, iov, niov, fds, nfds, errp);
}
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                int flags,
                                Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (fds || nfds) {
        if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
            error_setg_errno(errp, EINVAL,
                             "Channel does not support file descriptor passing");
            return -1;
        }

        if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
            error_setg_errno(errp, EINVAL,
                             "Zero Copy does not support file descriptor passing");
            return -1;
        }
    }

    if ((flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) &&
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
        error_setg_errno(errp, EINVAL,
                         "Requested Zero Copy feature is not available");
        return -1;
    }

    return klass->io_writev(ioc, iov, niov, fds, nfds, flags, errp);
}
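
/*
 * Example (illustrative sketch, not part of this file): passing a file
 * descriptor alongside payload data. The 'ioc' and 'passed_fd' variables
 * are hypothetical; the channel must advertise
 * QIO_CHANNEL_FEATURE_FD_PASS (e.g. a UNIX socket channel), otherwise
 * the call above fails with EINVAL.
 *
 *   struct iovec iov = { .iov_base = (char *)"hello", .iov_len = 5 };
 *   int fds[1] = { passed_fd };
 *   Error *err = NULL;
 *   ssize_t ret = qio_channel_writev_full(ioc, &iov, 1, fds, 1, 0, &err);
 *   if (ret < 0 && ret != QIO_CHANNEL_ERR_BLOCK) {
 *       error_report_err(err);
 *   }
 */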
int qio_channel_readv_all_eof(QIOChannel *ioc,
                              const struct iovec *iov,
                              size_t niov,
                              Error **errp)
{
    return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, errp);
}
int qio_channel_readv_all(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp)
{
    return qio_channel_readv_full_all(ioc, iov, niov, NULL, NULL, errp);
}
int qio_channel_readv_full_all_eof(QIOChannel *ioc,
                                   const struct iovec *iov,
                                   size_t niov,
                                   int **fds, size_t *nfds,
                                   Error **errp)
{
    int ret = -1;
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;
    int **local_fds = fds;
    size_t *local_nfds = nfds;
    bool partial = false;

    if (nfds) {
        *nfds = 0;
    }
    if (fds) {
        *fds = NULL;
    }

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    while ((nlocal_iov > 0) || local_fds) {
        ssize_t len;

        len = qio_channel_readv_full(ioc, local_iov, nlocal_iov, local_fds,
                                     local_nfds, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_IN);
            } else {
                qio_channel_wait(ioc, G_IO_IN);
            }
            continue;
        }

        if (len == 0) {
            if (local_nfds && *local_nfds) {
                /*
                 * Got some FDs, but no data yet. This isn't an EOF
                 * scenario (yet), so carry on to try to read data
                 * on next loop iteration
                 */
                goto next_iter;
            } else if (!partial) {
                /* No fds and no data - EOF before any data read */
                ret = 0;
                goto cleanup;
            } else {
                len = -1;
                error_setg(errp,
                           "Unexpected end-of-file before all data were read");
                /* Fallthrough into len < 0 handling */
            }
        }

        if (len < 0) {
            /* Close any FDs we previously received */
            if (nfds && fds) {
                size_t i;
                for (i = 0; i < (*nfds); i++) {
                    close((*fds)[i]);
                }
                g_free(*fds);
                *fds = NULL;
                *nfds = 0;
            }
            goto cleanup;
        }

        if (nlocal_iov) {
            iov_discard_front(&local_iov, &nlocal_iov, len);
        }

next_iter:
        partial = true;
        local_fds = NULL;
        local_nfds = NULL;
    }

    ret = 1;

cleanup:
    g_free(local_iov_head);
    return ret;
}
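
/*
 * Example (sketch): interpreting the tri-state return value of
 * qio_channel_readv_full_all_eof(). The 'ioc', 'iov' and 'niov'
 * variables are hypothetical.
 *
 *   Error *err = NULL;
 *   int ret = qio_channel_readv_full_all_eof(ioc, iov, niov,
 *                                            NULL, NULL, &err);
 *   if (ret == 0) {
 *       // clean EOF before any byte was read
 *   } else if (ret == 1) {
 *       // every iovec was filled completely
 *   } else {
 *       error_report_err(err);  // ret < 0: I/O error or premature EOF
 *   }
 */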
int qio_channel_readv_full_all(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds, size_t *nfds,
                               Error **errp)
{
    int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, errp);

    if (ret == 0) {
        error_setg(errp, "Unexpected end-of-file before all data were read");
        return -1;
    }
    if (ret == 1) {
        return 0;
    }

    return ret;
}
int qio_channel_writev_all(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp)
{
    return qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, 0, errp);
}
int qio_channel_writev_full_all(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds, size_t nfds,
                                int flags, Error **errp)
{
    int ret = -1;
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    while (nlocal_iov > 0) {
        ssize_t len;

        len = qio_channel_writev_full(ioc, local_iov, nlocal_iov, fds,
                                      nfds, flags, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_OUT);
            } else {
                qio_channel_wait(ioc, G_IO_OUT);
            }
            continue;
        }
        if (len < 0) {
            goto cleanup;
        }

        iov_discard_front(&local_iov, &nlocal_iov, len);

        fds = NULL;
        nfds = 0;
    }

    ret = 0;
cleanup:
    g_free(local_iov_head);
    return ret;
}
ssize_t qio_channel_readv(QIOChannel *ioc,
                          const struct iovec *iov,
                          size_t niov,
                          Error **errp)
{
    return qio_channel_readv_full(ioc, iov, niov, NULL, NULL, errp);
}
ssize_t qio_channel_writev(QIOChannel *ioc,
                           const struct iovec *iov,
                           size_t niov,
                           Error **errp)
{
    return qio_channel_writev_full(ioc, iov, niov, NULL, 0, 0, errp);
}
ssize_t qio_channel_read(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = buflen };
    return qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, errp);
}
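
/*
 * Example (sketch): a plain byte-buffer read, the thin wrapper most
 * callers use instead of building iovecs by hand. 'ioc' is hypothetical.
 *
 *   char buf[256];
 *   Error *err = NULL;
 *   ssize_t got = qio_channel_read(ioc, buf, sizeof(buf), &err);
 *   if (got < 0 && got != QIO_CHANNEL_ERR_BLOCK) {
 *       error_report_err(err);
 *   }
 */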
ssize_t qio_channel_write(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp)
{
    struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
    return qio_channel_writev_full(ioc, &iov, 1, NULL, 0, 0, errp);
}
int qio_channel_read_all_eof(QIOChannel *ioc,
                             char *buf,
                             size_t buflen,
                             Error **errp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = buflen };
    return qio_channel_readv_all_eof(ioc, &iov, 1, errp);
}
int qio_channel_read_all(QIOChannel *ioc,
                         char *buf,
                         size_t buflen,
                         Error **errp)
{
    struct iovec iov = { .iov_base = buf, .iov_len = buflen };
    return qio_channel_readv_all(ioc, &iov, 1, errp);
}
int qio_channel_write_all(QIOChannel *ioc,
                          const char *buf,
                          size_t buflen,
                          Error **errp)
{
    struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
    return qio_channel_writev_all(ioc, &iov, 1, errp);
}
int qio_channel_set_blocking(QIOChannel *ioc,
                             bool enabled,
                             Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
    return klass->io_set_blocking(ioc, enabled, errp);
}
int qio_channel_close(QIOChannel *ioc,
                      Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
    return klass->io_close(ioc, errp);
}
GSource *qio_channel_create_watch(QIOChannel *ioc,
                                  GIOCondition condition)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
    GSource *ret = klass->io_create_watch(ioc, condition);

    if (ioc->name) {
        g_source_set_name(ret, ioc->name);
    }

    return ret;
}
void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
                                    AioContext *ctx,
                                    IOHandler *io_read,
                                    IOHandler *io_write,
                                    void *opaque)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    klass->io_set_aio_fd_handler(ioc, ctx, io_read, io_write, opaque);
}
guint qio_channel_add_watch_full(QIOChannel *ioc,
                                 GIOCondition condition,
                                 QIOChannelFunc func,
                                 gpointer user_data,
                                 GDestroyNotify notify,
                                 GMainContext *context)
{
    GSource *source;
    guint id;

    source = qio_channel_create_watch(ioc, condition);

    g_source_set_callback(source, (GSourceFunc)func, user_data, notify);

    id = g_source_attach(source, context);
    g_source_unref(source);

    return id;
}
guint qio_channel_add_watch(QIOChannel *ioc,
                            GIOCondition condition,
                            QIOChannelFunc func,
                            gpointer user_data,
                            GDestroyNotify notify)
{
    return qio_channel_add_watch_full(ioc, condition, func,
                                      user_data, notify, NULL);
}
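
/*
 * Example (sketch): invoking a callback from the default GMainContext
 * when the channel becomes readable. 'my_can_read' is a hypothetical
 * QIOChannelFunc; returning G_SOURCE_CONTINUE keeps the watch alive.
 *
 *   static gboolean my_can_read(QIOChannel *ioc, GIOCondition cond,
 *                               gpointer opaque)
 *   {
 *       // ... consume data with qio_channel_read() ...
 *       return G_SOURCE_CONTINUE;
 *   }
 *
 *   guint id = qio_channel_add_watch(ioc, G_IO_IN, my_can_read,
 *                                    NULL, NULL);
 */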
GSource *qio_channel_add_watch_source(QIOChannel *ioc,
                                      GIOCondition condition,
                                      QIOChannelFunc func,
                                      gpointer user_data,
                                      GDestroyNotify notify,
                                      GMainContext *context)
{
    GSource *source;
    guint id;

    id = qio_channel_add_watch_full(ioc, condition, func,
                                    user_data, notify, context);
    source = g_main_context_find_source_by_id(context, id);
    g_source_ref(source);
    return source;
}
int qio_channel_shutdown(QIOChannel *ioc,
                         QIOChannelShutdown how,
                         Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_shutdown) {
        error_setg(errp, "Data path shutdown not supported");
        return -1;
    }

    return klass->io_shutdown(ioc, how, errp);
}
void qio_channel_set_delay(QIOChannel *ioc,
                           bool enabled)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (klass->io_set_delay) {
        klass->io_set_delay(ioc, enabled);
    }
}
void qio_channel_set_cork(QIOChannel *ioc,
                          bool enabled)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (klass->io_set_cork) {
        klass->io_set_cork(ioc, enabled);
    }
}
off_t qio_channel_io_seek(QIOChannel *ioc,
                          off_t offset,
                          int whence,
                          Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_seek) {
        error_setg(errp, "Channel does not support random access");
        return -1;
    }

    return klass->io_seek(ioc, offset, whence, errp);
}
int qio_channel_flush(QIOChannel *ioc,
                      Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (!klass->io_flush ||
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
        return 0;
    }

    return klass->io_flush(ioc, errp);
}
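
/*
 * Example (sketch): the zero-copy write pattern this function pairs
 * with. Writes queued with QIO_CHANNEL_WRITE_FLAG_ZERO_COPY may still
 * reference the caller's buffers until qio_channel_flush() returns, so
 * the buffers must stay alive until then. 'ioc', 'iov' and 'niov' are
 * hypothetical.
 *
 *   Error *err = NULL;
 *   if (qio_channel_writev_full_all(ioc, iov, niov, NULL, 0,
 *                                   QIO_CHANNEL_WRITE_FLAG_ZERO_COPY,
 *                                   &err) < 0 ||
 *       qio_channel_flush(ioc, &err) < 0) {
 *       error_report_err(err);
 *   }
 */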
static void qio_channel_restart_read(void *opaque)
{
    QIOChannel *ioc = opaque;
    Coroutine *co = ioc->read_coroutine;

    /* Assert that aio_co_wake() reenters the coroutine directly */
    assert(qemu_get_current_aio_context() ==
           qemu_coroutine_get_aio_context(co));
    aio_co_wake(co);
}
static void qio_channel_restart_write(void *opaque)
{
    QIOChannel *ioc = opaque;
    Coroutine *co = ioc->write_coroutine;

    /* Assert that aio_co_wake() reenters the coroutine directly */
    assert(qemu_get_current_aio_context() ==
           qemu_coroutine_get_aio_context(co));
    aio_co_wake(co);
}
static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc)
{
    IOHandler *rd_handler = NULL, *wr_handler = NULL;
    AioContext *ctx;

    if (ioc->read_coroutine) {
        rd_handler = qio_channel_restart_read;
    }
    if (ioc->write_coroutine) {
        wr_handler = qio_channel_restart_write;
    }

    ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context();
    qio_channel_set_aio_fd_handler(ioc, ctx, rd_handler, wr_handler, ioc);
}
void qio_channel_attach_aio_context(QIOChannel *ioc,
                                    AioContext *ctx)
{
    assert(!ioc->read_coroutine);
    assert(!ioc->write_coroutine);
    ioc->ctx = ctx;
}
void qio_channel_detach_aio_context(QIOChannel *ioc)
{
    ioc->read_coroutine = NULL;
    ioc->write_coroutine = NULL;
    qio_channel_set_aio_fd_handlers(ioc);
    ioc->ctx = NULL;
}
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition)
{
    assert(qemu_in_coroutine());
    if (condition == G_IO_IN) {
        assert(!ioc->read_coroutine);
        ioc->read_coroutine = qemu_coroutine_self();
    } else if (condition == G_IO_OUT) {
        assert(!ioc->write_coroutine);
        ioc->write_coroutine = qemu_coroutine_self();
    } else {
        abort();
    }
    qio_channel_set_aio_fd_handlers(ioc);
    qemu_coroutine_yield();

    /* Allow interrupting the operation by reentering the coroutine other than
     * through the aio_fd_handlers. */
    if (condition == G_IO_IN && ioc->read_coroutine) {
        ioc->read_coroutine = NULL;
        qio_channel_set_aio_fd_handlers(ioc);
    } else if (condition == G_IO_OUT && ioc->write_coroutine) {
        ioc->write_coroutine = NULL;
        qio_channel_set_aio_fd_handlers(ioc);
    }
}
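
/*
 * Example (sketch): the canonical retry loop that the *_all helpers in
 * this file implement. Inside a coroutine, a request that would block
 * yields instead of spinning and is resumed by the fd handlers set up
 * in qio_channel_set_aio_fd_handlers(). 'ioc', 'iov' and 'niov' are
 * hypothetical.
 *
 *   Error *err = NULL;
 *   ssize_t len;
 *   do {
 *       len = qio_channel_readv(ioc, iov, niov, &err);
 *       if (len == QIO_CHANNEL_ERR_BLOCK) {
 *           qio_channel_yield(ioc, G_IO_IN);   // coroutine context only
 *       }
 *   } while (len == QIO_CHANNEL_ERR_BLOCK);
 */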
static gboolean qio_channel_wait_complete(QIOChannel *ioc,
                                          GIOCondition condition,
                                          gpointer opaque)
{
    GMainLoop *loop = opaque;

    g_main_loop_quit(loop);
    return FALSE;
}
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition)
{
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, TRUE);
    GSource *source;

    source = qio_channel_create_watch(ioc, condition);

    g_source_set_callback(source,
                          (GSourceFunc)qio_channel_wait_complete,
                          loop,
                          NULL);

    g_source_attach(source, ctxt);

    g_main_loop_run(loop);

    g_source_unref(source);
    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);
}
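
/*
 * Example (sketch): blocking outside coroutine context until the
 * channel becomes writable, then retrying a non-blocking write.
 * 'ioc', 'buf' and 'buflen' are hypothetical.
 *
 *   Error *err = NULL;
 *   ssize_t ret;
 *   while ((ret = qio_channel_write(ioc, buf, buflen, &err)) ==
 *          QIO_CHANNEL_ERR_BLOCK) {
 *       qio_channel_wait(ioc, G_IO_OUT);   // runs a private main loop
 *   }
 */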
static void qio_channel_finalize(Object *obj)
{
    QIOChannel *ioc = QIO_CHANNEL(obj);

    g_free(ioc->name);

#ifdef _WIN32
    if (ioc->event) {
        CloseHandle(ioc->event);
    }
#endif
}
static const TypeInfo qio_channel_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_QIO_CHANNEL,
    .instance_size = sizeof(QIOChannel),
    .instance_finalize = qio_channel_finalize,
    .abstract = true,
    .class_size = sizeof(QIOChannelClass),
};
static void qio_channel_register_types(void)
{
    type_register_static(&qio_channel_info);
}

type_init(qio_channel_register_types);