4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
24 #include "qemu/osdep.h"
25 #include "qemu/madvise.h"
26 #include "qemu/error-report.h"
28 #include "migration.h"
29 #include "migration-stats.h"
30 #include "qemu-file.h"
33 #include "qapi/error.h"
35 #include "io/channel-file.h"
37 #define IO_BUF_SIZE 32768
38 #define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)
45 int buf_size
; /* 0 when writing */
46 uint8_t buf
[IO_BUF_SIZE
];
48 DECLARE_BITMAP(may_free
, MAX_IOV_SIZE
);
49 struct iovec iov
[MAX_IOV_SIZE
];
53 Error
*last_error_obj
;
57 * Stop a file from being read/written - not all backing files can do this
58 * typically only sockets can.
60 * TODO: convert to propagate Error objects instead of squashing
61 * to a fixed errno value
63 int qemu_file_shutdown(QEMUFile
*f
)
68 * We must set qemufile error before the real shutdown(), otherwise
69 * there can be a race window where we thought IO all went though
70 * (because last_error==NULL) but actually IO has already stopped.
72 * If without correct ordering, the race can happen like this:
74 * page receiver other thread
75 * ------------- ------------
78 * returns 0 (buffer all zero)
79 * (we didn't check this retcode)
80 * try to detect IO error
81 * last_error==NULL, IO okay
82 * install ALL-ZERO page
87 qemu_file_set_error(f
, -EIO
);
90 if (!qio_channel_has_feature(f
->ioc
,
91 QIO_CHANNEL_FEATURE_SHUTDOWN
)) {
95 if (qio_channel_shutdown(f
->ioc
, QIO_CHANNEL_SHUTDOWN_BOTH
, &err
) < 0) {
96 error_report_err(err
);
103 static QEMUFile
*qemu_file_new_impl(QIOChannel
*ioc
, bool is_writable
)
107 f
= g_new0(QEMUFile
, 1);
111 f
->is_writable
= is_writable
;
117 * Result: QEMUFile* for a 'return path' for comms in the opposite direction
118 * NULL if not available
120 QEMUFile
*qemu_file_get_return_path(QEMUFile
*f
)
122 return qemu_file_new_impl(f
->ioc
, !f
->is_writable
);
125 QEMUFile
*qemu_file_new_output(QIOChannel
*ioc
)
127 return qemu_file_new_impl(ioc
, true);
130 QEMUFile
*qemu_file_new_input(QIOChannel
*ioc
)
132 return qemu_file_new_impl(ioc
, false);
136 * Get last error for stream f with optional Error*
138 * Return negative error value if there has been an error on previous
139 * operations, return 0 if no error happened.
141 * If errp is specified, a verbose error message will be copied over.
143 int qemu_file_get_error_obj(QEMUFile
*f
, Error
**errp
)
145 if (!f
->last_error
) {
149 /* There is an error */
151 if (f
->last_error_obj
) {
152 *errp
= error_copy(f
->last_error_obj
);
154 error_setg_errno(errp
, -f
->last_error
, "Channel error");
158 return f
->last_error
;
162 * Get last error for either stream f1 or f2 with optional Error*.
163 * The error returned (non-zero) can be either from f1 or f2.
165 * If any of the qemufile* is NULL, then skip the check on that file.
167 * When there is no error on both qemufile, zero is returned.
169 int qemu_file_get_error_obj_any(QEMUFile
*f1
, QEMUFile
*f2
, Error
**errp
)
174 ret
= qemu_file_get_error_obj(f1
, errp
);
175 /* If there's already error detected, return */
182 ret
= qemu_file_get_error_obj(f2
, errp
);
189 * Set the last error for stream f with optional Error*
191 void qemu_file_set_error_obj(QEMUFile
*f
, int ret
, Error
*err
)
193 if (f
->last_error
== 0 && ret
) {
195 error_propagate(&f
->last_error_obj
, err
);
197 error_report_err(err
);
202 * Get last error for stream f
204 * Return negative error value if there has been an error on previous
205 * operations, return 0 if no error happened.
208 int qemu_file_get_error(QEMUFile
*f
)
210 return f
->last_error
;
214 * Set the last error for stream f
216 void qemu_file_set_error(QEMUFile
*f
, int ret
)
218 qemu_file_set_error_obj(f
, ret
, NULL
);
221 static bool qemu_file_is_writable(QEMUFile
*f
)
223 return f
->is_writable
;
226 static void qemu_iovec_release_ram(QEMUFile
*f
)
231 /* Find and release all the contiguous memory ranges marked as may_free. */
232 idx
= find_next_bit(f
->may_free
, f
->iovcnt
, 0);
233 if (idx
>= f
->iovcnt
) {
238 /* The madvise() in the loop is called for iov within a continuous range and
239 * then reinitialize the iov. And in the end, madvise() is called for the
242 while ((idx
= find_next_bit(f
->may_free
, f
->iovcnt
, idx
+ 1)) < f
->iovcnt
) {
243 /* check for adjacent buffer and coalesce them */
244 if (iov
.iov_base
+ iov
.iov_len
== f
->iov
[idx
].iov_base
) {
245 iov
.iov_len
+= f
->iov
[idx
].iov_len
;
248 if (qemu_madvise(iov
.iov_base
, iov
.iov_len
, QEMU_MADV_DONTNEED
) < 0) {
249 error_report("migrate: madvise DONTNEED failed %p %zd: %s",
250 iov
.iov_base
, iov
.iov_len
, strerror(errno
));
254 if (qemu_madvise(iov
.iov_base
, iov
.iov_len
, QEMU_MADV_DONTNEED
) < 0) {
255 error_report("migrate: madvise DONTNEED failed %p %zd: %s",
256 iov
.iov_base
, iov
.iov_len
, strerror(errno
));
258 memset(f
->may_free
, 0, sizeof(f
->may_free
));
261 bool qemu_file_is_seekable(QEMUFile
*f
)
263 return qio_channel_has_feature(f
->ioc
, QIO_CHANNEL_FEATURE_SEEKABLE
);
267 * Flushes QEMUFile buffer
269 * This will flush all pending data. If data was only partially flushed, it
270 * will set an error state.
272 int qemu_fflush(QEMUFile
*f
)
274 if (!qemu_file_is_writable(f
)) {
275 return f
->last_error
;
279 return f
->last_error
;
282 Error
*local_error
= NULL
;
283 if (qio_channel_writev_all(f
->ioc
,
286 qemu_file_set_error_obj(f
, -EIO
, local_error
);
288 uint64_t size
= iov_size(f
->iov
, f
->iovcnt
);
289 stat64_add(&mig_stats
.qemu_file_transferred
, size
);
292 qemu_iovec_release_ram(f
);
297 return f
->last_error
;
301 * Attempt to fill the buffer from the underlying file
302 * Returns the number of bytes read, or negative value for an error.
304 * Note that it can return a partially full buffer even in a not error/not EOF
305 * case if the underlying file descriptor gives a short read, and that can
306 * happen even on a blocking fd.
308 static ssize_t coroutine_mixed_fn
qemu_fill_buffer(QEMUFile
*f
)
312 Error
*local_error
= NULL
;
314 assert(!qemu_file_is_writable(f
));
316 pending
= f
->buf_size
- f
->buf_index
;
318 memmove(f
->buf
, f
->buf
+ f
->buf_index
, pending
);
321 f
->buf_size
= pending
;
323 if (qemu_file_get_error(f
)) {
328 len
= qio_channel_read(f
->ioc
,
329 (char *)f
->buf
+ pending
,
330 IO_BUF_SIZE
- pending
,
332 if (len
== QIO_CHANNEL_ERR_BLOCK
) {
333 if (qemu_in_coroutine()) {
334 qio_channel_yield(f
->ioc
, G_IO_IN
);
336 qio_channel_wait(f
->ioc
, G_IO_IN
);
338 } else if (len
< 0) {
341 } while (len
== QIO_CHANNEL_ERR_BLOCK
);
345 } else if (len
== 0) {
346 qemu_file_set_error_obj(f
, -EIO
, local_error
);
348 qemu_file_set_error_obj(f
, len
, local_error
);
356 * Returns negative error value if any error happened on previous operations or
357 * while closing the file. Returns 0 or positive number on success.
359 * The meaning of return value on success depends on the specific backend
362 int qemu_fclose(QEMUFile
*f
)
364 int ret
= qemu_fflush(f
);
365 int ret2
= qio_channel_close(f
->ioc
, NULL
);
369 g_clear_pointer(&f
->ioc
, object_unref
);
370 error_free(f
->last_error_obj
);
372 trace_qemu_file_fclose();
377 * Add buf to iovec. Do flush if iovec is full.
380 * 1 iovec is full and flushed
381 * 0 iovec is not flushed
384 static int add_to_iovec(QEMUFile
*f
, const uint8_t *buf
, size_t size
,
387 /* check for adjacent buffer and coalesce them */
388 if (f
->iovcnt
> 0 && buf
== f
->iov
[f
->iovcnt
- 1].iov_base
+
389 f
->iov
[f
->iovcnt
- 1].iov_len
&&
390 may_free
== test_bit(f
->iovcnt
- 1, f
->may_free
))
392 f
->iov
[f
->iovcnt
- 1].iov_len
+= size
;
394 if (f
->iovcnt
>= MAX_IOV_SIZE
) {
395 /* Should only happen if a previous fflush failed */
396 assert(qemu_file_get_error(f
) || !qemu_file_is_writable(f
));
400 set_bit(f
->iovcnt
, f
->may_free
);
402 f
->iov
[f
->iovcnt
].iov_base
= (uint8_t *)buf
;
403 f
->iov
[f
->iovcnt
++].iov_len
= size
;
406 if (f
->iovcnt
>= MAX_IOV_SIZE
) {
414 static void add_buf_to_iovec(QEMUFile
*f
, size_t len
)
416 if (!add_to_iovec(f
, f
->buf
+ f
->buf_index
, len
, false)) {
418 if (f
->buf_index
== IO_BUF_SIZE
) {
424 void qemu_put_buffer_async(QEMUFile
*f
, const uint8_t *buf
, size_t size
,
431 add_to_iovec(f
, buf
, size
, may_free
);
434 void qemu_put_buffer(QEMUFile
*f
, const uint8_t *buf
, size_t size
)
443 l
= IO_BUF_SIZE
- f
->buf_index
;
447 memcpy(f
->buf
+ f
->buf_index
, buf
, l
);
448 add_buf_to_iovec(f
, l
);
449 if (qemu_file_get_error(f
)) {
457 void qemu_put_buffer_at(QEMUFile
*f
, const uint8_t *buf
, size_t buflen
,
468 ret
= qio_channel_pwrite(f
->ioc
, (char *)buf
, buflen
, pos
, &err
);
471 qemu_file_set_error_obj(f
, -EIO
, err
);
475 if ((ssize_t
)ret
== QIO_CHANNEL_ERR_BLOCK
) {
476 qemu_file_set_error_obj(f
, -EAGAIN
, NULL
);
481 error_setg(&err
, "Partial write of size %zu, expected %zu", ret
,
483 qemu_file_set_error_obj(f
, -EIO
, err
);
487 stat64_add(&mig_stats
.qemu_file_transferred
, buflen
);
493 size_t qemu_get_buffer_at(QEMUFile
*f
, const uint8_t *buf
, size_t buflen
,
503 ret
= qio_channel_pread(f
->ioc
, (char *)buf
, buflen
, pos
, &err
);
505 if ((ssize_t
)ret
== -1 || err
) {
506 qemu_file_set_error_obj(f
, -EIO
, err
);
510 if ((ssize_t
)ret
== QIO_CHANNEL_ERR_BLOCK
) {
511 qemu_file_set_error_obj(f
, -EAGAIN
, NULL
);
516 error_setg(&err
, "Partial read of size %zu, expected %zu", ret
, buflen
);
517 qemu_file_set_error_obj(f
, -EIO
, err
);
524 void qemu_set_offset(QEMUFile
*f
, off_t off
, int whence
)
529 if (qemu_file_is_writable(f
)) {
532 /* Drop all cached buffers if existed; will trigger a re-fill later */
537 ret
= qio_channel_io_seek(f
->ioc
, off
, whence
, &err
);
538 if (ret
== (off_t
)-1) {
539 qemu_file_set_error_obj(f
, -EIO
, err
);
543 off_t
qemu_get_offset(QEMUFile
*f
)
550 ret
= qio_channel_io_seek(f
->ioc
, 0, SEEK_CUR
, &err
);
551 if (ret
== (off_t
)-1) {
552 qemu_file_set_error_obj(f
, -EIO
, err
);
558 void qemu_put_byte(QEMUFile
*f
, int v
)
564 f
->buf
[f
->buf_index
] = v
;
565 add_buf_to_iovec(f
, 1);
568 void qemu_file_skip(QEMUFile
*f
, int size
)
570 if (f
->buf_index
+ size
<= f
->buf_size
) {
571 f
->buf_index
+= size
;
576 * Read 'size' bytes from file (at 'offset') without moving the
577 * pointer and set 'buf' to point to that data.
579 * It will return size bytes unless there was an error, in which case it will
580 * return as many as it managed to read (assuming blocking fd's which
581 * all current QEMUFile are)
583 size_t coroutine_mixed_fn
qemu_peek_buffer(QEMUFile
*f
, uint8_t **buf
, size_t size
, size_t offset
)
588 assert(!qemu_file_is_writable(f
));
589 assert(offset
< IO_BUF_SIZE
);
590 assert(size
<= IO_BUF_SIZE
- offset
);
592 /* The 1st byte to read from */
593 index
= f
->buf_index
+ offset
;
594 /* The number of available bytes starting at index */
595 pending
= f
->buf_size
- index
;
598 * qemu_fill_buffer might return just a few bytes, even when there isn't
599 * an error, so loop collecting them until we get enough.
601 while (pending
< size
) {
602 int received
= qemu_fill_buffer(f
);
608 index
= f
->buf_index
+ offset
;
609 pending
= f
->buf_size
- index
;
615 if (size
> pending
) {
619 *buf
= f
->buf
+ index
;
624 * Read 'size' bytes of data from the file into buf.
625 * 'size' can be larger than the internal buffer.
627 * It will return size bytes unless there was an error, in which case it will
628 * return as many as it managed to read (assuming blocking fd's which
629 * all current QEMUFile are)
631 size_t coroutine_mixed_fn
qemu_get_buffer(QEMUFile
*f
, uint8_t *buf
, size_t size
)
633 size_t pending
= size
;
636 while (pending
> 0) {
640 res
= qemu_peek_buffer(f
, &src
, MIN(pending
, IO_BUF_SIZE
), 0);
644 memcpy(buf
, src
, res
);
645 qemu_file_skip(f
, res
);
654 * Read 'size' bytes of data from the file.
655 * 'size' can be larger than the internal buffer.
658 * may be held on an internal buffer (in which case *buf is updated
659 * to point to it) that is valid until the next qemu_file operation.
661 * will be copied to the *buf that was passed in.
663 * The code tries to avoid the copy if possible.
665 * It will return size bytes unless there was an error, in which case it will
666 * return as many as it managed to read (assuming blocking fd's which
667 * all current QEMUFile are)
669 * Note: Since **buf may get changed, the caller should take care to
670 * keep a pointer to the original buffer if it needs to deallocate it.
672 size_t coroutine_mixed_fn
qemu_get_buffer_in_place(QEMUFile
*f
, uint8_t **buf
, size_t size
)
674 if (size
< IO_BUF_SIZE
) {
678 res
= qemu_peek_buffer(f
, &src
, size
, 0);
681 qemu_file_skip(f
, res
);
687 return qemu_get_buffer(f
, *buf
, size
);
691 * Peeks a single byte from the buffer; this isn't guaranteed to work if
692 * offset leaves a gap after the previous read/peeked data.
694 int coroutine_mixed_fn
qemu_peek_byte(QEMUFile
*f
, int offset
)
696 int index
= f
->buf_index
+ offset
;
698 assert(!qemu_file_is_writable(f
));
699 assert(offset
< IO_BUF_SIZE
);
701 if (index
>= f
->buf_size
) {
703 index
= f
->buf_index
+ offset
;
704 if (index
>= f
->buf_size
) {
708 return f
->buf
[index
];
711 int coroutine_mixed_fn
qemu_get_byte(QEMUFile
*f
)
715 result
= qemu_peek_byte(f
, 0);
716 qemu_file_skip(f
, 1);
720 uint64_t qemu_file_transferred(QEMUFile
*f
)
722 uint64_t ret
= stat64_get(&mig_stats
.qemu_file_transferred
);
725 g_assert(qemu_file_is_writable(f
));
727 for (i
= 0; i
< f
->iovcnt
; i
++) {
728 ret
+= f
->iov
[i
].iov_len
;
734 void qemu_put_be16(QEMUFile
*f
, unsigned int v
)
736 qemu_put_byte(f
, v
>> 8);
740 void qemu_put_be32(QEMUFile
*f
, unsigned int v
)
742 qemu_put_byte(f
, v
>> 24);
743 qemu_put_byte(f
, v
>> 16);
744 qemu_put_byte(f
, v
>> 8);
748 void qemu_put_be64(QEMUFile
*f
, uint64_t v
)
750 qemu_put_be32(f
, v
>> 32);
754 unsigned int qemu_get_be16(QEMUFile
*f
)
757 v
= qemu_get_byte(f
) << 8;
758 v
|= qemu_get_byte(f
);
762 unsigned int qemu_get_be32(QEMUFile
*f
)
765 v
= (unsigned int)qemu_get_byte(f
) << 24;
766 v
|= qemu_get_byte(f
) << 16;
767 v
|= qemu_get_byte(f
) << 8;
768 v
|= qemu_get_byte(f
);
772 uint64_t qemu_get_be64(QEMUFile
*f
)
775 v
= (uint64_t)qemu_get_be32(f
) << 32;
776 v
|= qemu_get_be32(f
);
781 * Get a string whose length is determined by a single preceding byte
782 * A preallocated 256 byte buffer must be passed in.
783 * Returns: len on success and a 0 terminated string in the buffer
785 * (Note a 0 length string will return 0 either way)
787 size_t coroutine_fn
qemu_get_counted_string(QEMUFile
*f
, char buf
[256])
789 size_t len
= qemu_get_byte(f
);
790 size_t res
= qemu_get_buffer(f
, (uint8_t *)buf
, len
);
794 return res
== len
? res
: 0;
798 * Put a string with one preceding byte containing its length. The length of
799 * the string should be less than 256.
801 void qemu_put_counted_string(QEMUFile
*f
, const char *str
)
803 size_t len
= strlen(str
);
806 qemu_put_byte(f
, len
);
807 qemu_put_buffer(f
, (const uint8_t *)str
, len
);
811 * Set the blocking state of the QEMUFile.
812 * Note: On some transports the OS only keeps a single blocking state for
813 * both directions, and thus changing the blocking on the main
814 * QEMUFile can also affect the return path.
816 void qemu_file_set_blocking(QEMUFile
*f
, bool block
)
818 qio_channel_set_blocking(f
->ioc
, block
, NULL
);
824 * Get the ioc object for the file, without incrementing
825 * the reference count.
827 * Returns: the ioc object
829 QIOChannel
*qemu_file_get_ioc(QEMUFile
*file
)
835 * Read size bytes from QEMUFile f and write them to fd.
837 int qemu_file_get_to_fd(QEMUFile
*f
, int fd
, size_t size
)
840 size_t pending
= f
->buf_size
- f
->buf_index
;
844 rc
= qemu_fill_buffer(f
);
854 rc
= write(fd
, f
->buf
+ f
->buf_index
, MIN(pending
, size
));