4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 #include "qemu/osdep.h"
26 #include "qemu/madvise.h"
27 #include "qemu/error-report.h"
29 #include "migration.h"
30 #include "migration-stats.h"
31 #include "qemu-file.h"
34 #include "qapi/error.h"
36 #define IO_BUF_SIZE 32768
37 #define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)
/*
 * Members of the QEMUFile structure (the 'struct QEMUFile {' opener and
 * several members, e.g. ioc/is_writable/buf_index/iovcnt/last_error, are
 * not visible in this chunk - confirm against the full source).
 *  - hooks:             optional migration hook callbacks
 *  - total_transferred: running count of bytes moved on the wire
 *  - buf/buf_size:      staging buffer; buf_size is 0 while writing
 *  - may_free/iov:      scatter list for writes plus a bitmap marking
 *                       entries whose backing RAM may be released
 *                       (madvise'd) after flushing
 *  - last_error_obj:    detailed Error object for the sticky error state
 */
40 const QEMUFileHooks
*hooks
;
44 /* The sum of bytes transferred on the wire */
45 uint64_t total_transferred
;
48 int buf_size
; /* 0 when writing */
49 uint8_t buf
[IO_BUF_SIZE
];
51 DECLARE_BITMAP(may_free
, MAX_IOV_SIZE
);
52 struct iovec iov
[MAX_IOV_SIZE
];
56 Error
*last_error_obj
;
/*
 * qemu_file_shutdown(): force-stop all I/O on the file's channel.
 * NOTE(review): several original lines (braces, 'ret' handling, return
 * statements) are missing from this chunk; comments below only describe
 * what the visible code shows.
 */
60 * Stop a file from being read/written - not all backing files can do this
61 * typically only sockets can.
63 * TODO: convert to propagate Error objects instead of squashing
64 * to a fixed errno value
66 int qemu_file_shutdown(QEMUFile
*f
)
71 * We must set qemufile error before the real shutdown(), otherwise
72 * there can be a race window where we thought IO all went though
73 * (because last_error==NULL) but actually IO has already stopped.
75 * If without correct ordering, the race can happen like this:
77 * page receiver other thread
78 * ------------- ------------
81 * returns 0 (buffer all zero)
82 * (we didn't check this retcode)
83 * try to detect IO error
84 * last_error==NULL, IO okay
85 * install ALL-ZERO page
/* Mark the stream failed *before* the real shutdown so concurrent readers
 * cannot observe last_error==NULL after I/O has already stopped. */
90 qemu_file_set_error(f
, -EIO
);
/* Channels without the SHUTDOWN feature (non-sockets) cannot be stopped. */
93 if (!qio_channel_has_feature(f
->ioc
,
94 QIO_CHANNEL_FEATURE_SHUTDOWN
)) {
/* Shut down both directions of the underlying channel. */
98 if (qio_channel_shutdown(f
->ioc
, QIO_CHANNEL_SHUTDOWN_BOTH
, NULL
) < 0) {
/*
 * Return true when @mode is NOT one of the accepted fopen-style modes
 * ("rb" / "wb"), printing a diagnostic in that case.
 * NOTE(review): the enclosing 'if (' and the return statements are on
 * lines missing from this chunk.
 */
105 bool qemu_file_mode_is_not_valid(const char *mode
)
108 (mode
[0] != 'r' && mode
[0] != 'w') ||
109 mode
[1] != 'b' || mode
[2] != 0) {
110 fprintf(stderr
, "qemu_fopen: Argument validity check failed\n");
/*
 * Allocate a zero-initialised QEMUFile bound to @ioc, recording whether it
 * is a writer or a reader.
 * NOTE(review): lines taking a reference on @ioc and returning 'f' are not
 * visible in this chunk - presumably present; confirm against full source.
 */
117 static QEMUFile
*qemu_file_new_impl(QIOChannel
*ioc
, bool is_writable
)
121 f
= g_new0(QEMUFile
, 1);
125 f
->is_writable
= is_writable
;
131 * Result: QEMUFile* for a 'return path' for comms in the opposite direction
132 * NULL if not available
134 QEMUFile
*qemu_file_get_return_path(QEMUFile
*f
)
136 return qemu_file_new_impl(f
->ioc
, !f
->is_writable
);
139 QEMUFile
*qemu_file_new_output(QIOChannel
*ioc
)
141 return qemu_file_new_impl(ioc
, true);
144 QEMUFile
*qemu_file_new_input(QIOChannel
*ioc
)
146 return qemu_file_new_impl(ioc
, false);
/*
 * Install migration hook callbacks on @f.
 * NOTE(review): the function body is on lines missing from this chunk -
 * presumably it stores @hooks into f->hooks; confirm against full source.
 */
149 void qemu_file_set_hooks(QEMUFile
*f
, const QEMUFileHooks
*hooks
)
/*
 * Fetch the sticky error state of @f.  When @errp is non-NULL it receives a
 * *copy* of the stored Error (caller frees), or NULL when no detailed error
 * object was recorded.  The integer error code is returned separately.
 * NOTE(review): the 'if (errp)' guard and braces are on lines missing from
 * this chunk.
 */
155 * Get last error for stream f with optional Error*
157 * Return negative error value if there has been an error on previous
158 * operations, return 0 if no error happened.
159 * Optional, it returns Error* in errp, but it may be NULL even if return value
163 int qemu_file_get_error_obj(QEMUFile
*f
, Error
**errp
)
166 *errp
= f
->last_error_obj
? error_copy(f
->last_error_obj
) : NULL
;
168 return f
->last_error
;
/*
 * Check both streams for a sticky error, f1 first; the first non-zero
 * result wins.  NULL streams are skipped.
 * NOTE(review): the NULL checks, early returns and final return are on
 * lines missing from this chunk.
 */
172 * Get last error for either stream f1 or f2 with optional Error*.
173 * The error returned (non-zero) can be either from f1 or f2.
175 * If any of the qemufile* is NULL, then skip the check on that file.
177 * When there is no error on both qemufile, zero is returned.
179 int qemu_file_get_error_obj_any(QEMUFile
*f1
, QEMUFile
*f2
, Error
**errp
)
184 ret
= qemu_file_get_error_obj(f1
, errp
);
185 /* If there's already error detected, return */
192 ret
= qemu_file_get_error_obj(f2
, errp
);
/*
 * Record error code @ret (plus optional detailed @err) on @f.  Only the
 * first error sticks: if last_error is already set, the visible code path
 * reports (and thereby frees) @err instead of storing it.
 * NOTE(review): the assignment 'f->last_error = ret' and the else/brace
 * lines are missing from this chunk.
 */
199 * Set the last error for stream f with optional Error*
201 void qemu_file_set_error_obj(QEMUFile
*f
, int ret
, Error
*err
)
203 if (f
->last_error
== 0 && ret
) {
205 error_propagate(&f
->last_error_obj
, err
);
207 error_report_err(err
);
212 * Get last error for stream f
214 * Return negative error value if there has been an error on previous
215 * operations, return 0 if no error happened.
218 int qemu_file_get_error(QEMUFile
*f
)
220 return qemu_file_get_error_obj(f
, NULL
);
224 * Set the last error for stream f
226 void qemu_file_set_error(QEMUFile
*f
, int ret
)
228 qemu_file_set_error_obj(f
, ret
, NULL
);
231 bool qemu_file_is_writable(QEMUFile
*f
)
233 return f
->is_writable
;
/*
 * Release (madvise DONTNEED) the guest RAM backing the iov entries whose
 * bit is set in f->may_free, coalescing adjacent entries so madvise is
 * called once per contiguous range; finally clear the bitmap.
 * NOTE(review): local declarations ('iov', 'idx'), early return, the else
 * branch reinitialising 'iov', and closing braces are on lines missing from
 * this chunk - the two identical madvise calls below are the in-loop and
 * after-loop flushes of the current coalesced range.
 */
236 static void qemu_iovec_release_ram(QEMUFile
*f
)
241 /* Find and release all the contiguous memory ranges marked as may_free. */
242 idx
= find_next_bit(f
->may_free
, f
->iovcnt
, 0);
243 if (idx
>= f
->iovcnt
) {
248 /* The madvise() in the loop is called for iov within a continuous range and
249 * then reinitialize the iov. And in the end, madvise() is called for the
252 while ((idx
= find_next_bit(f
->may_free
, f
->iovcnt
, idx
+ 1)) < f
->iovcnt
) {
253 /* check for adjacent buffer and coalesce them */
254 if (iov
.iov_base
+ iov
.iov_len
== f
->iov
[idx
].iov_base
) {
255 iov
.iov_len
+= f
->iov
[idx
].iov_len
;
258 if (qemu_madvise(iov
.iov_base
, iov
.iov_len
, QEMU_MADV_DONTNEED
) < 0) {
259 error_report("migrate: madvise DONTNEED failed %p %zd: %s",
260 iov
.iov_base
, iov
.iov_len
, strerror(errno
));
264 if (qemu_madvise(iov
.iov_base
, iov
.iov_len
, QEMU_MADV_DONTNEED
) < 0) {
265 error_report("migrate: madvise DONTNEED failed %p %zd: %s",
266 iov
.iov_base
, iov
.iov_len
, strerror(errno
));
/* All marked ranges handled - reset the bitmap for the next batch. */
268 memset(f
->may_free
, 0, sizeof(f
->may_free
));
/*
 * Flush all queued iov data of a writable QEMUFile to its channel,
 * accounting the bytes to the migration rate limiter and the transfer
 * counter, then releasing any may_free RAM.
 * NOTE(review): the early returns, the writev_all argument list, the
 * success branch braces and the iovcnt/buf_index reset are on lines
 * missing from this chunk.
 */
273 * Flushes QEMUFile buffer
275 * This will flush all pending data. If data was only partially flushed, it
276 * will set an error state.
/* No-op for read-only files. */
278 void qemu_fflush(QEMUFile
*f
)
280 if (!qemu_file_is_writable(f
)) {
/* Don't attempt I/O once the stream is in an error state. */
284 if (qemu_file_get_error(f
)) {
288 Error
*local_error
= NULL
;
289 if (qio_channel_writev_all(f
->ioc
,
292 qemu_file_set_error_obj(f
, -EIO
, local_error
);
294 uint64_t size
= iov_size(f
->iov
, f
->iovcnt
);
295 migration_rate_account(size
);
296 f
->total_transferred
+= size
;
299 qemu_iovec_release_ram(f
);
/*
 * Invoke the optional before_ram_iterate hook; a negative hook result is
 * latched into the file's error state.
 * NOTE(review): 'int ret' declaration, the 'if (ret < 0)' guard and braces
 * are on lines missing from this chunk.
 */
306 void ram_control_before_iterate(QEMUFile
*f
, uint64_t flags
)
310 if (f
->hooks
&& f
->hooks
->before_ram_iterate
) {
311 ret
= f
->hooks
->before_ram_iterate(f
, flags
, NULL
);
313 qemu_file_set_error(f
, ret
);
/*
 * Invoke the optional after_ram_iterate hook; a negative hook result is
 * latched into the file's error state.
 * NOTE(review): 'int ret' declaration, the 'if (ret < 0)' guard and braces
 * are on lines missing from this chunk.
 */
318 void ram_control_after_iterate(QEMUFile
*f
, uint64_t flags
)
322 if (f
->hooks
&& f
->hooks
->after_ram_iterate
) {
323 ret
= f
->hooks
->after_ram_iterate(f
, flags
, NULL
);
325 qemu_file_set_error(f
, ret
);
/*
 * Invoke the optional hook_ram_load hook on the incoming side; a negative
 * hook result is latched into the file's error state.
 * NOTE(review): the 'if (ret < 0)' guard and braces are on lines missing
 * from this chunk.
 */
330 void ram_control_load_hook(QEMUFile
*f
, uint64_t flags
, void *data
)
332 if (f
->hooks
&& f
->hooks
->hook_ram_load
) {
333 int ret
= f
->hooks
->hook_ram_load(f
, flags
, data
);
335 qemu_file_set_error(f
, ret
);
/*
 * Offer a RAM page to the optional save_page hook (e.g. RDMA transport).
 * If the hook handled it, account @size to the rate limiter, credit any
 * bytes the hook reports as sent, and latch negative results into the
 * file's error state.  Falls through to RAM_SAVE_CONTROL_NOT_SUPP when no
 * hook is installed.
 * NOTE(review): the 'return ret;' inside the hook branch and several
 * closing braces are on lines missing from this chunk.
 */
340 size_t ram_control_save_page(QEMUFile
*f
, ram_addr_t block_offset
,
341 ram_addr_t offset
, size_t size
,
342 uint64_t *bytes_sent
)
344 if (f
->hooks
&& f
->hooks
->save_page
) {
345 int ret
= f
->hooks
->save_page(f
, block_offset
,
346 offset
, size
, bytes_sent
);
/* Hook took responsibility for the page: count it against the limit. */
347 if (ret
!= RAM_SAVE_CONTROL_NOT_SUPP
) {
348 migration_rate_account(size
);
351 if (ret
!= RAM_SAVE_CONTROL_DELAYED
&&
352 ret
!= RAM_SAVE_CONTROL_NOT_SUPP
) {
353 if (bytes_sent
&& *bytes_sent
> 0) {
354 qemu_file_credit_transfer(f
, *bytes_sent
);
355 } else if (ret
< 0) {
356 qemu_file_set_error(f
, ret
);
363 return RAM_SAVE_CONTROL_NOT_SUPP
;
/*
 * Refill the read buffer from the channel: compact any unread tail to the
 * front of f->buf, then read into the remaining space, yielding/waiting on
 * EAGAIN-style QIO_CHANNEL_ERR_BLOCK.  len==0 (EOF) and len<0 are mapped
 * into the file's sticky error state (-EIO for EOF).
 * NOTE(review): 'len'/'pending' declarations, the do{ opener, buf_index
 * reset, buf_size update on success, and return statements are on lines
 * missing from this chunk.
 */
367 * Attempt to fill the buffer from the underlying file
368 * Returns the number of bytes read, or negative value for an error.
370 * Note that it can return a partially full buffer even in a not error/not EOF
371 * case if the underlying file descriptor gives a short read, and that can
372 * happen even on a blocking fd.
374 static ssize_t coroutine_mixed_fn
qemu_fill_buffer(QEMUFile
*f
)
378 Error
*local_error
= NULL
;
380 assert(!qemu_file_is_writable(f
));
/* Bytes already buffered but not yet consumed. */
382 pending
= f
->buf_size
- f
->buf_index
;
/* Slide the unread tail to the start of the buffer. */
384 memmove(f
->buf
, f
->buf
+ f
->buf_index
, pending
);
387 f
->buf_size
= pending
;
389 if (qemu_file_get_error(f
)) {
394 len
= qio_channel_read(f
->ioc
,
395 (char *)f
->buf
+ pending
,
396 IO_BUF_SIZE
- pending
,
/* Would-block: yield in coroutine context, blocking-wait otherwise. */
398 if (len
== QIO_CHANNEL_ERR_BLOCK
) {
399 if (qemu_in_coroutine()) {
400 qio_channel_yield(f
->ioc
, G_IO_IN
);
402 qio_channel_wait(f
->ioc
, G_IO_IN
);
404 } else if (len
< 0) {
407 } while (len
== QIO_CHANNEL_ERR_BLOCK
);
411 f
->total_transferred
+= len
;
/* EOF from the channel is surfaced as -EIO on the stream. */
412 } else if (len
== 0) {
413 qemu_file_set_error_obj(f
, -EIO
, local_error
);
415 qemu_file_set_error_obj(f
, len
, local_error
);
421 void qemu_file_credit_transfer(QEMUFile
*f
, size_t size
)
423 f
->total_transferred
+= size
;
/*
 * Close @f: report any sticky error in preference to the close() result,
 * drop the channel reference, free the stored Error and the QEMUFile.
 * NOTE(review): the implicit qemu_fflush, 'if (ret >= 0) ret = ret2;',
 * g_free(f) and final return are on lines missing from this chunk.
 */
428 * Returns negative error value if any error happened on previous operations or
429 * while closing the file. Returns 0 or positive number on success.
431 * The meaning of return value on success depends on the specific backend
434 int qemu_fclose(QEMUFile
*f
)
438 ret
= qemu_file_get_error(f
);
440 ret2
= qio_channel_close(f
->ioc
, NULL
);
444 g_clear_pointer(&f
->ioc
, object_unref
);
446 /* If any error was spotted before closing, we should report it
447 * instead of the close() return value.
452 error_free(f
->last_error_obj
);
454 trace_qemu_file_fclose();
/*
 * Queue [buf, buf+size) into f->iov, coalescing with the previous entry
 * when the memory is contiguous and has the same may_free marking; flush
 * via qemu_fflush when the iovec fills up.
 * NOTE(review): the 'else' keyword, the early 'return -1', the
 * 'if (may_free)' guard around set_bit, the qemu_fflush call and the
 * return statements are on lines missing from this chunk.
 */
459 * Add buf to iovec. Do flush if iovec is full.
462 * 1 iovec is full and flushed
463 * 0 iovec is not flushed
466 static int add_to_iovec(QEMUFile
*f
, const uint8_t *buf
, size_t size
,
469 /* check for adjacent buffer and coalesce them */
470 if (f
->iovcnt
> 0 && buf
== f
->iov
[f
->iovcnt
- 1].iov_base
+
471 f
->iov
[f
->iovcnt
- 1].iov_len
&&
472 may_free
== test_bit(f
->iovcnt
- 1, f
->may_free
))
474 f
->iov
[f
->iovcnt
- 1].iov_len
+= size
;
476 if (f
->iovcnt
>= MAX_IOV_SIZE
) {
477 /* Should only happen if a previous fflush failed */
478 assert(qemu_file_get_error(f
) || !qemu_file_is_writable(f
));
/* Remember that this entry's RAM may be released after flushing. */
482 set_bit(f
->iovcnt
, f
->may_free
);
484 f
->iov
[f
->iovcnt
].iov_base
= (uint8_t *)buf
;
485 f
->iov
[f
->iovcnt
++].iov_len
= size
;
488 if (f
->iovcnt
>= MAX_IOV_SIZE
) {
/*
 * Queue @len bytes of the internal staging buffer (at buf_index) into the
 * iovec; when not flushed, advance buf_index and flush once the staging
 * buffer is full.
 * NOTE(review): the buf_index increment and the qemu_fflush call are on
 * lines missing from this chunk.
 */
496 static void add_buf_to_iovec(QEMUFile
*f
, size_t len
)
498 if (!add_to_iovec(f
, f
->buf
+ f
->buf_index
, len
, false)) {
500 if (f
->buf_index
== IO_BUF_SIZE
) {
/*
 * Queue an external buffer for writing without copying it into the staging
 * buffer; @may_free marks that the pages may be madvise'd away once sent.
 * NOTE(review): the early-return error check preceding the add_to_iovec
 * call is on lines missing from this chunk.
 */
506 void qemu_put_buffer_async(QEMUFile
*f
, const uint8_t *buf
, size_t size
,
513 add_to_iovec(f
, buf
, size
, may_free
);
/*
 * Copy @size bytes into the staging buffer in IO_BUF_SIZE-bounded chunks,
 * queueing each chunk, and stop early if the stream enters an error state.
 * NOTE(review): the surrounding while loop, the 'l > size' clamp, the
 * buf/size advance and the error early-return are on lines missing from
 * this chunk.
 */
516 void qemu_put_buffer(QEMUFile
*f
, const uint8_t *buf
, size_t size
)
/* Space left in the staging buffer for this chunk. */
525 l
= IO_BUF_SIZE
- f
->buf_index
;
529 memcpy(f
->buf
+ f
->buf_index
, buf
, l
);
530 add_buf_to_iovec(f
, l
);
531 if (qemu_file_get_error(f
)) {
/*
 * Append a single byte to the staging buffer and queue it.
 * NOTE(review): the early-return error check before the store is on lines
 * missing from this chunk.
 */
539 void qemu_put_byte(QEMUFile
*f
, int v
)
545 f
->buf
[f
->buf_index
] = v
;
546 add_buf_to_iovec(f
, 1);
549 void qemu_file_skip(QEMUFile
*f
, int size
)
551 if (f
->buf_index
+ size
<= f
->buf_size
) {
552 f
->buf_index
+= size
;
/*
 * Expose @size bytes at read-offset @offset directly out of the internal
 * buffer (no copy, pointer returned through *buf), refilling the buffer in
 * a loop since qemu_fill_buffer may deliver short reads.
 * NOTE(review): the 'received <= 0' break, the clamp assignment when
 * 'size > pending', and the return statements are on lines missing from
 * this chunk.
 */
557 * Read 'size' bytes from file (at 'offset') without moving the
558 * pointer and set 'buf' to point to that data.
560 * It will return size bytes unless there was an error, in which case it will
561 * return as many as it managed to read (assuming blocking fd's which
562 * all current QEMUFile are)
564 size_t coroutine_mixed_fn
qemu_peek_buffer(QEMUFile
*f
, uint8_t **buf
, size_t size
, size_t offset
)
569 assert(!qemu_file_is_writable(f
));
570 assert(offset
< IO_BUF_SIZE
);
571 assert(size
<= IO_BUF_SIZE
- offset
);
573 /* The 1st byte to read from */
574 index
= f
->buf_index
+ offset
;
575 /* The number of available bytes starting at index */
576 pending
= f
->buf_size
- index
;
579 * qemu_fill_buffer might return just a few bytes, even when there isn't
580 * an error, so loop collecting them until we get enough.
582 while (pending
< size
) {
583 int received
= qemu_fill_buffer(f
);
/* Refill may compact the buffer - recompute position and availability. */
589 index
= f
->buf_index
+ offset
;
590 pending
= f
->buf_size
- index
;
596 if (size
> pending
) {
600 *buf
= f
->buf
+ index
;
/*
 * Copy @size bytes from the stream into @buf, chunked through
 * qemu_peek_buffer/qemu_file_skip so @size may exceed the internal buffer.
 * NOTE(review): the 'res == 0' break, the buf/pending advance and the
 * 'return size - pending;' are on lines missing from this chunk.
 */
605 * Read 'size' bytes of data from the file into buf.
606 * 'size' can be larger than the internal buffer.
608 * It will return size bytes unless there was an error, in which case it will
609 * return as many as it managed to read (assuming blocking fd's which
610 * all current QEMUFile are)
612 size_t coroutine_mixed_fn
qemu_get_buffer(QEMUFile
*f
, uint8_t *buf
, size_t size
)
614 size_t pending
= size
;
617 while (pending
> 0) {
621 res
= qemu_peek_buffer(f
, &src
, MIN(pending
, IO_BUF_SIZE
), 0);
625 memcpy(buf
, src
, res
);
626 qemu_file_skip(f
, res
);
/*
 * Zero-copy fast path: for reads smaller than the internal buffer, point
 * *buf at the buffered data directly; otherwise fall back to a copying
 * qemu_get_buffer into the caller's original *buf.
 * NOTE(review): the 'res == size' success test, the '*buf = src'
 * assignment and 'return res;' are on lines missing from this chunk.
 */
635 * Read 'size' bytes of data from the file.
636 * 'size' can be larger than the internal buffer.
639 * may be held on an internal buffer (in which case *buf is updated
640 * to point to it) that is valid until the next qemu_file operation.
642 * will be copied to the *buf that was passed in.
644 * The code tries to avoid the copy if possible.
646 * It will return size bytes unless there was an error, in which case it will
647 * return as many as it managed to read (assuming blocking fd's which
648 * all current QEMUFile are)
650 * Note: Since **buf may get changed, the caller should take care to
651 * keep a pointer to the original buffer if it needs to deallocate it.
653 size_t coroutine_mixed_fn
qemu_get_buffer_in_place(QEMUFile
*f
, uint8_t **buf
, size_t size
)
655 if (size
< IO_BUF_SIZE
) {
659 res
= qemu_peek_buffer(f
, &src
, size
, 0);
662 qemu_file_skip(f
, res
);
668 return qemu_get_buffer(f
, *buf
, size
);
/*
 * Return the byte at read-offset @offset without consuming it, refilling
 * the buffer once if needed; the visible structure retries the index/bound
 * computation after the refill.
 * NOTE(review): the qemu_fill_buffer call between the two bound checks and
 * the 'return 0;' failure path are on lines missing from this chunk.
 */
672 * Peeks a single byte from the buffer; this isn't guaranteed to work if
673 * offset leaves a gap after the previous read/peeked data.
675 int coroutine_mixed_fn
qemu_peek_byte(QEMUFile
*f
, int offset
)
677 int index
= f
->buf_index
+ offset
;
679 assert(!qemu_file_is_writable(f
));
680 assert(offset
< IO_BUF_SIZE
);
682 if (index
>= f
->buf_size
) {
684 index
= f
->buf_index
+ offset
;
685 if (index
>= f
->buf_size
) {
689 return f
->buf
[index
];
/*
 * Consume and return one byte: peek at offset 0, then advance by one.
 * NOTE(review): the 'int result;' declaration and 'return result;' are on
 * lines missing from this chunk.
 */
692 int coroutine_mixed_fn
qemu_get_byte(QEMUFile
*f
)
696 result
= qemu_peek_byte(f
, 0);
697 qemu_file_skip(f
, 1);
/*
 * Transfer count without forcing a flush: the wire counter plus the bytes
 * still queued in the iovec.
 * NOTE(review): the 'int i;' declaration and 'return ret;' are on lines
 * missing from this chunk.
 */
701 uint64_t qemu_file_transferred_fast(QEMUFile
*f
)
703 uint64_t ret
= f
->total_transferred
;
706 for (i
= 0; i
< f
->iovcnt
; i
++) {
707 ret
+= f
->iov
[i
].iov_len
;
/*
 * Return the total bytes transferred on the wire.
 * NOTE(review): at least one body line between the signature and this
 * return is missing from the chunk - presumably a qemu_fflush(f) so the
 * counter is up to date; confirm against full source.
 */
713 uint64_t qemu_file_transferred(QEMUFile
*f
)
716 return f
->total_transferred
;
/*
 * Write a 16-bit value in big-endian byte order.
 * NOTE(review): the second byte write (low byte of v) is on a line missing
 * from this chunk.
 */
719 void qemu_put_be16(QEMUFile
*f
, unsigned int v
)
721 qemu_put_byte(f
, v
>> 8);
/*
 * Write a 32-bit value in big-endian byte order, most-significant byte
 * first.
 * NOTE(review): the final byte write (low byte of v) is on a line missing
 * from this chunk.
 */
725 void qemu_put_be32(QEMUFile
*f
, unsigned int v
)
727 qemu_put_byte(f
, v
>> 24);
728 qemu_put_byte(f
, v
>> 16);
729 qemu_put_byte(f
, v
>> 8);
/*
 * Write a 64-bit value in big-endian byte order as two 32-bit halves.
 * NOTE(review): the second half-write (low 32 bits of v) is on a line
 * missing from this chunk.
 */
733 void qemu_put_be64(QEMUFile
*f
, uint64_t v
)
735 qemu_put_be32(f
, v
>> 32);
/*
 * Read a 16-bit big-endian value.
 * NOTE(review): the 'unsigned int v;' declaration and 'return v;' are on
 * lines missing from this chunk.
 */
739 unsigned int qemu_get_be16(QEMUFile
*f
)
742 v
= qemu_get_byte(f
) << 8;
743 v
|= qemu_get_byte(f
);
/*
 * Read a 32-bit big-endian value; the top byte is cast before shifting so
 * the shift happens in unsigned arithmetic.
 * NOTE(review): the 'unsigned int v;' declaration and 'return v;' are on
 * lines missing from this chunk.
 */
747 unsigned int qemu_get_be32(QEMUFile
*f
)
750 v
= (unsigned int)qemu_get_byte(f
) << 24;
751 v
|= qemu_get_byte(f
) << 16;
752 v
|= qemu_get_byte(f
) << 8;
753 v
|= qemu_get_byte(f
);
/*
 * Read a 64-bit big-endian value as two 32-bit halves.
 * NOTE(review): the 'uint64_t v;' declaration and 'return v;' are on lines
 * missing from this chunk.
 */
757 uint64_t qemu_get_be64(QEMUFile
*f
)
760 v
= (uint64_t)qemu_get_be32(f
) << 32;
761 v
|= qemu_get_be32(f
);
/*
 * Deflate @source_len bytes into @dest using a reusable z_stream; the
 * stream is reset first, deflate is driven with Z_FINISH, and the
 * compressed size is the distance next_out advanced.
 * NOTE(review): the 'int err;' declaration, the 'return -1' error paths
 * and closing braces are on lines missing from this chunk.
 */
765 /* return the size after compression, or negative value on error */
766 static int qemu_compress_data(z_stream
*stream
, uint8_t *dest
, size_t dest_len
,
767 const uint8_t *source
, size_t source_len
)
771 err
= deflateReset(stream
);
776 stream
->avail_in
= source_len
;
777 stream
->next_in
= (uint8_t *)source
;
778 stream
->avail_out
= dest_len
;
779 stream
->next_out
= dest
;
/* Z_FINISH with a sufficiently large output buffer must end the stream. */
781 err
= deflate(stream
, Z_FINISH
);
782 if (err
!= Z_STREAM_END
) {
786 return stream
->next_out
- dest
;
789 /* Compress size bytes of data start at p and store the compressed
790 * data to the buffer of f.
792 * Since the file is dummy file with empty_ops, return -1 if f has no space to
793 * save the compressed data.
795 ssize_t
qemu_put_compression_data(QEMUFile
*f
, z_stream
*stream
,
796 const uint8_t *p
, size_t size
)
798 ssize_t blen
= IO_BUF_SIZE
- f
->buf_index
- sizeof(int32_t);
800 if (blen
< compressBound(size
)) {
804 blen
= qemu_compress_data(stream
, f
->buf
+ f
->buf_index
+ sizeof(int32_t),
810 qemu_put_be32(f
, blen
);
811 add_buf_to_iovec(f
, blen
);
812 return blen
+ sizeof(int32_t);
/*
 * Drain f_src's staging buffer into f_des and reset f_src's write index.
 * NOTE(review): the 'int len = 0;' initialisation and 'return len;' are on
 * lines missing from this chunk.
 */
815 /* Put the data in the buffer of f_src to the buffer of f_des, and
816 * then reset the buf_index of f_src to 0.
819 int qemu_put_qemu_file(QEMUFile
*f_des
, QEMUFile
*f_src
)
823 if (f_src
->buf_index
> 0) {
824 len
= f_src
->buf_index
;
825 qemu_put_buffer(f_des
, f_src
->buf
, f_src
->buf_index
);
826 f_src
->buf_index
= 0;
833 * Check if the writable buffer is empty
836 bool qemu_file_buffer_empty(QEMUFile
*file
)
838 assert(qemu_file_is_writable(file
));
840 return !file
->iovcnt
;
/*
 * Read a length-prefixed string (one length byte, then that many bytes)
 * into the caller's 256-byte buffer; returns len on success, 0 on a short
 * read.
 * NOTE(review): the NUL-termination of buf[res] is on a line missing from
 * this chunk.
 */
844 * Get a string whose length is determined by a single preceding byte
845 * A preallocated 256 byte buffer must be passed in.
846 * Returns: len on success and a 0 terminated string in the buffer
848 * (Note a 0 length string will return 0 either way)
850 size_t coroutine_fn
qemu_get_counted_string(QEMUFile
*f
, char buf
[256])
852 size_t len
= qemu_get_byte(f
);
853 size_t res
= qemu_get_buffer(f
, (uint8_t *)buf
, len
);
857 return res
== len
? res
: 0;
/*
 * Write a string prefixed by a single length byte (so the length must be
 * < 256, per the header comment).
 * NOTE(review): an assertion enforcing the length limit appears to be on a
 * line missing from this chunk - confirm against full source.
 */
861 * Put a string with one preceding byte containing its length. The length of
862 * the string should be less than 256.
864 void qemu_put_counted_string(QEMUFile
*f
, const char *str
)
866 size_t len
= strlen(str
);
869 qemu_put_byte(f
, len
);
870 qemu_put_buffer(f
, (const uint8_t *)str
, len
);
874 * Set the blocking state of the QEMUFile.
875 * Note: On some transports the OS only keeps a single blocking state for
876 * both directions, and thus changing the blocking on the main
877 * QEMUFile can also affect the return path.
879 void qemu_file_set_blocking(QEMUFile
*f
, bool block
)
881 qio_channel_set_blocking(f
->ioc
, block
, NULL
);
/*
 * Borrow the file's underlying channel.
 * NOTE(review): the function body is on lines missing from this chunk -
 * per the header comment it returns file->ioc without taking a reference;
 * confirm against full source.
 */
887 * Get the ioc object for the file, without incrementing
888 * the reference count.
890 * Returns: the ioc object
892 QIOChannel
*qemu_file_get_ioc(QEMUFile
*file
)
/*
 * Splice @size bytes from the stream into file descriptor @fd, refilling
 * the internal buffer as needed and write()ing at most 'pending' bytes per
 * iteration.
 * NOTE(review): this function continues past the end of the visible chunk
 * (loop header, error handling, size bookkeeping and return are missing) -
 * comments above are hedged accordingly.
 */
898 * Read size bytes from QEMUFile f and write them to fd.
900 int qemu_file_get_to_fd(QEMUFile
*f
, int fd
, size_t size
)
903 size_t pending
= f
->buf_size
- f
->buf_index
;
907 rc
= qemu_fill_buffer(f
);
917 rc
= write(fd
, f
->buf
+ f
->buf_index
, MIN(pending
, size
));