/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <zlib.h>
#include "qemu/madvise.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "migration.h"
#include "migration-stats.h"
#include "qemu-file.h"
#include "trace.h"
#include "qapi/error.h"
#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)
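/*
 * Writes are staged in the QEMUFile's internal buffer and iovec array below
 * and only reach the underlying QIOChannel when qemu_fflush() runs:
 * IO_BUF_SIZE bounds the internal copy buffer, while MAX_IOV_SIZE bounds how
 * many iovec entries may be queued before a flush is forced.
 */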
struct QEMUFile {
    const QEMUFileHooks *hooks;
    QIOChannel *ioc;
    bool is_writable;

    /* The sum of bytes transferred on the wire */
    uint64_t total_transferred;

    int buf_index;
    int buf_size; /* 0 when writing */
    uint8_t buf[IO_BUF_SIZE];

    DECLARE_BITMAP(may_free, MAX_IOV_SIZE);
    struct iovec iov[MAX_IOV_SIZE];
    unsigned int iovcnt;

    int last_error;
    Error *last_error_obj;
};
/*
 * Stop a file from being read/written - not all backing files can do this;
 * typically only sockets can.
 *
 * TODO: convert to propagate Error objects instead of squashing
 * to a fixed errno value
 */
int qemu_file_shutdown(QEMUFile *f)
    /*
     * We must set qemufile error before the real shutdown(), otherwise
     * there can be a race window where we thought IO all went through
     * (because last_error==NULL) but actually IO has already stopped.
     *
     * Without correct ordering, the race can happen like this:
     *
     *      page receiver                     other thread
     *      -------------                     ------------
     *      qemu_get_buffer()
     *                                        do shutdown()
     *        returns 0 (buffer all zero)
     *        (we didn't check this retcode)
     *      try to detect IO error
     *        last_error==NULL, IO okay
     *      install ALL-ZERO page
     *                                        set last_error
     *      --> guest crash!
     */
    qemu_file_set_error(f, -EIO);

    if (!qio_channel_has_feature(f->ioc,
                                 QIO_CHANNEL_FEATURE_SHUTDOWN)) {

    if (qio_channel_shutdown(f->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL) < 0) {
bool qemu_file_mode_is_not_valid(const char *mode)
    if (mode == NULL ||
        (mode[0] != 'r' && mode[0] != 'w') ||
        mode[1] != 'b' || mode[2] != 0) {
        fprintf(stderr, "qemu_fopen: Argument validity check failed\n");
static QEMUFile *qemu_file_new_impl(QIOChannel *ioc, bool is_writable)
    f = g_new0(QEMUFile, 1);

    f->is_writable = is_writable;
/*
 * Result: QEMUFile* for a 'return path' for comms in the opposite direction
 *         NULL if not available
 */
QEMUFile *qemu_file_get_return_path(QEMUFile *f)
    return qemu_file_new_impl(f->ioc, !f->is_writable);
QEMUFile *qemu_file_new_output(QIOChannel *ioc)
    return qemu_file_new_impl(ioc, true);

QEMUFile *qemu_file_new_input(QIOChannel *ioc)
    return qemu_file_new_impl(ioc, false);
void qemu_file_set_hooks(QEMUFile *f, const QEMUFileHooks *hooks)
/*
 * Get last error for stream f with optional Error*
 *
 * Return negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 *
 * Optionally, an Error* is returned in errp; note that it may be NULL even
 * when the return value is non-zero.
 */
int qemu_file_get_error_obj(QEMUFile *f, Error **errp)
        *errp = f->last_error_obj ? error_copy(f->last_error_obj) : NULL;

    return f->last_error;
/*
 * Get last error for either stream f1 or f2 with optional Error*.
 * The error returned (non-zero) can be either from f1 or f2.
 *
 * If either QEMUFile* is NULL, the check on that file is skipped.
 *
 * When there is no error on either qemufile, zero is returned.
 */
int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp)
        ret = qemu_file_get_error_obj(f1, errp);
        /* If there's already an error detected, return it */

        ret = qemu_file_get_error_obj(f2, errp);
/*
 * Set the last error for stream f with optional Error*
 */
void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err)
    if (f->last_error == 0 && ret) {
        error_propagate(&f->last_error_obj, err);

        error_report_err(err);
/*
 * Get last error for stream f
 *
 * Return negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 */
int qemu_file_get_error(QEMUFile *f)
    return qemu_file_get_error_obj(f, NULL);
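/*
 * A typical caller only needs the errno-style value, e.g. (sketch with a
 * hypothetical 'ret'):
 *
 *     int ret = qemu_file_get_error(f);
 *     if (ret < 0) {
 *         // stop using f: the error sticks until the file is torn down
 *     }
 */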
/*
 * Set the last error for stream f
 */
void qemu_file_set_error(QEMUFile *f, int ret)
    qemu_file_set_error_obj(f, ret, NULL);
bool qemu_file_is_writable(QEMUFile *f)
    return f->is_writable;
static void qemu_iovec_release_ram(QEMUFile *f)
    /* Find and release all the contiguous memory ranges marked as may_free. */
    idx = find_next_bit(f->may_free, f->iovcnt, 0);
    if (idx >= f->iovcnt) {

    /* The madvise() in the loop is called for each iov within a continuous
     * range, which is then reinitialized; at the end, madvise() is called
     * once more for the trailing range.
     */
    while ((idx = find_next_bit(f->may_free, f->iovcnt, idx + 1)) < f->iovcnt) {
        /* check for adjacent buffer and coalesce them */
        if (iov.iov_base + iov.iov_len == f->iov[idx].iov_base) {
            iov.iov_len += f->iov[idx].iov_len;

        if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
            error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                         iov.iov_base, iov.iov_len, strerror(errno));

    if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
        error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                     iov.iov_base, iov.iov_len, strerror(errno));

    memset(f->may_free, 0, sizeof(f->may_free));
/*
 * Flushes QEMUFile buffer
 *
 * This will flush all pending data. If data was only partially flushed, it
 * will set an error state.
 */
void qemu_fflush(QEMUFile *f)
    if (!qemu_file_is_writable(f)) {

    if (qemu_file_get_error(f)) {

        Error *local_error = NULL;
        if (qio_channel_writev_all(f->ioc,
            qemu_file_set_error_obj(f, -EIO, local_error);

        uint64_t size = iov_size(f->iov, f->iovcnt);
        f->total_transferred += size;

    qemu_iovec_release_ram(f);
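/*
 * The usual write-side pattern built on this is roughly (sketch, with
 * hypothetical data/len): queue bytes with qemu_put_buffer(f, data, len) or
 * qemu_put_byte(), call qemu_fflush(f), then check qemu_file_get_error(f)
 * for a negative errno before assuming the data actually went out.
 */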
void ram_control_before_iterate(QEMUFile *f, uint64_t flags)
    if (f->hooks && f->hooks->before_ram_iterate) {
        ret = f->hooks->before_ram_iterate(f, flags, NULL);
            qemu_file_set_error(f, ret);
void ram_control_after_iterate(QEMUFile *f, uint64_t flags)
    if (f->hooks && f->hooks->after_ram_iterate) {
        ret = f->hooks->after_ram_iterate(f, flags, NULL);
            qemu_file_set_error(f, ret);
void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data)
    if (f->hooks && f->hooks->hook_ram_load) {
        int ret = f->hooks->hook_ram_load(f, flags, data);
            qemu_file_set_error(f, ret);
size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
                             ram_addr_t offset, size_t size,
                             uint64_t *bytes_sent)
    if (f->hooks && f->hooks->save_page) {
        int ret = f->hooks->save_page(f, block_offset,
                                      offset, size, bytes_sent);

        if (ret != RAM_SAVE_CONTROL_DELAYED &&
            ret != RAM_SAVE_CONTROL_NOT_SUPP) {
            if (bytes_sent && *bytes_sent > 0) {
                qemu_file_credit_transfer(f, *bytes_sent);
            } else if (ret < 0) {
                qemu_file_set_error(f, ret);

    return RAM_SAVE_CONTROL_NOT_SUPP;
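/*
 * In other words: unless the hook reports RAM_SAVE_CONTROL_DELAYED or
 * RAM_SAVE_CONTROL_NOT_SUPP, the page is considered handled by the hook;
 * any bytes it sent are credited via qemu_file_credit_transfer() and a
 * negative result is latched on the file with qemu_file_set_error().
 * Without a save_page hook, RAM_SAVE_CONTROL_NOT_SUPP is returned.
 */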
/*
 * Attempt to fill the buffer from the underlying file
 * Returns the number of bytes read, or negative value for an error.
 *
 * Note that it can return a partially full buffer even in a non-error/non-EOF
 * case if the underlying file descriptor gives a short read, and that can
 * happen even on a blocking fd.
 */
static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
    Error *local_error = NULL;

    assert(!qemu_file_is_writable(f));

    pending = f->buf_size - f->buf_index;
        memmove(f->buf, f->buf + f->buf_index, pending);

    f->buf_size = pending;

    if (qemu_file_get_error(f)) {

        len = qio_channel_read(f->ioc,
                               (char *)f->buf + pending,
                               IO_BUF_SIZE - pending,
                               &local_error);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(f->ioc, G_IO_IN);
            } else {
                qio_channel_wait(f->ioc, G_IO_IN);
        } else if (len < 0) {
    } while (len == QIO_CHANNEL_ERR_BLOCK);

        f->total_transferred += len;
    } else if (len == 0) {
        qemu_file_set_error_obj(f, -EIO, local_error);
        qemu_file_set_error_obj(f, len, local_error);
void qemu_file_credit_transfer(QEMUFile *f, size_t size)
    f->total_transferred += size;
/*
 * Returns negative error value if any error happened on previous operations or
 * while closing the file. Returns 0 or positive number on success.
 *
 * The meaning of return value on success depends on the specific backend being
 * used.
 */
int qemu_fclose(QEMUFile *f)
    ret = qemu_file_get_error(f);

    ret2 = qio_channel_close(f->ioc, NULL);

    g_clear_pointer(&f->ioc, object_unref);

    /* If any error was spotted before closing, we should report it
     * instead of the close() return value.
     */
    error_free(f->last_error_obj);

    trace_qemu_file_fclose();
/*
 * Add buf to iovec. Do flush if iovec is full.
 *
 * Return values:
 * 1 iovec is full and flushed
 * 0 iovec is not flushed
 */
static int add_to_iovec(QEMUFile *f, const uint8_t *buf, size_t size,
                        bool may_free)
    /* check for adjacent buffer and coalesce them */
    if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
        f->iov[f->iovcnt - 1].iov_len &&
        may_free == test_bit(f->iovcnt - 1, f->may_free))
        f->iov[f->iovcnt - 1].iov_len += size;

        if (f->iovcnt >= MAX_IOV_SIZE) {
            /* Should only happen if a previous fflush failed */
            assert(qemu_file_get_error(f) || !qemu_file_is_writable(f));

            set_bit(f->iovcnt, f->may_free);
        f->iov[f->iovcnt].iov_base = (uint8_t *)buf;
        f->iov[f->iovcnt++].iov_len = size;

    if (f->iovcnt >= MAX_IOV_SIZE) {
static void add_buf_to_iovec(QEMUFile *f, size_t len)
    if (!add_to_iovec(f, f->buf + f->buf_index, len, false)) {
        if (f->buf_index == IO_BUF_SIZE) {
void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size,
                           bool may_free)
    add_to_iovec(f, buf, size, may_free);
void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size)
        l = IO_BUF_SIZE - f->buf_index;

        memcpy(f->buf + f->buf_index, buf, l);
        add_buf_to_iovec(f, l);
        if (qemu_file_get_error(f)) {
void qemu_put_byte(QEMUFile *f, int v)
    f->buf[f->buf_index] = v;
    add_buf_to_iovec(f, 1);
void qemu_file_skip(QEMUFile *f, int size)
    if (f->buf_index + size <= f->buf_size) {
        f->buf_index += size;
/*
 * Read 'size' bytes from file (at 'offset') without moving the
 * pointer and set 'buf' to point to that data.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 */
size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf,
                                           size_t size, size_t offset)
    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);
    assert(size <= IO_BUF_SIZE - offset);

    /* The 1st byte to read from */
    index = f->buf_index + offset;
    /* The number of available bytes starting at index */
    pending = f->buf_size - index;

    /*
     * qemu_fill_buffer might return just a few bytes, even when there isn't
     * an error, so loop collecting them until we get enough.
     */
    while (pending < size) {
        int received = qemu_fill_buffer(f);

        index = f->buf_index + offset;
        pending = f->buf_size - index;

    if (size > pending) {

    *buf = f->buf + index;
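/*
 * The usual consumer pattern (exactly what qemu_get_buffer() below does) is
 * to peek a chunk and then consume it explicitly, e.g. with hypothetical
 * src/dst/want variables:
 *
 *     uint8_t *src;
 *     size_t got = qemu_peek_buffer(f, &src, MIN(want, IO_BUF_SIZE), 0);
 *     memcpy(dst, src, got);
 *     qemu_file_skip(f, got);
 */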
/*
 * Read 'size' bytes of data from the file into buf.
 * 'size' can be larger than the internal buffer.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 */
size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
    size_t pending = size;

    while (pending > 0) {

        res = qemu_peek_buffer(f, &src, MIN(pending, IO_BUF_SIZE), 0);

        memcpy(buf, src, res);
        qemu_file_skip(f, res);
/*
 * Read 'size' bytes of data from the file.
 * 'size' can be larger than the internal buffer.
 *
 * The data may be held in an internal buffer (in which case *buf is updated
 * to point to it) that is valid until the next qemu_file operation, or it
 * will be copied to the *buf that was passed in.
 *
 * The code tries to avoid the copy if possible.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 *
 * Note: Since **buf may get changed, the caller should take care to
 *       keep a pointer to the original buffer if it needs to deallocate it.
 */
size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf,
                                                   size_t size)
    if (size < IO_BUF_SIZE) {

        res = qemu_peek_buffer(f, &src, size, 0);

            qemu_file_skip(f, res);

    return qemu_get_buffer(f, *buf, size);
/*
 * Peeks a single byte from the buffer; this isn't guaranteed to work if
 * offset leaves a gap after the previous read/peeked data.
 */
int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset)
    int index = f->buf_index + offset;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);

    if (index >= f->buf_size) {
        index = f->buf_index + offset;
        if (index >= f->buf_size) {

    return f->buf[index];
int coroutine_mixed_fn qemu_get_byte(QEMUFile *f)
    result = qemu_peek_byte(f, 0);
    qemu_file_skip(f, 1);
uint64_t qemu_file_transferred_fast(QEMUFile *f)
    uint64_t ret = f->total_transferred;

    for (i = 0; i < f->iovcnt; i++) {
        ret += f->iov[i].iov_len;
uint64_t qemu_file_transferred(QEMUFile *f)
    return f->total_transferred;
void qemu_put_be16(QEMUFile *f, unsigned int v)
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);

void qemu_put_be32(QEMUFile *f, unsigned int v)
    qemu_put_byte(f, v >> 24);
    qemu_put_byte(f, v >> 16);
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);

void qemu_put_be64(QEMUFile *f, uint64_t v)
    qemu_put_be32(f, v >> 32);
    qemu_put_be32(f, v);
unsigned int qemu_get_be16(QEMUFile *f)
    v = qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);

unsigned int qemu_get_be32(QEMUFile *f)
    v = (unsigned int)qemu_get_byte(f) << 24;
    v |= qemu_get_byte(f) << 16;
    v |= qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);

uint64_t qemu_get_be64(QEMUFile *f)
    v = (uint64_t)qemu_get_be32(f) << 32;
    v |= qemu_get_be32(f);
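/*
 * As the helpers above show, multi-byte integers go on the wire most
 * significant byte first: for example, qemu_put_be32(f, 0x11223344) emits
 * the bytes 0x11 0x22 0x33 0x44, and qemu_get_be32() reassembles them in
 * the same order.
 */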
/* return the size after compression, or negative value on error */
static int qemu_compress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
                              const uint8_t *source, size_t source_len)
    err = deflateReset(stream);

    stream->avail_in = source_len;
    stream->next_in = (uint8_t *)source;
    stream->avail_out = dest_len;
    stream->next_out = dest;

    err = deflate(stream, Z_FINISH);
    if (err != Z_STREAM_END) {

    return stream->next_out - dest;
/* Compress size bytes of data starting at p and store the compressed
 * data in the buffer of f.
 *
 * Since the file is a dummy file with empty_ops, return -1 if f has no space
 * to save the compressed data.
 */
ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream,
                                  const uint8_t *p, size_t size)
    ssize_t blen = IO_BUF_SIZE - f->buf_index - sizeof(int32_t);

    if (blen < compressBound(size)) {

    blen = qemu_compress_data(stream, f->buf + f->buf_index + sizeof(int32_t),
                              blen, p, size);

    qemu_put_be32(f, blen);
    add_buf_to_iovec(f, blen);
    return blen + sizeof(int32_t);
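/*
 * The resulting layout in the stream is thus a 4-byte big-endian length
 * followed by blen bytes of deflate output; a reader would fetch the length
 * first (e.g. with qemu_get_be32()) before decompressing the payload.
 */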
/* Copy the data in the buffer of f_src to the buffer of f_des, and
 * then reset the buf_index of f_src to 0.
 */
int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src)
    if (f_src->buf_index > 0) {
        len = f_src->buf_index;
        qemu_put_buffer(f_des, f_src->buf, f_src->buf_index);
        f_src->buf_index = 0;
/*
 * Check if the writable buffer is empty
 */
bool qemu_file_buffer_empty(QEMUFile *file)
    assert(qemu_file_is_writable(file));

    return !file->iovcnt;
/*
 * Get a string whose length is determined by a single preceding byte
 * A preallocated 256 byte buffer must be passed in.
 * Returns: len on success and a 0 terminated string in the buffer
 *          else 0
 *          (Note a 0 length string will return 0 either way)
 */
size_t coroutine_fn qemu_get_counted_string(QEMUFile *f, char buf[256])
    size_t len = qemu_get_byte(f);
    size_t res = qemu_get_buffer(f, (uint8_t *)buf, len);

    return res == len ? res : 0;
/*
 * Put a string with one preceding byte containing its length. The length of
 * the string should be less than 256.
 */
void qemu_put_counted_string(QEMUFile *f, const char *str)
    size_t len = strlen(str);

    qemu_put_byte(f, len);
    qemu_put_buffer(f, (const uint8_t *)str, len);
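/*
 * Wire format example: qemu_put_counted_string(f, "ram") emits the length
 * byte 0x03 followed by 'r' 'a' 'm'; qemu_get_counted_string() on the other
 * side reads the length byte, pulls that many bytes into the caller's
 * 256-byte buffer and (per the comment above) hands back a 0-terminated
 * string, returning 0 on any mismatch.
 */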
/*
 * Set the blocking state of the QEMUFile.
 * Note: On some transports the OS only keeps a single blocking state for
 *       both directions, and thus changing the blocking on the main
 *       QEMUFile can also affect the return path.
 */
void qemu_file_set_blocking(QEMUFile *f, bool block)
    qio_channel_set_blocking(f->ioc, block, NULL);
/*
 * Get the ioc object for the file, without incrementing
 * the reference count.
 *
 * Returns: the ioc object
 */
QIOChannel *qemu_file_get_ioc(QEMUFile *file)
    return file->ioc;
/*
 * Read size bytes from QEMUFile f and write them to fd.
 */
int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size)
        size_t pending = f->buf_size - f->buf_index;

            rc = qemu_fill_buffer(f);

        rc = write(fd, f->buf + f->buf_index, MIN(pending, size));