migration/qemu-file.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/madvise.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "migration.h"
#include "migration-stats.h"
#include "qemu-file.h"
#include "trace.h"
#include "options.h"
#include "qapi/error.h"
#include "rdma.h"
#include "io/channel-file.h"

#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN_CONST(IOV_MAX, 64)
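
/*
 * Buffering model (summary of the code below, not taken from upstream
 * documentation): writes are queued as an iovec of up to MAX_IOV_SIZE
 * entries and flushed with qemu_fflush(); small writes are first staged in
 * buf[] and then referenced from the iovec, while large buffers can be
 * queued directly (see qemu_put_buffer_async()).  Reads always go through
 * the IO_BUF_SIZE staging buffer buf[].
 */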
struct QEMUFile {
    QIOChannel *ioc;
    bool is_writable;

    int buf_index;
    int buf_size; /* 0 when writing */
    uint8_t buf[IO_BUF_SIZE];

    DECLARE_BITMAP(may_free, MAX_IOV_SIZE);
    struct iovec iov[MAX_IOV_SIZE];
    unsigned int iovcnt;

    int last_error;
    Error *last_error_obj;
};
/*
 * Stop a file from being read/written - not all backing files can do this;
 * typically only sockets can.
 *
 * TODO: convert to propagate Error objects instead of squashing
 * to a fixed errno value
 */
int qemu_file_shutdown(QEMUFile *f)
{
    Error *err = NULL;

    /*
     * We must set the qemufile error before the real shutdown(), otherwise
     * there can be a race window where we thought IO all went through
     * (because last_error==NULL) but actually IO has already stopped.
     *
     * Without the correct ordering, the race can happen like this:
     *
     *      page receiver                     other thread
     *      -------------                     ------------
     *      qemu_get_buffer()
     *                                        do shutdown()
     *        returns 0 (buffer all zero)
     *        (we didn't check this retcode)
     *      try to detect IO error
     *        last_error==NULL, IO okay
     *      install ALL-ZERO page
     *                                        set last_error
     *      --> guest crash!
     */
    if (!f->last_error) {
        qemu_file_set_error(f, -EIO);
    }

    if (!qio_channel_has_feature(f->ioc,
                                 QIO_CHANNEL_FEATURE_SHUTDOWN)) {
        return -ENOSYS;
    }

    if (qio_channel_shutdown(f->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, &err) < 0) {
        error_report_err(err);
        return -EIO;
    }

    return 0;
}
static QEMUFile *qemu_file_new_impl(QIOChannel *ioc, bool is_writable)
{
    QEMUFile *f;

    f = g_new0(QEMUFile, 1);

    object_ref(ioc);
    f->ioc = ioc;
    f->is_writable = is_writable;

    return f;
}
/*
 * Result: QEMUFile* for a 'return path' for comms in the opposite direction
 *         NULL if not available
 */
QEMUFile *qemu_file_get_return_path(QEMUFile *f)
{
    return qemu_file_new_impl(f->ioc, !f->is_writable);
}

QEMUFile *qemu_file_new_output(QIOChannel *ioc)
{
    return qemu_file_new_impl(ioc, true);
}

QEMUFile *qemu_file_new_input(QIOChannel *ioc)
{
    return qemu_file_new_impl(ioc, false);
}
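
/*
 * Usage sketch (illustrative only; the channel variable and constant names
 * here are hypothetical, not taken from this file):
 *
 *     QEMUFile *out = qemu_file_new_output(QIO_CHANNEL(sioc));
 *     qemu_put_be32(out, SOME_MAGIC);
 *     qemu_fflush(out);
 *     if (qemu_file_get_error(out)) {
 *         ... handle the error ...
 *     }
 */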
/*
 * Get last error for stream f with optional Error*
 *
 * Return negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 *
 * If errp is specified, a verbose error message will be copied over.
 */
int qemu_file_get_error_obj(QEMUFile *f, Error **errp)
{
    if (!f->last_error) {
        return 0;
    }

    /* There is an error */
    if (errp) {
        if (f->last_error_obj) {
            *errp = error_copy(f->last_error_obj);
        } else {
            error_setg_errno(errp, -f->last_error, "Channel error");
        }
    }

    return f->last_error;
}
/*
 * Get last error for either stream f1 or f2 with optional Error*.
 * The error returned (non-zero) can be either from f1 or f2.
 *
 * If either QEMUFile* is NULL, the check on that file is skipped.
 *
 * When neither file has an error, zero is returned.
 */
int qemu_file_get_error_obj_any(QEMUFile *f1, QEMUFile *f2, Error **errp)
{
    int ret = 0;

    if (f1) {
        ret = qemu_file_get_error_obj(f1, errp);
        /* If an error has already been detected, return it */
        if (ret) {
            return ret;
        }
    }

    if (f2) {
        ret = qemu_file_get_error_obj(f2, errp);
    }

    return ret;
}
/*
 * Set the last error for stream f with optional Error*
 */
void qemu_file_set_error_obj(QEMUFile *f, int ret, Error *err)
{
    if (f->last_error == 0 && ret) {
        f->last_error = ret;
        error_propagate(&f->last_error_obj, err);
    } else if (err) {
        error_report_err(err);
    }
}

/*
 * Get last error for stream f
 *
 * Return negative error value if there has been an error on previous
 * operations, return 0 if no error happened.
 */
int qemu_file_get_error(QEMUFile *f)
{
    return f->last_error;
}

/*
 * Set the last error for stream f
 */
void qemu_file_set_error(QEMUFile *f, int ret)
{
    qemu_file_set_error_obj(f, ret, NULL);
}

static bool qemu_file_is_writable(QEMUFile *f)
{
    return f->is_writable;
}
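
/*
 * Release guest RAM that was queued with may_free set: adjacent iovec
 * entries marked in f->may_free are coalesced and each contiguous range is
 * handed to madvise(DONTNEED), after which the bitmap is cleared.
 */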
static void qemu_iovec_release_ram(QEMUFile *f)
{
    struct iovec iov;
    unsigned long idx;

    /* Find and release all the contiguous memory ranges marked as may_free. */
    idx = find_next_bit(f->may_free, f->iovcnt, 0);
    if (idx >= f->iovcnt) {
        return;
    }
    iov = f->iov[idx];

    /*
     * Within the loop, madvise() is called once a contiguous range ends and
     * iov is then reinitialized for the next range; at the end, madvise() is
     * called for the last range.
     */
    while ((idx = find_next_bit(f->may_free, f->iovcnt, idx + 1)) < f->iovcnt) {
        /* check for adjacent buffer and coalesce them */
        if (iov.iov_base + iov.iov_len == f->iov[idx].iov_base) {
            iov.iov_len += f->iov[idx].iov_len;
            continue;
        }
        if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
            error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                         iov.iov_base, iov.iov_len, strerror(errno));
        }
        iov = f->iov[idx];
    }
    if (qemu_madvise(iov.iov_base, iov.iov_len, QEMU_MADV_DONTNEED) < 0) {
        error_report("migrate: madvise DONTNEED failed %p %zd: %s",
                     iov.iov_base, iov.iov_len, strerror(errno));
    }
    memset(f->may_free, 0, sizeof(f->may_free));
}
bool qemu_file_is_seekable(QEMUFile *f)
{
    return qio_channel_has_feature(f->ioc, QIO_CHANNEL_FEATURE_SEEKABLE);
}
/*
 * Flushes QEMUFile buffer
 *
 * This will flush all pending data. If data was only partially flushed, it
 * will set an error state.
 */
int qemu_fflush(QEMUFile *f)
{
    if (!qemu_file_is_writable(f)) {
        return f->last_error;
    }

    if (f->last_error) {
        return f->last_error;
    }
    if (f->iovcnt > 0) {
        Error *local_error = NULL;
        if (qio_channel_writev_all(f->ioc,
                                   f->iov, f->iovcnt,
                                   &local_error) < 0) {
            qemu_file_set_error_obj(f, -EIO, local_error);
        } else {
            uint64_t size = iov_size(f->iov, f->iovcnt);
            stat64_add(&mig_stats.qemu_file_transferred, size);
        }

        qemu_iovec_release_ram(f);
    }

    f->buf_index = 0;
    f->iovcnt = 0;
    return f->last_error;
}
/*
 * Attempt to fill the buffer from the underlying file
 * Returns the number of bytes read, or a negative value for an error.
 *
 * Note that it can return a partially filled buffer even when there is
 * neither an error nor EOF, if the underlying file descriptor gives a short
 * read; that can happen even on a blocking fd.
 */
static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
{
    int len;
    int pending;
    Error *local_error = NULL;

    assert(!qemu_file_is_writable(f));

    pending = f->buf_size - f->buf_index;
    if (pending > 0) {
        memmove(f->buf, f->buf + f->buf_index, pending);
    }
    f->buf_index = 0;
    f->buf_size = pending;

    if (qemu_file_get_error(f)) {
        return 0;
    }

    do {
        len = qio_channel_read(f->ioc,
                               (char *)f->buf + pending,
                               IO_BUF_SIZE - pending,
                               &local_error);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(f->ioc, G_IO_IN);
            } else {
                qio_channel_wait(f->ioc, G_IO_IN);
            }
        } else if (len < 0) {
            len = -EIO;
        }
    } while (len == QIO_CHANNEL_ERR_BLOCK);

    if (len > 0) {
        f->buf_size += len;
    } else if (len == 0) {
        qemu_file_set_error_obj(f, -EIO, local_error);
    } else {
        qemu_file_set_error_obj(f, len, local_error);
    }

    return len;
}
/** Closes the file
 *
 * Returns negative error value if any error happened on previous operations or
 * while closing the file. Returns 0 or positive number on success.
 *
 * The meaning of return value on success depends on the specific backend
 * being used.
 */
int qemu_fclose(QEMUFile *f)
{
    int ret = qemu_fflush(f);
    int ret2 = qio_channel_close(f->ioc, NULL);
    if (ret >= 0) {
        ret = ret2;
    }
    g_clear_pointer(&f->ioc, object_unref);
    error_free(f->last_error_obj);
    g_free(f);
    trace_qemu_file_fclose();
    return ret;
}
/*
 * Add buf to iovec. Do flush if iovec is full.
 *
 * Return values:
 * 1 iovec is full and flushed
 * 0 iovec is not flushed
 */
static int add_to_iovec(QEMUFile *f, const uint8_t *buf, size_t size,
                        bool may_free)
{
    /* check for adjacent buffer and coalesce them */
    if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
        f->iov[f->iovcnt - 1].iov_len &&
        may_free == test_bit(f->iovcnt - 1, f->may_free))
    {
        f->iov[f->iovcnt - 1].iov_len += size;
    } else {
        if (f->iovcnt >= MAX_IOV_SIZE) {
            /* Should only happen if a previous fflush failed */
            assert(qemu_file_get_error(f) || !qemu_file_is_writable(f));
            return 1;
        }
        if (may_free) {
            set_bit(f->iovcnt, f->may_free);
        }
        f->iov[f->iovcnt].iov_base = (uint8_t *)buf;
        f->iov[f->iovcnt++].iov_len = size;
    }

    if (f->iovcnt >= MAX_IOV_SIZE) {
        qemu_fflush(f);
        return 1;
    }

    return 0;
}
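
/*
 * Account for 'len' bytes that were just staged in f->buf at buf_index:
 * queue them on the iovec and, unless add_to_iovec() already flushed,
 * advance buf_index, flushing once the staging buffer is full.
 */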
static void add_buf_to_iovec(QEMUFile *f, size_t len)
{
    if (!add_to_iovec(f, f->buf + f->buf_index, len, false)) {
        f->buf_index += len;
        if (f->buf_index == IO_BUF_SIZE) {
            qemu_fflush(f);
        }
    }
}
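
/*
 * Queue the caller's buffer directly on the iovec without copying it.  The
 * memory must therefore stay valid until the next qemu_fflush(); if may_free
 * is true, the pages may additionally be released with madvise(DONTNEED)
 * once flushed (see qemu_iovec_release_ram()).
 */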
void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size,
                           bool may_free)
{
    if (f->last_error) {
        return;
    }

    add_to_iovec(f, buf, size, may_free);
}
void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size)
{
    size_t l;

    if (f->last_error) {
        return;
    }

    while (size > 0) {
        l = IO_BUF_SIZE - f->buf_index;
        if (l > size) {
            l = size;
        }
        memcpy(f->buf + f->buf_index, buf, l);
        add_buf_to_iovec(f, l);
        if (qemu_file_get_error(f)) {
            break;
        }
        buf += l;
        size -= l;
    }
}
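
/*
 * Positional write: flush anything already queued, then write 'buflen'
 * bytes at absolute offset 'pos' with qio_channel_pwrite(), bypassing the
 * internal buffering.  Any failure (including a short write) is recorded
 * via qemu_file_set_error_obj().
 */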
void qemu_put_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                        off_t pos)
{
    Error *err = NULL;
    size_t ret;

    if (f->last_error) {
        return;
    }

    qemu_fflush(f);
    ret = qio_channel_pwrite(f->ioc, (char *)buf, buflen, pos, &err);

    if (err) {
        qemu_file_set_error_obj(f, -EIO, err);
        return;
    }

    if ((ssize_t)ret == QIO_CHANNEL_ERR_BLOCK) {
        qemu_file_set_error_obj(f, -EAGAIN, NULL);
        return;
    }

    if (ret != buflen) {
        error_setg(&err, "Partial write of size %zu, expected %zu", ret,
                   buflen);
        qemu_file_set_error_obj(f, -EIO, err);
        return;
    }

    stat64_add(&mig_stats.qemu_file_transferred, buflen);

    return;
}
size_t qemu_get_buffer_at(QEMUFile *f, const uint8_t *buf, size_t buflen,
                          off_t pos)
{
    Error *err = NULL;
    size_t ret;

    if (f->last_error) {
        return 0;
    }

    ret = qio_channel_pread(f->ioc, (char *)buf, buflen, pos, &err);

    if ((ssize_t)ret == -1 || err) {
        qemu_file_set_error_obj(f, -EIO, err);
        return 0;
    }

    if ((ssize_t)ret == QIO_CHANNEL_ERR_BLOCK) {
        qemu_file_set_error_obj(f, -EAGAIN, NULL);
        return 0;
    }

    if (ret != buflen) {
        error_setg(&err, "Partial read of size %zu, expected %zu", ret, buflen);
        qemu_file_set_error_obj(f, -EIO, err);
        return 0;
    }

    return ret;
}
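
/*
 * Seek the underlying channel.  For a writable file, pending output is
 * flushed first; for a readable file, any buffered data is dropped so the
 * next read re-fills the buffer from the new position.
 */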
void qemu_set_offset(QEMUFile *f, off_t off, int whence)
{
    Error *err = NULL;
    off_t ret;

    if (qemu_file_is_writable(f)) {
        qemu_fflush(f);
    } else {
        /* Drop any cached read data; this will trigger a re-fill later */
        f->buf_index = 0;
        f->buf_size = 0;
    }

    ret = qio_channel_io_seek(f->ioc, off, whence, &err);
    if (ret == (off_t)-1) {
        qemu_file_set_error_obj(f, -EIO, err);
    }
}
off_t qemu_get_offset(QEMUFile *f)
{
    Error *err = NULL;
    off_t ret;

    qemu_fflush(f);

    ret = qio_channel_io_seek(f->ioc, 0, SEEK_CUR, &err);
    if (ret == (off_t)-1) {
        qemu_file_set_error_obj(f, -EIO, err);
    }
    return ret;
}
void qemu_put_byte(QEMUFile *f, int v)
{
    if (f->last_error) {
        return;
    }

    f->buf[f->buf_index] = v;
    add_buf_to_iovec(f, 1);
}

void qemu_file_skip(QEMUFile *f, int size)
{
    if (f->buf_index + size <= f->buf_size) {
        f->buf_index += size;
    }
}
/*
 * Read 'size' bytes from file (at 'offset') without moving the
 * pointer and set 'buf' to point to that data.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 */
size_t coroutine_mixed_fn qemu_peek_buffer(QEMUFile *f, uint8_t **buf,
                                           size_t size, size_t offset)
{
    ssize_t pending;
    size_t index;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);
    assert(size <= IO_BUF_SIZE - offset);

    /* The 1st byte to read from */
    index = f->buf_index + offset;
    /* The number of available bytes starting at index */
    pending = f->buf_size - index;

    /*
     * qemu_fill_buffer might return just a few bytes, even when there isn't
     * an error, so loop collecting them until we get enough.
     */
    while (pending < size) {
        int received = qemu_fill_buffer(f);

        if (received <= 0) {
            break;
        }

        index = f->buf_index + offset;
        pending = f->buf_size - index;
    }

    if (pending <= 0) {
        return 0;
    }
    if (size > pending) {
        size = pending;
    }

    *buf = f->buf + index;
    return size;
}
/*
 * Read 'size' bytes of data from the file into buf.
 * 'size' can be larger than the internal buffer.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 */
size_t coroutine_mixed_fn qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size)
{
    size_t pending = size;
    size_t done = 0;

    while (pending > 0) {
        size_t res;
        uint8_t *src;

        res = qemu_peek_buffer(f, &src, MIN(pending, IO_BUF_SIZE), 0);
        if (res == 0) {
            return done;
        }
        memcpy(buf, src, res);
        qemu_file_skip(f, res);
        buf += res;
        pending -= res;
        done += res;
    }

    return done;
}
/*
 * Read 'size' bytes of data from the file.
 * 'size' can be larger than the internal buffer.
 *
 * The data:
 *   may be held on an internal buffer (in which case *buf is updated
 *     to point to it) that is valid until the next qemu_file operation.
 * OR
 *   will be copied to the *buf that was passed in.
 *
 * The code tries to avoid the copy if possible.
 *
 * It will return size bytes unless there was an error, in which case it will
 * return as many as it managed to read (assuming blocking fd's which
 * all current QEMUFile are)
 *
 * Note: Since **buf may get changed, the caller should take care to
 *       keep a pointer to the original buffer if it needs to deallocate it.
 */
size_t coroutine_mixed_fn qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf,
                                                   size_t size)
{
    if (size < IO_BUF_SIZE) {
        size_t res;
        uint8_t *src = NULL;

        res = qemu_peek_buffer(f, &src, size, 0);

        if (res == size) {
            qemu_file_skip(f, res);
            *buf = src;
            return res;
        }
    }

    return qemu_get_buffer(f, *buf, size);
}
/*
 * Peeks a single byte from the buffer; this isn't guaranteed to work if
 * offset leaves a gap after the previous read/peeked data.
 */
int coroutine_mixed_fn qemu_peek_byte(QEMUFile *f, int offset)
{
    int index = f->buf_index + offset;

    assert(!qemu_file_is_writable(f));
    assert(offset < IO_BUF_SIZE);

    if (index >= f->buf_size) {
        qemu_fill_buffer(f);
        index = f->buf_index + offset;
        if (index >= f->buf_size) {
            return 0;
        }
    }
    return f->buf[index];
}

int coroutine_mixed_fn qemu_get_byte(QEMUFile *f)
{
    int result;

    result = qemu_peek_byte(f, 0);
    qemu_file_skip(f, 1);
    return result;
}
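
/*
 * Bytes accounted as transferred so far: the mig_stats qemu_file_transferred
 * counter plus whatever is still queued on this file's iovec and has not yet
 * been flushed.  Only valid for a writable file.
 */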
uint64_t qemu_file_transferred(QEMUFile *f)
{
    uint64_t ret = stat64_get(&mig_stats.qemu_file_transferred);
    int i;

    g_assert(qemu_file_is_writable(f));

    for (i = 0; i < f->iovcnt; i++) {
        ret += f->iov[i].iov_len;
    }

    return ret;
}
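
/*
 * Big-endian (network byte order) integer helpers: values are emitted and
 * consumed most-significant byte first, one byte at a time, so the stream
 * format is independent of host endianness.
 */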
void qemu_put_be16(QEMUFile *f, unsigned int v)
{
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);
}

void qemu_put_be32(QEMUFile *f, unsigned int v)
{
    qemu_put_byte(f, v >> 24);
    qemu_put_byte(f, v >> 16);
    qemu_put_byte(f, v >> 8);
    qemu_put_byte(f, v);
}

void qemu_put_be64(QEMUFile *f, uint64_t v)
{
    qemu_put_be32(f, v >> 32);
    qemu_put_be32(f, v);
}

unsigned int qemu_get_be16(QEMUFile *f)
{
    unsigned int v;
    v = qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);
    return v;
}

unsigned int qemu_get_be32(QEMUFile *f)
{
    unsigned int v;
    v = (unsigned int)qemu_get_byte(f) << 24;
    v |= qemu_get_byte(f) << 16;
    v |= qemu_get_byte(f) << 8;
    v |= qemu_get_byte(f);
    return v;
}

uint64_t qemu_get_be64(QEMUFile *f)
{
    uint64_t v;
    v = (uint64_t)qemu_get_be32(f) << 32;
    v |= qemu_get_be32(f);
    return v;
}
/*
 * Get a string whose length is determined by a single preceding byte
 * A preallocated 256 byte buffer must be passed in.
 * Returns: len on success and a 0 terminated string in the buffer
 *          else 0
 * (Note: a 0 length string will return 0 either way)
 */
size_t coroutine_fn qemu_get_counted_string(QEMUFile *f, char buf[256])
{
    size_t len = qemu_get_byte(f);
    size_t res = qemu_get_buffer(f, (uint8_t *)buf, len);

    buf[res] = 0;

    return res == len ? res : 0;
}

/*
 * Put a string with one preceding byte containing its length. The length of
 * the string should be less than 256.
 */
void qemu_put_counted_string(QEMUFile *f, const char *str)
{
    size_t len = strlen(str);

    assert(len < 256);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (const uint8_t *)str, len);
}
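
/*
 * Round-trip sketch for the counted-string format (names are illustrative,
 * not taken from this file):
 *
 *     qemu_put_counted_string(out, "ram");
 *     ...
 *     char name[256];
 *     if (!qemu_get_counted_string(in, name)) {
 *         ... short read or zero-length string ...
 *     }
 */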
/*
 * Set the blocking state of the QEMUFile.
 * Note: On some transports the OS only keeps a single blocking state for
 *       both directions, and thus changing the blocking on the main
 *       QEMUFile can also affect the return path.
 */
void qemu_file_set_blocking(QEMUFile *f, bool block)
{
    qio_channel_set_blocking(f->ioc, block, NULL);
}

/*
 * qemu_file_get_ioc:
 *
 * Get the ioc object for the file, without incrementing
 * the reference count.
 *
 * Returns: the ioc object
 */
QIOChannel *qemu_file_get_ioc(QEMUFile *file)
{
    return file->ioc;
}
/*
 * Read size bytes from QEMUFile f and write them to fd.
 */
int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size)
{
    while (size) {
        size_t pending = f->buf_size - f->buf_index;
        ssize_t rc;

        if (!pending) {
            rc = qemu_fill_buffer(f);
            if (rc < 0) {
                return rc;
            }
            if (rc == 0) {
                return -EIO;
            }
            continue;
        }

        rc = write(fd, f->buf + f->buf_index, MIN(pending, size));
        if (rc < 0) {
            return -errno;
        }
        if (rc == 0) {
            return -EIO;
        }
        f->buf_index += rc;
        size -= rc;
    }

    return 0;
}