/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
24 #include "qemu-common.h"
26 #include "qemu/sockets.h"
27 #include "block/coroutine.h"
28 #include "migration/migration.h"
29 #include "migration/qemu-file.h"
32 #define IO_BUF_SIZE 32768
33 #define MAX_IOV_SIZE MIN(IOV_MAX, 64)
36 const QEMUFileOps
*ops
;
42 int64_t pos
; /* start of buffer when writing, end of buffer
45 int buf_size
; /* 0 when writing */
46 uint8_t buf
[IO_BUF_SIZE
];
48 struct iovec iov
[MAX_IOV_SIZE
];
54 typedef struct QEMUFileStdio
{
59 static int stdio_get_fd(void *opaque
)
61 QEMUFileStdio
*s
= opaque
;
63 return fileno(s
->stdio_file
);
66 static int stdio_put_buffer(void *opaque
, const uint8_t *buf
, int64_t pos
,
69 QEMUFileStdio
*s
= opaque
;
72 res
= fwrite(buf
, 1, size
, s
->stdio_file
);
80 static int stdio_get_buffer(void *opaque
, uint8_t *buf
, int64_t pos
, int size
)
82 QEMUFileStdio
*s
= opaque
;
83 FILE *fp
= s
->stdio_file
;
88 bytes
= fread(buf
, 1, size
, fp
);
89 if (bytes
!= 0 || !ferror(fp
)) {
92 if (errno
== EAGAIN
) {
93 yield_until_fd_readable(fileno(fp
));
94 } else if (errno
!= EINTR
) {
101 static int stdio_pclose(void *opaque
)
103 QEMUFileStdio
*s
= opaque
;
105 ret
= pclose(s
->stdio_file
);
108 } else if (!WIFEXITED(ret
) || WEXITSTATUS(ret
) != 0) {
109 /* close succeeded, but non-zero exit code: */
110 ret
= -EIO
; /* fake errno value */
116 static int stdio_fclose(void *opaque
)
118 QEMUFileStdio
*s
= opaque
;
121 if (qemu_file_is_writable(s
->file
)) {
122 int fd
= fileno(s
->stdio_file
);
125 ret
= fstat(fd
, &st
);
126 if (ret
== 0 && S_ISREG(st
.st_mode
)) {
128 * If the file handle is a regular file make sure the
129 * data is flushed to disk before signaling success.
138 if (fclose(s
->stdio_file
) == EOF
) {
145 static const QEMUFileOps stdio_pipe_read_ops
= {
146 .get_fd
= stdio_get_fd
,
147 .get_buffer
= stdio_get_buffer
,
148 .close
= stdio_pclose
151 static const QEMUFileOps stdio_pipe_write_ops
= {
152 .get_fd
= stdio_get_fd
,
153 .put_buffer
= stdio_put_buffer
,
154 .close
= stdio_pclose
157 QEMUFile
*qemu_popen_cmd(const char *command
, const char *mode
)
162 if (mode
== NULL
|| (mode
[0] != 'r' && mode
[0] != 'w') || mode
[1] != 0) {
163 fprintf(stderr
, "qemu_popen: Argument validity check failed\n");
167 stdio_file
= popen(command
, mode
);
168 if (stdio_file
== NULL
) {
172 s
= g_malloc0(sizeof(QEMUFileStdio
));
174 s
->stdio_file
= stdio_file
;
176 if (mode
[0] == 'r') {
177 s
->file
= qemu_fopen_ops(s
, &stdio_pipe_read_ops
);
179 s
->file
= qemu_fopen_ops(s
, &stdio_pipe_write_ops
);
184 static const QEMUFileOps stdio_file_read_ops
= {
185 .get_fd
= stdio_get_fd
,
186 .get_buffer
= stdio_get_buffer
,
187 .close
= stdio_fclose
190 static const QEMUFileOps stdio_file_write_ops
= {
191 .get_fd
= stdio_get_fd
,
192 .put_buffer
= stdio_put_buffer
,
193 .close
= stdio_fclose
/*
 * Validate a qemu_fopen() mode string.
 * Only the exact strings "rb" and "wb" are accepted.
 * Returns true (and logs to stderr) when the mode is invalid.
 */
bool qemu_file_mode_is_not_valid(const char *mode)
{
    bool bad = (mode == NULL) ||
               (mode[0] != 'r' && mode[0] != 'w') ||
               mode[1] != 'b' || mode[2] != 0;

    if (bad) {
        fprintf(stderr, "qemu_fopen: Argument validity check failed\n");
    }
    return bad;
}
208 QEMUFile
*qemu_fopen(const char *filename
, const char *mode
)
212 if (qemu_file_mode_is_not_valid(mode
)) {
216 s
= g_malloc0(sizeof(QEMUFileStdio
));
218 s
->stdio_file
= fopen(filename
, mode
);
219 if (!s
->stdio_file
) {
223 if (mode
[0] == 'w') {
224 s
->file
= qemu_fopen_ops(s
, &stdio_file_write_ops
);
226 s
->file
= qemu_fopen_ops(s
, &stdio_file_read_ops
);
234 QEMUFile
*qemu_fopen_ops(void *opaque
, const QEMUFileOps
*ops
)
238 f
= g_malloc0(sizeof(QEMUFile
));
246 * Get last error for stream f
248 * Return negative error value if there has been an error on previous
249 * operations, return 0 if no error happened.
252 int qemu_file_get_error(QEMUFile
*f
)
254 return f
->last_error
;
257 void qemu_file_set_error(QEMUFile
*f
, int ret
)
259 if (f
->last_error
== 0) {
264 bool qemu_file_is_writable(QEMUFile
*f
)
266 return f
->ops
->writev_buffer
|| f
->ops
->put_buffer
;
270 * Flushes QEMUFile buffer
272 * If there is writev_buffer QEMUFileOps it uses it otherwise uses
275 void qemu_fflush(QEMUFile
*f
)
279 if (!qemu_file_is_writable(f
)) {
283 if (f
->ops
->writev_buffer
) {
285 ret
= f
->ops
->writev_buffer(f
->opaque
, f
->iov
, f
->iovcnt
, f
->pos
);
288 if (f
->buf_index
> 0) {
289 ret
= f
->ops
->put_buffer(f
->opaque
, f
->buf
, f
->pos
, f
->buf_index
);
298 qemu_file_set_error(f
, ret
);
302 void ram_control_before_iterate(QEMUFile
*f
, uint64_t flags
)
306 if (f
->ops
->before_ram_iterate
) {
307 ret
= f
->ops
->before_ram_iterate(f
, f
->opaque
, flags
);
309 qemu_file_set_error(f
, ret
);
314 void ram_control_after_iterate(QEMUFile
*f
, uint64_t flags
)
318 if (f
->ops
->after_ram_iterate
) {
319 ret
= f
->ops
->after_ram_iterate(f
, f
->opaque
, flags
);
321 qemu_file_set_error(f
, ret
);
326 void ram_control_load_hook(QEMUFile
*f
, uint64_t flags
)
330 if (f
->ops
->hook_ram_load
) {
331 ret
= f
->ops
->hook_ram_load(f
, f
->opaque
, flags
);
333 qemu_file_set_error(f
, ret
);
336 qemu_file_set_error(f
, ret
);
340 size_t ram_control_save_page(QEMUFile
*f
, ram_addr_t block_offset
,
341 ram_addr_t offset
, size_t size
, int *bytes_sent
)
343 if (f
->ops
->save_page
) {
344 int ret
= f
->ops
->save_page(f
, f
->opaque
, block_offset
,
345 offset
, size
, bytes_sent
);
347 if (ret
!= RAM_SAVE_CONTROL_DELAYED
) {
348 if (bytes_sent
&& *bytes_sent
> 0) {
349 qemu_update_position(f
, *bytes_sent
);
350 } else if (ret
< 0) {
351 qemu_file_set_error(f
, ret
);
358 return RAM_SAVE_CONTROL_NOT_SUPP
;
362 * Attempt to fill the buffer from the underlying file
363 * Returns the number of bytes read, or negative value for an error.
365 * Note that it can return a partially full buffer even in a not error/not EOF
366 * case if the underlying file descriptor gives a short read, and that can
367 * happen even on a blocking fd.
369 static ssize_t
qemu_fill_buffer(QEMUFile
*f
)
374 assert(!qemu_file_is_writable(f
));
376 pending
= f
->buf_size
- f
->buf_index
;
378 memmove(f
->buf
, f
->buf
+ f
->buf_index
, pending
);
381 f
->buf_size
= pending
;
383 len
= f
->ops
->get_buffer(f
->opaque
, f
->buf
+ pending
, f
->pos
,
384 IO_BUF_SIZE
- pending
);
388 } else if (len
== 0) {
389 qemu_file_set_error(f
, -EIO
);
390 } else if (len
!= -EAGAIN
) {
391 qemu_file_set_error(f
, len
);
397 int qemu_get_fd(QEMUFile
*f
)
399 if (f
->ops
->get_fd
) {
400 return f
->ops
->get_fd(f
->opaque
);
405 void qemu_update_position(QEMUFile
*f
, size_t size
)
412 * Returns negative error value if any error happened on previous operations or
413 * while closing the file. Returns 0 or positive number on success.
415 * The meaning of return value on success depends on the specific backend
418 int qemu_fclose(QEMUFile
*f
)
422 ret
= qemu_file_get_error(f
);
425 int ret2
= f
->ops
->close(f
->opaque
);
430 /* If any error was spotted before closing, we should report it
431 * instead of the close() return value.
437 trace_qemu_file_fclose();
441 static void add_to_iovec(QEMUFile
*f
, const uint8_t *buf
, int size
)
443 /* check for adjacent buffer and coalesce them */
444 if (f
->iovcnt
> 0 && buf
== f
->iov
[f
->iovcnt
- 1].iov_base
+
445 f
->iov
[f
->iovcnt
- 1].iov_len
) {
446 f
->iov
[f
->iovcnt
- 1].iov_len
+= size
;
448 f
->iov
[f
->iovcnt
].iov_base
= (uint8_t *)buf
;
449 f
->iov
[f
->iovcnt
++].iov_len
= size
;
452 if (f
->iovcnt
>= MAX_IOV_SIZE
) {
457 void qemu_put_buffer_async(QEMUFile
*f
, const uint8_t *buf
, int size
)
459 if (!f
->ops
->writev_buffer
) {
460 qemu_put_buffer(f
, buf
, size
);
468 f
->bytes_xfer
+= size
;
469 add_to_iovec(f
, buf
, size
);
472 void qemu_put_buffer(QEMUFile
*f
, const uint8_t *buf
, int size
)
481 l
= IO_BUF_SIZE
- f
->buf_index
;
485 memcpy(f
->buf
+ f
->buf_index
, buf
, l
);
487 if (f
->ops
->writev_buffer
) {
488 add_to_iovec(f
, f
->buf
+ f
->buf_index
, l
);
491 if (f
->buf_index
== IO_BUF_SIZE
) {
494 if (qemu_file_get_error(f
)) {
502 void qemu_put_byte(QEMUFile
*f
, int v
)
508 f
->buf
[f
->buf_index
] = v
;
510 if (f
->ops
->writev_buffer
) {
511 add_to_iovec(f
, f
->buf
+ f
->buf_index
, 1);
514 if (f
->buf_index
== IO_BUF_SIZE
) {
519 void qemu_file_skip(QEMUFile
*f
, int size
)
521 if (f
->buf_index
+ size
<= f
->buf_size
) {
522 f
->buf_index
+= size
;
527 * Read 'size' bytes from file (at 'offset') into buf without moving the
530 * It will return size bytes unless there was an error, in which case it will
531 * return as many as it managed to read (assuming blocking fd's which
532 * all current QEMUFile are)
534 int qemu_peek_buffer(QEMUFile
*f
, uint8_t *buf
, int size
, size_t offset
)
539 assert(!qemu_file_is_writable(f
));
540 assert(offset
< IO_BUF_SIZE
);
541 assert(size
<= IO_BUF_SIZE
- offset
);
543 /* The 1st byte to read from */
544 index
= f
->buf_index
+ offset
;
545 /* The number of available bytes starting at index */
546 pending
= f
->buf_size
- index
;
549 * qemu_fill_buffer might return just a few bytes, even when there isn't
550 * an error, so loop collecting them until we get enough.
552 while (pending
< size
) {
553 int received
= qemu_fill_buffer(f
);
559 index
= f
->buf_index
+ offset
;
560 pending
= f
->buf_size
- index
;
566 if (size
> pending
) {
570 memcpy(buf
, f
->buf
+ index
, size
);
575 * Read 'size' bytes of data from the file into buf.
576 * 'size' can be larger than the internal buffer.
578 * It will return size bytes unless there was an error, in which case it will
579 * return as many as it managed to read (assuming blocking fd's which
580 * all current QEMUFile are)
582 int qemu_get_buffer(QEMUFile
*f
, uint8_t *buf
, int size
)
587 while (pending
> 0) {
590 res
= qemu_peek_buffer(f
, buf
, MIN(pending
, IO_BUF_SIZE
), 0);
594 qemu_file_skip(f
, res
);
603 * Peeks a single byte from the buffer; this isn't guaranteed to work if
604 * offset leaves a gap after the previous read/peeked data.
606 int qemu_peek_byte(QEMUFile
*f
, int offset
)
608 int index
= f
->buf_index
+ offset
;
610 assert(!qemu_file_is_writable(f
));
611 assert(offset
< IO_BUF_SIZE
);
613 if (index
>= f
->buf_size
) {
615 index
= f
->buf_index
+ offset
;
616 if (index
>= f
->buf_size
) {
620 return f
->buf
[index
];
623 int qemu_get_byte(QEMUFile
*f
)
627 result
= qemu_peek_byte(f
, 0);
628 qemu_file_skip(f
, 1);
632 int64_t qemu_ftell(QEMUFile
*f
)
638 int qemu_file_rate_limit(QEMUFile
*f
)
640 if (qemu_file_get_error(f
)) {
643 if (f
->xfer_limit
> 0 && f
->bytes_xfer
> f
->xfer_limit
) {
649 int64_t qemu_file_get_rate_limit(QEMUFile
*f
)
651 return f
->xfer_limit
;
654 void qemu_file_set_rate_limit(QEMUFile
*f
, int64_t limit
)
656 f
->xfer_limit
= limit
;
659 void qemu_file_reset_rate_limit(QEMUFile
*f
)
664 void qemu_put_be16(QEMUFile
*f
, unsigned int v
)
666 qemu_put_byte(f
, v
>> 8);
670 void qemu_put_be32(QEMUFile
*f
, unsigned int v
)
672 qemu_put_byte(f
, v
>> 24);
673 qemu_put_byte(f
, v
>> 16);
674 qemu_put_byte(f
, v
>> 8);
678 void qemu_put_be64(QEMUFile
*f
, uint64_t v
)
680 qemu_put_be32(f
, v
>> 32);
684 unsigned int qemu_get_be16(QEMUFile
*f
)
687 v
= qemu_get_byte(f
) << 8;
688 v
|= qemu_get_byte(f
);
692 unsigned int qemu_get_be32(QEMUFile
*f
)
695 v
= qemu_get_byte(f
) << 24;
696 v
|= qemu_get_byte(f
) << 16;
697 v
|= qemu_get_byte(f
) << 8;
698 v
|= qemu_get_byte(f
);
702 uint64_t qemu_get_be64(QEMUFile
*f
)
705 v
= (uint64_t)qemu_get_be32(f
) << 32;
706 v
|= qemu_get_be32(f
);
710 #define QSB_CHUNK_SIZE (1 << 10)
711 #define QSB_MAX_CHUNK_SIZE (16 * QSB_CHUNK_SIZE)
714 * Create a QEMUSizedBuffer
715 * This type of buffer uses scatter-gather lists internally and
716 * can grow to any size. Any data array in the scatter-gather list
717 * can hold different amount of bytes.
719 * @buffer: Optional buffer to copy into the QSB
720 * @len: size of initial buffer; if @buffer is given, buffer must
721 * hold at least len bytes
723 * Returns a pointer to a QEMUSizedBuffer or NULL on allocation failure
725 QEMUSizedBuffer
*qsb_create(const uint8_t *buffer
, size_t len
)
727 QEMUSizedBuffer
*qsb
;
728 size_t alloc_len
, num_chunks
, i
, to_copy
;
729 size_t chunk_size
= (len
> QSB_MAX_CHUNK_SIZE
)
733 num_chunks
= DIV_ROUND_UP(len
? len
: QSB_CHUNK_SIZE
, chunk_size
);
734 alloc_len
= num_chunks
* chunk_size
;
736 qsb
= g_try_new0(QEMUSizedBuffer
, 1);
741 qsb
->iov
= g_try_new0(struct iovec
, num_chunks
);
747 qsb
->n_iov
= num_chunks
;
749 for (i
= 0; i
< num_chunks
; i
++) {
750 qsb
->iov
[i
].iov_base
= g_try_malloc0(chunk_size
);
751 if (!qsb
->iov
[i
].iov_base
) {
752 /* qsb_free is safe since g_free can cope with NULL */
757 qsb
->iov
[i
].iov_len
= chunk_size
;
759 to_copy
= (len
- qsb
->used
) > chunk_size
760 ? chunk_size
: (len
- qsb
->used
);
761 memcpy(qsb
->iov
[i
].iov_base
, &buffer
[qsb
->used
], to_copy
);
762 qsb
->used
+= to_copy
;
766 qsb
->size
= alloc_len
;
772 * Free the QEMUSizedBuffer
774 * @qsb: The QEMUSizedBuffer to free
776 void qsb_free(QEMUSizedBuffer
*qsb
)
784 for (i
= 0; i
< qsb
->n_iov
; i
++) {
785 g_free(qsb
->iov
[i
].iov_base
);
792 * Get the number of used bytes in the QEMUSizedBuffer
794 * @qsb: A QEMUSizedBuffer
796 * Returns the number of bytes currently used in this buffer
798 size_t qsb_get_length(const QEMUSizedBuffer
*qsb
)
804 * Set the length of the buffer; the primary usage of this
805 * function is to truncate the number of used bytes in the buffer.
806 * The size will not be extended beyond the current number of
807 * allocated bytes in the QEMUSizedBuffer.
809 * @qsb: A QEMUSizedBuffer
810 * @new_len: The new length of bytes in the buffer
812 * Returns the number of bytes the buffer was truncated or extended
815 size_t qsb_set_length(QEMUSizedBuffer
*qsb
, size_t new_len
)
817 if (new_len
<= qsb
->size
) {
820 qsb
->used
= qsb
->size
;
826 * Get the iovec that holds the data for a given position @pos.
828 * @qsb: A QEMUSizedBuffer
829 * @pos: The index of a byte in the buffer
830 * @d_off: Pointer to an offset that this function will indicate
831 * at what position within the returned iovec the byte
834 * Returns the index of the iovec that holds the byte at the given
835 * index @pos in the byte stream; a negative number if the iovec
836 * for the given position @pos does not exist.
838 static ssize_t
qsb_get_iovec(const QEMUSizedBuffer
*qsb
,
839 off_t pos
, off_t
*d_off
)
844 if (pos
> qsb
->used
) {
848 for (i
= 0; i
< qsb
->n_iov
; i
++) {
849 if (curr
+ qsb
->iov
[i
].iov_len
> pos
) {
853 curr
+= qsb
->iov
[i
].iov_len
;
859 * Convert the QEMUSizedBuffer into a flat buffer.
861 * Note: If at all possible, try to avoid this function since it
862 * may unnecessarily copy memory around.
864 * @qsb: pointer to QEMUSizedBuffer
865 * @start: offset to start at
866 * @count: number of bytes to copy
867 * @buf: a pointer to a buffer to write into (at least @count bytes)
869 * Returns the number of bytes copied into the output buffer
871 ssize_t
qsb_get_buffer(const QEMUSizedBuffer
*qsb
, off_t start
,
872 size_t count
, uint8_t *buffer
)
874 const struct iovec
*iov
;
875 size_t to_copy
, all_copy
;
881 if (start
> qsb
->used
) {
885 all_copy
= qsb
->used
- start
;
886 if (all_copy
> count
) {
892 index
= qsb_get_iovec(qsb
, start
, &s_off
);
897 while (all_copy
> 0) {
898 iov
= &qsb
->iov
[index
];
902 to_copy
= iov
->iov_len
- s_off
;
903 if (to_copy
> all_copy
) {
906 memcpy(&buffer
[d_off
], &s
[s_off
], to_copy
);
919 * Grow the QEMUSizedBuffer to the given size and allocate
922 * @qsb: A QEMUSizedBuffer
923 * @new_size: The new size of the buffer
926 * a negative error code in case of memory allocation failure
928 * the new size of the buffer. The returned size may be greater or equal
931 static ssize_t
qsb_grow(QEMUSizedBuffer
*qsb
, size_t new_size
)
933 size_t needed_chunks
, i
;
935 if (qsb
->size
< new_size
) {
936 struct iovec
*new_iov
;
937 size_t size_diff
= new_size
- qsb
->size
;
938 size_t chunk_size
= (size_diff
> QSB_MAX_CHUNK_SIZE
)
939 ? QSB_MAX_CHUNK_SIZE
: QSB_CHUNK_SIZE
;
941 needed_chunks
= DIV_ROUND_UP(size_diff
, chunk_size
);
943 new_iov
= g_try_new(struct iovec
, qsb
->n_iov
+ needed_chunks
);
944 if (new_iov
== NULL
) {
948 /* Allocate new chunks as needed into new_iov */
949 for (i
= qsb
->n_iov
; i
< qsb
->n_iov
+ needed_chunks
; i
++) {
950 new_iov
[i
].iov_base
= g_try_malloc0(chunk_size
);
951 new_iov
[i
].iov_len
= chunk_size
;
952 if (!new_iov
[i
].iov_base
) {
955 /* Free previously allocated new chunks */
956 for (j
= qsb
->n_iov
; j
< i
; j
++) {
957 g_free(new_iov
[j
].iov_base
);
966 * Now we can't get any allocation errors, copy over to new iov
969 for (i
= 0; i
< qsb
->n_iov
; i
++) {
970 new_iov
[i
] = qsb
->iov
[i
];
973 qsb
->n_iov
+= needed_chunks
;
976 qsb
->size
+= (needed_chunks
* chunk_size
);
983 * Write into the QEMUSizedBuffer at a given position and a given
984 * number of bytes. This function will automatically grow the
987 * @qsb: A QEMUSizedBuffer
988 * @source: A byte array to copy data from
989 * @pos: The position within the @qsb to write data to
990 * @size: The number of bytes to copy into the @qsb
992 * Returns @size or a negative error code in case of memory allocation failure,
993 * or with an invalid 'pos'
995 ssize_t
qsb_write_at(QEMUSizedBuffer
*qsb
, const uint8_t *source
,
996 off_t pos
, size_t count
)
998 ssize_t rc
= qsb_grow(qsb
, pos
+ count
);
1000 size_t all_copy
= count
;
1001 const struct iovec
*iov
;
1004 off_t d_off
, s_off
= 0;
1010 if (pos
+ count
> qsb
->used
) {
1011 qsb
->used
= pos
+ count
;
1014 index
= qsb_get_iovec(qsb
, pos
, &d_off
);
1019 while (all_copy
> 0) {
1020 iov
= &qsb
->iov
[index
];
1022 dest
= iov
->iov_base
;
1024 to_copy
= iov
->iov_len
- d_off
;
1025 if (to_copy
> all_copy
) {
1029 memcpy(&dest
[d_off
], &source
[s_off
], to_copy
);
1032 all_copy
-= to_copy
;
1042 * Create a deep copy of the given QEMUSizedBuffer.
1044 * @qsb: A QEMUSizedBuffer
1046 * Returns a clone of @qsb or NULL on allocation failure
1048 QEMUSizedBuffer
*qsb_clone(const QEMUSizedBuffer
*qsb
)
1050 QEMUSizedBuffer
*out
= qsb_create(NULL
, qsb_get_length(qsb
));
1059 for (i
= 0; i
< qsb
->n_iov
; i
++) {
1060 res
= qsb_write_at(out
, qsb
->iov
[i
].iov_base
,
1061 pos
, qsb
->iov
[i
].iov_len
);
1072 typedef struct QEMUBuffer
{
1073 QEMUSizedBuffer
*qsb
;
1077 static int buf_get_buffer(void *opaque
, uint8_t *buf
, int64_t pos
, int size
)
1079 QEMUBuffer
*s
= opaque
;
1080 ssize_t len
= qsb_get_length(s
->qsb
) - pos
;
1089 return qsb_get_buffer(s
->qsb
, pos
, len
, buf
);
1092 static int buf_put_buffer(void *opaque
, const uint8_t *buf
,
1093 int64_t pos
, int size
)
1095 QEMUBuffer
*s
= opaque
;
1097 return qsb_write_at(s
->qsb
, buf
, pos
, size
);
1100 static int buf_close(void *opaque
)
1102 QEMUBuffer
*s
= opaque
;
1111 const QEMUSizedBuffer
*qemu_buf_get(QEMUFile
*f
)
1122 static const QEMUFileOps buf_read_ops
= {
1123 .get_buffer
= buf_get_buffer
,
1127 static const QEMUFileOps buf_write_ops
= {
1128 .put_buffer
= buf_put_buffer
,
1132 QEMUFile
*qemu_bufopen(const char *mode
, QEMUSizedBuffer
*input
)
1136 if (mode
== NULL
|| (mode
[0] != 'r' && mode
[0] != 'w') ||
1138 error_report("qemu_bufopen: Argument validity check failed");
1142 s
= g_malloc0(sizeof(QEMUBuffer
));
1143 if (mode
[0] == 'r') {
1147 if (s
->qsb
== NULL
) {
1148 s
->qsb
= qsb_create(NULL
, 0);
1152 error_report("qemu_bufopen: qsb_create failed");
1157 if (mode
[0] == 'r') {
1158 s
->file
= qemu_fopen_ops(s
, &buf_read_ops
);
1160 s
->file
= qemu_fopen_ops(s
, &buf_write_ops
);