Merge tag 'v9.0.0-rc3'
[qemu/ar7.git] / block / fvd.h
blob7648269c07878292010a9b14be0e538c5d4f4dbb
1 /*
2 * Copyright (c) 2010-2011 IBM
4 * Authors:
5 * Chunqiang Tang <ctang@us.ibm.com>
7 * This work is licensed under the terms of the GNU GPL, version 2.
8 * See the COPYING file in the top-level directory.
9 */
11 /*=============================================================================
12 * A short description: this is the header of the FVD block device driver.
13 *============================================================================*/
15 #include <sys/vfs.h>
16 #include <sys/mman.h>
17 #include <pthread.h>
18 #include <execinfo.h>
19 #include <sys/ioctl.h>
20 #include "block/block_int.h"
21 #include "qemu/option.h"
22 #include "qemu/queue.h"
23 #include "qemu/timer.h"
24 #include "block/block.h"
25 #include "block/blksim.h"
26 #include "block/fvd-ext.h"
/* On-disk image identification: magic number and format version, stored in
 * the first two fields of FvdHeader. */
#define FVD_MAGIC (('Q' << 24) | ('C' << 16) | (0xF5 << 8) | 0xA9)
#define FVD_VERSION 1
31 extern bool in_qemu_tool;
/* Profile-directed prefetch. (to be implemented).
 * Packed: entries are stored verbatim in the image file at
 * FvdHeader.prefetch_profile_offset. */
typedef struct __attribute__ ((__packed__)) PrefetchProfileEntry {
    int64_t offset; /* in bytes */

    /* In the unit of FvdHeader.prefetch_profile_entry_len_unit, i.e.,
     * len_in_bytes = len * FvdHeader.unit_of_PrefetchProfileEntry_len. */
    uint32_t len;
} PrefetchProfileEntry;
/*
 * The FVD format consists of:
 *   + Header fields of FvdHeader.
 *   + Bitmap, starting on a 4KB page boundary at a location specified by
 *     FvdHeader.bitmap_offset.
 *   + Table, starting on a 4KB page boundary at a location specified by
 *     FvdHeader.table_offset.
 *   + Journal, starting on a 4KB page boundary at a location specified by
 *     FvdHeader.journal_offset.
 *   + Prefetch profile entries, starting on a 4KB page boundary at a location
 *     specified by FvdHeader.prefetch_profile_offset. (to be implemented)
 *   + Virtual disk data, starting on a 4KB page boundary. Optionally, disk
 *     data can be stored in a separate data file specified by
 *     FvdHeader.data_file.
 */
/* Packed on-disk header: field order and sizes define the image format. */
typedef struct __attribute__ ((__packed__)) FvdHeader {
    uint32_t magic;   /* Must be FVD_MAGIC. */
    uint32_t version; /* Must be FVD_VERSION. */

    /* This field is set to TRUE after whole-image prefetching finishes. */
    int32_t all_data_in_fvd_img;

    int64_t virtual_disk_size; /* in bytes. Disk size perceived by the VM. */
    int64_t metadata_size; /* in bytes. */
    char base_img[1024];       /* Backing image file name (NUL-terminated). */
    char base_img_fmt[16];     /* Backing image format name. */
    int64_t base_img_size; /* in bytes. */
    int64_t bitmap_offset; /* in bytes. Aligned on DEF_PAGE_SIZE. */
    int64_t bitmap_size; /* in bytes. Rounded up to DEF_PAGE_SIZE */
    int32_t block_size; /* in bytes. */
    int32_t copy_on_read; /* TRUE or FALSE */
    int64_t max_outstanding_copy_on_read_data; /* in bytes. */

    /* If (data_file[0]==0), the FVD metadata and data are stored in one file.*/
    char data_file[1024];
    char data_file_fmt[16];

    /******** Begin: for prefetching. *******************************/
    /* in seconds. -1 means disable whole image prefetching. */
    int32_t prefetch_start_delay;

    /* in bytes. Aligned on DEF_PAGE_SIZE. (to be implemented) */
    int64_t prefetch_profile_offset;

    /* Number of PrefetchProfileEntry. (to be implemented) */
    int64_t prefetch_profile_entries;

    int32_t num_prefetch_slots; /* Max number of outstanding prefetch writes. */
    int32_t bytes_per_prefetch; /* For whole image prefetching. */
    int32_t prefetch_read_throughput_measure_time; /* in milliseconds. */
    int32_t prefetch_write_throughput_measure_time; /* in milliseconds. */

    /* Controls the calculation of the moving average of throughput. Must be a
     * value between [0,100].
     * actual_normalized_alpha = prefetch_perf_calc_alpha / 100.0 */
    int32_t prefetch_perf_calc_alpha;

    int32_t prefetch_min_read_throughput; /* in KB/second. */
    int32_t prefetch_min_write_throughput; /* in KB/second. */
    int32_t prefetch_max_read_throughput; /* in KB/second. */
    int32_t prefetch_max_write_throughput; /* in KB/second. */

    /* in milliseconds. When prefetch read/write throughput is low, prefetch
     * pauses for a random time uniformly distributed in
     * [0, prefetch_throttle_time]. */
    int32_t prefetch_throttle_time;
    /******** End: for prefetching. *******************************/

    /******** Begin: for compact image. *****************************/
    int32_t compact_image; /* TRUE or FALSE */
    int64_t table_offset; /* in bytes. */
    int64_t chunk_size; /* in bytes. */
    int64_t storage_grow_unit; /* in bytes. */
    char add_storage_cmd[2048]; /* Shell command to grow the backing storage. */
    /******** End: for compact image. *******************************/

    /******** Begin: for journal. ***********************************/
    int64_t journal_offset; /* in bytes. */
    int64_t journal_size; /* in bytes. */
    int32_t clean_shutdown; /* TRUE if VM's last shutdown was graceful. */
    /******** End: for journal. *************************************/

    /*
     * This field is TRUE if the image mandates that the storage layer
     * (BDRVFvdState.fvd_data) must return TRUE for bdrv_has_zero_init().
     * This is the case if the optimization described in Section 3.3.3 of the
     * FVD-cow paper is enabled (see function search_holes()). If 'qemu-img
     * create' sets need_zero_init to TRUE, 'qemu-img update' can be used to
     * manually reset it to FALSE, if the user always manually pre-fills the
     * storage (e.g., a raw partition) with zeros. If the image is stored on a
     * file system, it already supports zero_init, and hence there is no need
     * to manually manipulate this field.
     */
    int32_t need_zero_init;

    /* If TRUE, FVD dumps a prefetch profile after the VM shuts down.
     * (to be implemented) */
    int32_t generate_prefetch_profile;

    /* See the comment on PrefetchProfileEntry.len. (to be implemented) */
    int32_t unit_of_PrefetchProfileEntry_len;

    /* in seconds. -1 means disable profile-directed prefetching.
     * (to be implemented) */
    int32_t profile_directed_prefetch_start_delay;

    /* Possible values are "no", "writethrough", "writeback", or
     * "writenocache". (to be implemented) */
    char write_updates_base_img[16];
} FvdHeader;
/* In-memory driver state for one open FVD image. Note that, unlike FvdHeader,
 * offsets and sizes here are mostly kept in sectors, not bytes. */
typedef struct BDRVFvdState {
    BlockDriverState *fvd_metadata; /* Backend holding header/bitmap/table. */
    BlockDriverState *fvd_data;     /* Backend holding virtual disk data. */
    int64_t virtual_disk_size; /*in bytes. */
    int64_t bitmap_offset; /* in sectors */
    int64_t bitmap_size; /* in bytes. */
    int64_t data_offset; /* in sectors. Begin of real data. */
    int64_t nb_sectors_in_base_img;
    int32_t block_size; /* in sectors. */
    int copy_on_read; /* TRUE or FALSE */
    int64_t max_outstanding_copy_on_read_data; /* in bytes. */
    int64_t outstanding_copy_on_read_data; /* in bytes. */
    int data_region_prepared; /* TRUE or FALSE */
    QLIST_HEAD(WriteLocks, FvdAIOCB) write_locks; /* All writes. */
    QLIST_HEAD(CopyLocks, FvdAIOCB) copy_locks; /* copy-on-read and CoW. */

    /* Keep two copies of bitmap to reduce the overhead of updating the
     * on-disk bitmap, i.e., copy-on-read and prefetching do not update the
     * on-disk bitmap. See Section 3.3.4 of the FVD-cow paper. */
    uint8_t *fresh_bitmap;
    uint8_t *stale_bitmap;

    /******** Begin: for prefetching. ***********************************/
    struct FvdAIOCB **prefetch_acb; /* Array of num_prefetch_slots slots. */
    int prefetch_state; /* PREFETCH_STATE_RUNNING, FINISHED, or DISABLED. */
    int prefetch_error; /* TRUE or FALSE */
    int num_prefetch_slots;
    int num_filled_prefetch_slots;
    int next_prefetch_read_slot;
    int prefetch_read_active; /* TRUE or FALSE */
    int pause_prefetch_requested; /* TRUE or FALSE */
    int prefetch_start_delay; /* in seconds */
    int64_t unclaimed_prefetch_region_start;
    int64_t prefetch_read_time; /* in milliseconds. */
    int64_t prefetch_write_time; /* in milliseconds. */
    int64_t prefetch_data_read; /* in bytes. */
    int64_t prefetch_data_written; /* in bytes. */
    double prefetch_read_throughput; /* in bytes/millisecond. */
    double prefetch_write_throughput; /* in bytes/millisecond. */
    double prefetch_min_read_throughput; /* in bytes/millisecond. */
    double prefetch_min_write_throughput; /* in bytes/millisecond. */
    int64_t prefetch_read_throughput_measure_time; /* in millisecond. */
    int64_t prefetch_write_throughput_measure_time; /* in millisecond.*/
    int prefetch_throttle_time; /* in millisecond. */
    int sectors_per_prefetch;
    QEMUTimer *prefetch_timer;
    /* prefetch_perf_calc_alpha = FvdHeader.prefetch_perf_calc_alpha/100.0 */
    double prefetch_perf_calc_alpha;
    /******** End: for prefetching. ***********************************/

    /******** Begin: for compact image. *************************************/
    uint32_t *table; /* Mapping table stored in memory in little endian. */
    int64_t data_storage; /* in sectors. */
    int64_t used_storage; /* in sectors. */
    int64_t chunk_size; /* in sectors. */
    int64_t storage_grow_unit; /* in sectors. */
    int64_t table_offset; /* in sectors. */
    char *add_storage_cmd;
    /******** End: for compact image. ***************************************/

    /******** Begin: for journal. *******************************************/
    int64_t journal_offset; /* in sectors. */
    int64_t journal_size; /* in sectors. */
    int64_t next_journal_sector; /* in sector. */
    int ongoing_journal_updates; /* Number of ongoing journal updates. */
    int dirty_image; /* TRUE or FALSE. */

    /* Requests waiting for metadata flush and journal recycle to finish. */
    QLIST_HEAD(JournalFlush, FvdAIOCB) wait_for_journal;
    /******** End: for journal. ********************************************/

#ifdef FVD_DEBUG
    int64_t total_copy_on_read_data; /* in bytes. */
    int64_t total_prefetch_data; /* in bytes. */
#endif
} BDRVFvdState;
/* Begin of data type definitions. */
struct FvdAIOCB;

/* Per-request journal-write state (embedded in FvdAIOCB as 'jcb'). */
typedef struct JournalCB {
    BlockDriverAIOCB *hd_acb; /* Outstanding journal write to fvd_metadata. */
    QEMUIOVector qiov;
    struct iovec iov;
    /* Link for BDRVFvdState.wait_for_journal. */
    QLIST_ENTRY(FvdAIOCB) next_wait_for_journal;
} JournalCB;
/* CopyLock is used by AIOWriteCB and AIOCopyCB. Represents a locked
 * [begin, end) region; writes that overlap it queue on dependent_writes. */
typedef struct CopyLock {
    QLIST_ENTRY(FvdAIOCB) next; /* Link for BDRVFvdState.copy_locks. */
    int64_t begin;
    int64_t end;
    QLIST_HEAD(DependentWritesHead, FvdAIOCB) dependent_writes;
} CopyLock;
/* One child read issued on behalf of an AIOReadCB (see read_backing /
 * read_fvd below). */
typedef struct ChildAIOReadCB {
    BlockDriverAIOCB *hd_acb;
    struct iovec iov;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
    int done; /* TRUE once this child read has completed. */
} ChildAIOReadCB;
/* Read request state: a read may be split between the backing image and the
 * FVD data file. */
typedef struct AIOReadCB {
    QEMUIOVector *qiov; /* Caller-owned destination vector. */
    int ret;            /* First error seen, or 0. */
    ChildAIOReadCB read_backing; /* Portion served from the base image. */
    ChildAIOReadCB read_fvd;     /* Portion served from FVD data. */
} AIOReadCB;
/* For copy-on-read and prefetching. */
typedef struct AIOCopyCB {
    BlockDriverAIOCB *hd_acb;
    struct iovec iov;
    QEMUIOVector qiov;
    uint8_t *buf; /* Staging buffer for the copied region. */
    int64_t buffered_sector_begin;
    int64_t buffered_sector_end;
    int64_t last_prefetch_op_start_time; /* For prefetch only. */
} AIOCopyCB;
/* Write request state, including the copy-on-write staging data needed when
 * a partial block must first be filled from the base image. */
typedef struct AIOWriteCB {
    BlockDriverAIOCB *hd_acb;
    QEMUIOVector *qiov; /* Caller-owned source vector. */
    uint8_t *cow_buf;        /* Buffer holding copy-on-write fill data. */
    QEMUIOVector *cow_qiov;
    int64_t cow_start_sector;
    int update_table; /* TRUE or FALSE. */
    int ret;
    QLIST_ENTRY(FvdAIOCB) next_write_lock; /* See BDRVFvdState.write_locks */

    /* See FvdAIOCB.write.dependent_writes. */
    QLIST_ENTRY(FvdAIOCB) next_dependent_write;
} AIOWriteCB;
/* For AIOStoreCompactCB and AIOLoadCompactCB: one child I/O, with a back
 * pointer to the parent request. */
typedef struct CompactChildCB {
    struct FvdAIOCB *acb;
    BlockDriverAIOCB *hd_acb;
} CompactChildCB;
/* For storing data to a compact image. A request may span multiple chunks
 * and therefore fan out into multiple children. */
typedef struct AIOStoreCompactCB {
    CompactChildCB one_child;   /* Inline child for the common 1-chunk case. */
    CompactChildCB *children;   /* Heap-allocated when num_children > 1. */
    int update_table;
    int num_children;
    int finished_children;
    struct FvdAIOCB *parent_acb;
    int ret;
    int soft_write; /*TRUE if the store is caused by copy-on-read or prefetch.*/
    QEMUIOVector *orig_qiov; /* Caller-owned source vector. */
} AIOStoreCompactCB;
/* For loading data from a compact image. Mirrors AIOStoreCompactCB. */
typedef struct AIOLoadCompactCB {
    CompactChildCB *children;   /* Heap-allocated when num_children > 1. */
    CompactChildCB one_child;   /* Inline child for the common 1-chunk case. */
    int num_children;
    int finished_children;
    struct FvdAIOCB *parent_acb;
    int ret;
    QEMUIOVector *orig_qiov; /* Caller-owned destination vector. */
} AIOLoadCompactCB;
/* Flush request state: flushes fvd_data and fvd_metadata in parallel and
 * completes when both finish. */
typedef struct AIOFlushCB {
    BlockDriverAIOCB *data_acb;
    BlockDriverAIOCB *metadata_acb;
    int num_finished;
    int ret; /* First error seen, or 0. */
} AIOFlushCB;
/* Minimal request state for completions delivered via a bottom half. */
typedef struct AIOWrapperCB {
    QEMUBH *bh;
} AIOWrapperCB;
/* Request type tag; selects the active member of FvdAIOCB's union. */
typedef enum { OP_READ = 1, OP_WRITE, OP_COPY, OP_STORE_COMPACT,
    OP_LOAD_COMPACT, OP_WRAPPER, OP_FLUSH } op_type;
#ifdef FVD_DEBUG
/* For debugging memory leaks. */
typedef struct alloc_tracer_t {
    int64_t magic;       /* Expected to hold FVD_ALLOC_MAGIC. */
    int alloc_tracer;
    const char *alloc_file; /* Source file of the allocation site. */
    int alloc_line;         /* Source line of the allocation site. */
    size_t size;
} alloc_tracer_t;
#endif
344 typedef struct FvdAIOCB {
345 BlockDriverAIOCB common;
346 op_type type;
347 int64_t sector_num;
348 int nb_sectors;
349 JournalCB jcb; /* For AIOWriteCB and AIOStoreCompactCB. */
350 CopyLock copy_lock; /* For AIOWriteCB and AIOCopyCB. */
352 /* Use a union so that all requests can efficiently share one big AIOCBInfo.*/
353 union {
354 AIOWrapperCB wrapper;
355 AIOReadCB read;
356 AIOWriteCB write;
357 AIOCopyCB copy;
358 AIOLoadCompactCB load;
359 AIOStoreCompactCB store;
360 AIOFlushCB flush;
363 #ifdef FVD_DEBUG
364 int64_t magic;
365 alloc_tracer_t tracer;
367 /* Uniquely identifies a request across all processing activities. */
368 unsigned long long int uuid;
369 #endif
370 } FvdAIOCB;
/* Driver-wide singletons; initialized later in the including source file. */
static AIOCBInfo fvd_aio_pool;
static BlockDriver bdrv_fvd;
static QemuOptsList fvd_create_opts;
/* Function prototypes. */

/* Internal helpers: write path, prefetch, and header I/O. */
static int do_aio_write(struct FvdAIOCB *acb);
static void finish_write_data(void *opaque, int ret);
static void restart_dependent_writes(struct FvdAIOCB *acb);
static void finish_prefetch_read(void *opaque, int ret);
static int read_fvd_header(BDRVFvdState * s, FvdHeader * header);
static int update_fvd_header(BDRVFvdState * s, FvdHeader * header);
#if 0
static void fvd_aio_cancel(BlockDriverAIOCB * blockacb);
#endif

/* Compact-image data path. */
static BlockDriverAIOCB *store_data_in_compact_image(struct FvdAIOCB *acb,
    int soft_write, struct FvdAIOCB *parent_acb, BlockDriverState * bs,
    int64_t sector_num, QEMUIOVector * qiov, int nb_sectors,
    BlockDriverCompletionFunc * cb, void *opaque);
static BlockDriverAIOCB *load_data_from_compact_image(struct FvdAIOCB *acb,
    struct FvdAIOCB *parent_acb, BlockDriverState * bs,
    int64_t sector_num, QEMUIOVector * qiov, int nb_sectors,
    BlockDriverCompletionFunc * cb, void *opaque);

/* Journal and metadata maintenance. */
static void free_write_resource(struct FvdAIOCB *acb);
static void write_metadata_to_journal(struct FvdAIOCB *acb);
static void flush_metadata_to_disk(BlockDriverState * bs);
static void free_journal_sectors(BDRVFvdState * s);

/* BlockDriver callbacks implemented by this driver. */
static int fvd_create(const char *filename, QemuOpts *options,
                      Error **errp);
static int fvd_probe(const uint8_t * buf, int buf_size, const char *filename);
static int64_t coroutine_fn fvd_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum);
static int fvd_flush(BlockDriverState * bs);
static BlockDriverAIOCB *fvd_aio_readv(BlockDriverState * bs,
    int64_t sector_num, QEMUIOVector * qiov, int nb_sectors,
    BlockDriverCompletionFunc * cb, void *opaque);
static BlockDriverAIOCB *fvd_aio_writev(BlockDriverState * bs,
    int64_t sector_num, QEMUIOVector * qiov, int nb_sectors,
    BlockDriverCompletionFunc * cb, void *opaque);
static BlockDriverAIOCB *fvd_aio_flush(BlockDriverState * bs,
    BlockDriverCompletionFunc * cb, void *opaque);
static int fvd_get_info(BlockDriverState * bs, BlockDriverInfo * bdi);
static int fvd_update(BlockDriverState * bs, int argc, char **argv);
static int fvd_has_zero_init(BlockDriverState * bs);
#if 0
static void fvd_read_cancel(FvdAIOCB * acb);
static void fvd_write_cancel(FvdAIOCB * acb);
static void fvd_copy_cancel(FvdAIOCB * acb);
static void fvd_load_compact_cancel(FvdAIOCB * acb);
static void fvd_store_compact_cancel(FvdAIOCB * acb);
static void fvd_wrapper_cancel(FvdAIOCB * acb);
#endif
static void flush_metadata_to_disk_on_exit (BlockDriverState *bs);

/* Thin dispatchers between the compact and non-compact data paths. */
static inline BlockDriverAIOCB *load_data(FvdAIOCB * parent_acb,
    BlockDriverState * bs, int64_t sector_num, QEMUIOVector * orig_qiov,
    int nb_sectors, BlockDriverCompletionFunc * cb, void *opaque);
static inline BlockDriverAIOCB *store_data(int soft_write,
    FvdAIOCB * parent_acb, BlockDriverState * bs, int64_t sector_num,
    QEMUIOVector * orig_qiov, int nb_sectors,
    BlockDriverCompletionFunc * cb, void *opaque);
/* Default configurations, used by fvd_create when the user does not override
 * the corresponding FvdHeader fields. */
#define DEF_PAGE_SIZE 4096 /* bytes */
#define BYTES_PER_PREFETCH 1048576 /* bytes */
#define PREFETCH_THROTTLING_TIME 30000 /* milliseconds */
#define NUM_PREFETCH_SLOTS 2
#define PREFETCH_MIN_MEASURE_READ_TIME 100 /* milliseconds */
#define PREFETCH_MIN_MEASURE_WRITE_TIME 100 /* milliseconds */
#define PREFETCH_MIN_READ_THROUGHPUT 5120 /* KB/s */
#define PREFETCH_MIN_WRITE_THROUGHPUT 5120 /* KB/s */
#define PREFETCH_MAX_READ_THROUGHPUT 1000000000L /* KB/s */
#define PREFETCH_MAX_WRITE_THROUGHPUT 1000000000L /* KB/s */
#define PREFETCH_PERF_CALC_ALPHA 80 /* in [0,100]. */
#define MAX_OUTSTANDING_COPY_ON_READ_DATA 2000000 /* bytes */
#define MODERATE_BITMAP_SIZE 4194304L /* bytes */
#define CHUNK_SIZE 1048576LL /* bytes */
#define JOURNAL_SIZE 16777216LL /* bytes */
#define STORAGE_GROW_UNIT 104857600LL /* bytes */
/* State of BDRVFvdState.prefetch_state. */
#define PREFETCH_STATE_RUNNING 1
#define PREFETCH_STATE_FINISHED 2
#define PREFETCH_STATE_DISABLED 3
/* For convenience. Table entries are stored little-endian; the most
 * significant bit (DIRTY_TABLE) marks an entry as dirty, and an all-ones
 * entry (EMPTY_TABLE) marks it as unallocated. */
#undef ROUND_UP /* override definition from osdep.h */
#define ROUND_UP(x, base) ((((x)+(base)-1) / (base)) * (base))
#define ROUND_DOWN(x, base) ((((x) / (base)) * (base)))
#define BOOL(x) ((x) ? "true" : "false")
#define EMPTY_TABLE ((uint32_t)0xFFFFFFFF)
#define DIRTY_TABLE ((uint32_t)0x80000000)
#define READ_TABLE(entry) (le32_to_cpu(entry) & ~DIRTY_TABLE)
# define FVDAIOCB_MAGIC ((uint64_t)0x3A8FCE89325B976DULL)
# define FVD_ALLOC_MAGIC ((uint64_t)0x4A7dCEF9925B976DULL)
#define IS_EMPTY(entry) ((entry) == EMPTY_TABLE)
#define IS_DIRTY(entry) (le32_to_cpu(entry) & DIRTY_TABLE)
#define WRITE_TABLE(entry,id) ((entry) = cpu_to_le32(id))
/* Like READ_TABLE but preserves EMPTY_TABLE (whose bit pattern is the same
 * in either byte order). */
#define READ_TABLE2(entry) \
    ((entry)==EMPTY_TABLE ? EMPTY_TABLE : (le32_to_cpu(entry) & ~DIRTY_TABLE))

/* Clear the dirty bit of 'entry' in place; no-op for an empty entry. */
#define CLEAN_DIRTY(entry) \
    do { \
        if (!IS_EMPTY(entry)) \
            entry = cpu_to_le32(le32_to_cpu(entry) & ~DIRTY_TABLE); \
    } while (0)

/* Clear the dirty bit of 'entry' in place; the entry must not be empty. */
#define CLEAN_DIRTY2(entry) \
    do { \
        ASSERT(!IS_EMPTY(entry)); \
        entry = cpu_to_le32(le32_to_cpu(entry) & ~DIRTY_TABLE); \
    } while (0)