/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include <assert.h>
#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK        0x01
#define BLK_MIG_FLAG_EOS                 0x02
#define BLK_MIG_FLAG_PROGRESS            0x04
#define BLK_MIG_FLAG_ZERO_BLOCK          0x08

#define MAX_IS_ALLOCATED_SEARCH 65536
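/* On the wire, each chunk is preceded by a 64-bit word holding the chunk's
 * sector number shifted left by BDRV_SECTOR_BITS, with the BLK_MIG_FLAG_*
 * bits in the low bits (see blk_send() and block_load()). */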
//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Protected by block migration lock.  */
    unsigned long *aio_bitmap;
    int64_t completed_sectors;
    BdrvDirtyBitmap *dirty_bitmap;
    Error *blocker;
} BlkMigDevState;
typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;
typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;
static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}
/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(bdrv_get_device_name(blk->bmds->bs));
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)bdrv_get_device_name(blk->bmds->bs), len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}
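/* Progress query helpers exported to the migration core (declared in
 * migration/block.h). */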
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}
uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}
uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}
uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}
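/* The per-device aio_bitmap marks chunks that still have an asynchronous
 * read in flight, so the dirty phase will not resubmit them before the
 * read completes. */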
/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < bdrv_nb_sectors(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}
/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}
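/* The AIO bitmap stores one bit per BDRV_SECTORS_PER_DIRTY_CHUNK chunk;
 * its size is rounded up to a whole number of bytes. */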
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = bdrv_nb_sectors(bs) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}
/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    blk_mig_lock();
    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}
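/* Submit an asynchronous read for the next BLOCK_SIZE chunk of this device's
 * bulk phase; returns 1 once the whole device has been covered. */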
/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    qemu_mutex_lock_iothread();
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}
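/* Dirty tracking uses one BdrvDirtyBitmap per device with BLOCK_SIZE
 * granularity, so one dirty bit corresponds to one migration chunk. */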
/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
                                                      NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
        }
    }
    return ret;
}
static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
    }
}
static void init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            return;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        error_setg(&bmds->blocker, "block device is in use by migration");
        bdrv_op_block_all(bs, bmds->blocker);
        bdrv_ref(bs);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}
/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}
static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}
/* Called with iothread lock taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain_all();
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}
/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
*/
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}
/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}
/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs, bmds->dirty_bitmap);
    }

    return dirty << BDRV_SECTOR_BITS;
}
/* Called with iothread lock taken.  */

static void blk_mig_cleanup(void)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    bdrv_drain_all();

    unset_dirty_tracking();

    blk_mig_lock();
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
        error_free(bmds->blocker);
        bdrv_unref(bmds->bs);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}
static void block_migration_cancel(void *opaque)
{
    blk_mig_cleanup();
}
static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start track dirty blocks */
    ret = set_dirty_tracking();

    if (ret) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}
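/* block_save_iterate() reports progress to the migration loop: it returns 1
 * when this pass wrote data, 0 when it wrote nothing, and a negative value
 * on error. */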
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}
/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async read completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    blk_mig_cleanup();
    return 0;
}
static uint64_t block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    blk_mig_lock();
    pending = get_remaining_dirty() +
                       block_mig_state.submitted * BLOCK_SIZE +
                       block_mig_state.read_done * BLOCK_SIZE;

    /* Report at least one block pending during bulk phase */
    if (pending == 0 && !block_mig_state.bulk_completed) {
        pending = BLOCK_SIZE;
    }
    blk_mig_unlock();
    qemu_mutex_unlock_iothread();

    DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
    return pending;
}
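/* Load side: the incoming stream is a sequence of 64-bit header words (a
 * sector or progress value shifted left by BDRV_SECTOR_BITS, with the
 * BLK_MIG_FLAG_* bits in the low bits), each optionally followed by a device
 * name and BLOCK_SIZE of payload, and terminated by BLK_MIG_FLAG_EOS. */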
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_nb_sectors(bs);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = bdrv_write(bs, addr, buf, nr_sectors);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}
static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}
static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}
static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cancel = block_migration_cancel,
    .is_active = block_is_active,
};
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}