 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 *
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
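 *
 * For example (hypothetical names), a volume created as "tank/vol1" shows up
 * as /dev/zvol/dsk/tank/vol1 (block) and /dev/zvol/rdsk/tank/vol1 (raw) once
 * the /dev filesystem instantiates the links.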
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include "zfs_namecheck.h"
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;
/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;
/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;
extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t **);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
zvol_size_changed(uint64_t volsize, major_t maj, minor_t min)
    dev_t dev = makedevice(maj, min);

    VERIFY(ddi_prop_update_int64(dev, zfs_dip,
        "Size", volsize) == DDI_SUCCESS);
    VERIFY(ddi_prop_update_int64(dev, zfs_dip,
        "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

    /* Notify specfs to invalidate the cached size */
    spec_size_invalidate(dev, VBLK);
    spec_size_invalidate(dev, VCHR);

zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
    if (volsize % blocksize != 0)

    if (volsize - 1 > SPEC_MAXOFFSET_T)

zvol_check_volblocksize(uint64_t volblocksize)
    if (volblocksize < SPA_MINBLOCKSIZE ||
        volblocksize > SPA_MAXBLOCKSIZE ||
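/*
 * Illustration only (hypothetical numbers): a 1 GiB volume with an 8K block
 * size passes the checks above (1 GiB is a multiple of 8K and well under
 * SPEC_MAXOFFSET_T), while a volsize of 1000000 bytes with an 8K block size
 * fails the modulus check.
 */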
zvol_get_stats(objset_t *os, nvlist_t *nv)
    dmu_object_info_t doi;

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);

    dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

    error = dmu_object_info(os, ZVOL_OBJ, &doi);

    dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
        doi.doi_data_block_size);

static zvol_state_t *
zvol_minor_lookup(const char *name)
    ASSERT(MUTEX_HELD(&zfsdev_state_lock));

    for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
        zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
        if (strcmp(zv->zv_name, name) == 0)
/* extent mapping arg */

zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
    struct maparg *ma = arg;
    int bs = ma->ma_zv->zv_volblocksize;

    if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)

    VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);

    /* Abort immediately if we have encountered gang blocks */

    /*
     * See if the block is at the end of the previous extent.
     */
    ze = list_tail(&ma->ma_zv->zv_extents);
    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
        DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
        DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
        dprintf_bp(bp, "%s", "next blkptr:");

    /* start a new extent */
    ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
    ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
    list_insert_tail(&ma->ma_zv->zv_extents, ze);
zvol_free_extents(zvol_state_t *zv)
    while (ze = list_head(&zv->zv_extents)) {
        list_remove(&zv->zv_extents, ze);
        kmem_free(ze, sizeof (zvol_extent_t));

zvol_get_lbas(zvol_state_t *zv)
    objset_t *os = zv->zv_objset;

    zvol_free_extents(zv);

    /* commit any in-flight changes before traversing the dataset */
    txg_wait_synced(dmu_objset_pool(os), 0);
    err = traverse_dataset(dmu_objset_ds(os), 0,
        TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
    if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
        zvol_free_extents(zv);
        return (err ? err : EIO);
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
    zfs_creat_t *zct = arg;
    nvlist_t *nvprops = zct->zct_props;
    uint64_t volblocksize, volsize;

    VERIFY(nvlist_lookup_uint64(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
    if (nvlist_lookup_uint64(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
        volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

    /*
     * These properties must be removed from the list so the generic
     * property setting step won't apply to them.
     */
    VERIFY(nvlist_remove_all(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
    (void) nvlist_remove_all(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

    error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,

    error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,

    error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
    uint64_t offset, length;

    byteswap_uint64_array(lr, sizeof (*lr));

    offset = lr->lr_offset;
    length = lr->lr_length;

    return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
    objset_t *os = zv->zv_objset;
    char *data = (char *)(lr + 1);	/* data follows lr_write_t */
    uint64_t offset, length;

    byteswap_uint64_array(lr, sizeof (*lr));

    offset = lr->lr_offset;
    length = lr->lr_length;

    /* If it's a dmu_sync() block, write the whole block */
    if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
        uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
        if (length < blocksize) {
            offset -= offset % blocksize;

    tx = dmu_tx_create(os);
    dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
    error = dmu_tx_assign(tx, TXG_WAIT);

    dmu_write(os, ZVOL_OBJ, offset, length, data, tx);

zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};
zvol_name2minor(const char *name, minor_t *minor)
    mutex_enter(&zfsdev_state_lock);
    zv = zvol_minor_lookup(name);
    if (zv)
        *minor = zv->zv_minor;
    mutex_exit(&zfsdev_state_lock);
    return (zv ? 0 : -1);
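/*
 * Illustration only (hypothetical dataset name): an in-kernel consumer can
 * combine zvol_name2minor() with the driver's major number to build a dev_t:
 *
 *	minor_t m;
 *	if (zvol_name2minor("tank/vol1", &m) == 0)
 *		dev = makedevice(ddi_driver_major(zfs_dip), m);
 */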
/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
zvol_create_minor(const char *name)
    zfs_soft_state_t *zs;
    dmu_object_info_t doi;
    char chrbuf[30], blkbuf[30];

    mutex_enter(&zfsdev_state_lock);

    if (zvol_minor_lookup(name) != NULL) {
        mutex_exit(&zfsdev_state_lock);

    /* lie and say we're read-only */
    error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

        mutex_exit(&zfsdev_state_lock);

    if ((minor = zfsdev_minor_alloc()) == 0) {
        dmu_objset_disown(os, FTAG);
        mutex_exit(&zfsdev_state_lock);

    if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
        dmu_objset_disown(os, FTAG);
        mutex_exit(&zfsdev_state_lock);

    (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,

    (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

    if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
        minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
        ddi_soft_state_free(zfsdev_state, minor);
        dmu_objset_disown(os, FTAG);
        mutex_exit(&zfsdev_state_lock);

    (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

    if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
        minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
        ddi_remove_minor_node(zfs_dip, chrbuf);
        ddi_soft_state_free(zfsdev_state, minor);
        dmu_objset_disown(os, FTAG);
        mutex_exit(&zfsdev_state_lock);

    zs = ddi_get_soft_state(zfsdev_state, minor);
    zs->zss_type = ZSST_ZVOL;
    zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
    (void) strlcpy(zv->zv_name, name, MAXPATHLEN);
    zv->zv_min_bs = DEV_BSHIFT;
    zv->zv_minor = minor;

    if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
        zv->zv_flags |= ZVOL_RDONLY;
    mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
    avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
        sizeof (rl_t), offsetof(rl_t, r_node));
    list_create(&zv->zv_extents, sizeof (zvol_extent_t),
        offsetof(zvol_extent_t, ze_node));
    /* get and cache the blocksize */
    error = dmu_object_info(os, ZVOL_OBJ, &doi);
    zv->zv_volblocksize = doi.doi_data_block_size;

    if (spa_writeable(dmu_objset_spa(os))) {
        if (zil_replay_disable)
            zil_destroy(dmu_objset_zil(os), B_FALSE);
        zil_replay(os, zv, zvol_replay_vector);

    dmu_objset_disown(os, FTAG);
    zv->zv_objset = NULL;

    mutex_exit(&zfsdev_state_lock);
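/*
 * Illustration only: a volume assigned minor 5 gets the minor nodes "5,raw"
 * (character) and "5" (block) under the zfs pseudo-device; sdev then exposes
 * them as /dev/zvol/rdsk/<pool>/<dataset> and /dev/zvol/dsk/<pool>/<dataset>.
 */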
/*
 * Remove minor node for the specified volume.
 */
zvol_remove_zv(zvol_state_t *zv)
    minor_t minor = zv->zv_minor;

    ASSERT(MUTEX_HELD(&zfsdev_state_lock));
    if (zv->zv_total_opens != 0)

    (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
    ddi_remove_minor_node(zfs_dip, nmbuf);

    (void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
    ddi_remove_minor_node(zfs_dip, nmbuf);

    avl_destroy(&zv->zv_znode.z_range_avl);
    mutex_destroy(&zv->zv_znode.z_range_lock);

    kmem_free(zv, sizeof (zvol_state_t));

    ddi_soft_state_free(zfsdev_state, minor);

zvol_remove_minor(const char *name)
    mutex_enter(&zfsdev_state_lock);
    if ((zv = zvol_minor_lookup(name)) == NULL) {
        mutex_exit(&zfsdev_state_lock);
    rc = zvol_remove_zv(zv);
    mutex_exit(&zfsdev_state_lock);
zvol_first_open(zvol_state_t *zv)
    /* lie and say we're read-only */
    error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
        dmu_objset_disown(os, zvol_tag);

    error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
        dmu_objset_disown(os, zvol_tag);

    zv->zv_volsize = volsize;
    zv->zv_zilog = zil_open(os, zvol_get_data);
    zvol_size_changed(zv->zv_volsize, ddi_driver_major(zfs_dip),

    VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,

    if (readonly || dmu_objset_is_snapshot(os) ||
        !spa_writeable(dmu_objset_spa(os)))
        zv->zv_flags |= ZVOL_RDONLY;
        zv->zv_flags &= ~ZVOL_RDONLY;

zvol_last_close(zvol_state_t *zv)
    zil_close(zv->zv_zilog);

    dmu_buf_rele(zv->zv_dbuf, zvol_tag);

    dmu_objset_disown(zv->zv_objset, zvol_tag);
    zv->zv_objset = NULL;
zvol_prealloc(zvol_state_t *zv)
    objset_t *os = zv->zv_objset;
    uint64_t refd, avail, usedobjs, availobjs;
    uint64_t resid = zv->zv_volsize;

    /* Check the space usage before attempting to allocate the space */
    dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
    if (avail < zv->zv_volsize)

    /* Free old extents if they exist */
    zvol_free_extents(zv);

        uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

        tx = dmu_tx_create(os);
        dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
        error = dmu_tx_assign(tx, TXG_WAIT);
            (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
        dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);

    txg_wait_synced(dmu_objset_pool(os), 0);
zvol_update_volsize(objset_t *os, uint64_t volsize)
    ASSERT(MUTEX_HELD(&zfsdev_state_lock));

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    error = dmu_tx_assign(tx, TXG_WAIT);

    error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,

    error = dmu_free_long_range(os,
        ZVOL_OBJ, volsize, DMU_OBJECT_END);
zvol_remove_minors(const char *name)
    namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
    (void) strncpy(namebuf, name, strlen(name));
    (void) strcat(namebuf, "/");
    mutex_enter(&zfsdev_state_lock);
    for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
        zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
        if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
            (void) zvol_remove_zv(zv);

    kmem_free(namebuf, strlen(name) + 2);
    mutex_exit(&zfsdev_state_lock);
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
    zvol_state_t *zv = NULL;
    dmu_object_info_t doi;
    uint64_t old_volsize = 0ULL;

    mutex_enter(&zfsdev_state_lock);
    zv = zvol_minor_lookup(name);
    if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
        mutex_exit(&zfsdev_state_lock);

    if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
        (error = zvol_check_volsize(volsize,
        doi.doi_data_block_size)) != 0)

    VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,

    error = zvol_update_volsize(os, volsize);

    /*
     * Reinitialize the dump area to the new size.  If we
     * failed to resize the dump area then restore it back to
     * its original size.
     */
    if (zv && error == 0) {
        if (zv->zv_flags & ZVOL_DUMPIFIED) {
            old_volsize = zv->zv_volsize;
            zv->zv_volsize = volsize;
            if ((error = zvol_dumpify(zv)) != 0 ||
                (error = dumpvp_resize()) != 0) {
                (void) zvol_update_volsize(os, old_volsize);
                zv->zv_volsize = old_volsize;
                error = zvol_dumpify(zv);
        zv->zv_volsize = volsize;
        zvol_size_changed(volsize, maj, zv->zv_minor);

    /*
     * Generate a LUN expansion event.
     */
    if (zv && error == 0) {
        char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

        (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,

        VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

        (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
            ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

        kmem_free(physpath, MAXPATHLEN);

    dmu_objset_rele(os, FTAG);

    mutex_exit(&zfsdev_state_lock);
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
    mutex_enter(&zfsdev_state_lock);

    zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
        mutex_exit(&zfsdev_state_lock);

    if (zv->zv_total_opens == 0)
        err = zvol_first_open(zv);
        mutex_exit(&zfsdev_state_lock);
    if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {

    if (zv->zv_flags & ZVOL_EXCL) {

    if (zv->zv_total_opens != 0) {

        zv->zv_flags |= ZVOL_EXCL;

    if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
        zv->zv_open_count[otyp]++;
        zv->zv_total_opens++;

    mutex_exit(&zfsdev_state_lock);

    if (zv->zv_total_opens == 0)
    mutex_exit(&zfsdev_state_lock);
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
    minor_t minor = getminor(dev);

    mutex_enter(&zfsdev_state_lock);

    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
        mutex_exit(&zfsdev_state_lock);

    if (zv->zv_flags & ZVOL_EXCL) {
        ASSERT(zv->zv_total_opens == 1);
        zv->zv_flags &= ~ZVOL_EXCL;

    /*
     * If the open count is zero, this is a spurious close.
     * That indicates a bug in the kernel / DDI framework.
     */
    ASSERT(zv->zv_open_count[otyp] != 0);
    ASSERT(zv->zv_total_opens != 0);

    /*
     * You may get multiple opens, but only one close.
     */
    zv->zv_open_count[otyp]--;
    zv->zv_total_opens--;

    if (zv->zv_total_opens == 0)

    mutex_exit(&zfsdev_state_lock);
zvol_get_done(zgd_t *zgd, int error)
    dmu_buf_rele(zgd->zgd_db, zgd);

    zfs_range_unlock(zgd->zgd_rl);

    if (error == 0 && zgd->zgd_bp)
        zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

    kmem_free(zgd, sizeof (zgd_t));
/*
 * Get data to generate a TX_WRITE intent log record.
 */
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
    zvol_state_t *zv = arg;
    objset_t *os = zv->zv_objset;
    uint64_t object = ZVOL_OBJ;
    uint64_t offset = lr->lr_offset;
    uint64_t size = lr->lr_length;	/* length of user data */
    blkptr_t *bp = &lr->lr_blkptr;

    zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
    zgd->zgd_zilog = zv->zv_zilog;
    zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    /*
     * Write records come in two flavors: immediate and indirect.
     * For small writes it's cheaper to store the data with the
     * log record (immediate); for large writes it's cheaper to
     * sync the data and get a pointer to it (indirect) so that
     * we don't have to write the data twice.
     */
    if (buf != NULL) {	/* immediate write */
        error = dmu_read(os, object, offset, size, buf,
            DMU_READ_NO_PREFETCH);
    } else {
        size = zv->zv_volblocksize;
        offset = P2ALIGN(offset, size);
        error = dmu_buf_hold(os, object, offset, zgd, &db,
            DMU_READ_NO_PREFETCH);

        ASSERT(db->db_offset == offset);
        ASSERT(db->db_size == size);

        error = dmu_sync(zio, lr->lr_common.lrc_txg,

    zvol_get_done(zgd, error);
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
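/*
 * Illustration only (hypothetical sizes): with the default 32K threshold and
 * no separate log device, a small synchronous write (say 8K) is copied into
 * its TX_WRITE record, while a full, aligned 128K write to a 128K-block
 * volume is written once via dmu_sync() and only a block pointer is logged.
 */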
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    uint32_t blocksize = zv->zv_volblocksize;
    zilog_t *zilog = zv->zv_zilog;
    ssize_t immediate_write_sz;

    if (zil_replaying(zilog, tx))

    immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
        ? 0 : zvol_immediate_write_sz;

    slogging = spa_has_slogs(zilog->zl_spa) &&
        (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

        itx_wr_state_t write_state;

        /*
         * Unlike zfs_log_write() we can be called with
         * up to DMU_MAX_ACCESS/2 (5MB) writes.
         */
        if (blocksize > immediate_write_sz && !slogging &&
            resid >= blocksize && off % blocksize == 0) {
            write_state = WR_INDIRECT;	/* uses dmu_sync */
            write_state = WR_COPIED;
            len = MIN(ZIL_MAX_LOG_DATA, resid);
            write_state = WR_NEED_COPY;
            len = MIN(ZIL_MAX_LOG_DATA, resid);

        itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
            (write_state == WR_COPIED ? len : 0));
        lr = (lr_write_t *)&itx->itx_lr;
        if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
            ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
            zil_itx_destroy(itx);
            itx = zil_itx_create(TX_WRITE, sizeof (*lr));
            lr = (lr_write_t *)&itx->itx_lr;
            write_state = WR_NEED_COPY;

        itx->itx_wr_state = write_state;
        if (write_state == WR_NEED_COPY)
            itx->itx_sod += len;
        lr->lr_foid = ZVOL_OBJ;
        lr->lr_offset = off;
        lr->lr_length = len;

        BP_ZERO(&lr->lr_blkptr);

        itx->itx_private = zv;
        itx->itx_sync = sync;

        zil_itx_assign(zilog, itx, tx);
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
    for (c = 0; c < vd->vdev_children; c++) {
        ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
            vd->vdev_ops == &vdev_replacing_ops ||
            vd->vdev_ops == &vdev_spare_ops);
        int err = zvol_dumpio_vdev(vd->vdev_child[c],
            addr, offset, size, doread, isdump);
        } else if (doread) {

    if (!vd->vdev_ops->vdev_op_leaf)
        return (numerrors < vd->vdev_children ? 0 : EIO);

    if (doread && !vdev_readable(vd))
    else if (!doread && !vdev_writeable(vd))

    ASSERT3P(dvd, !=, NULL);
    offset += VDEV_LABEL_START_SIZE;

    if (ddi_in_panic() || isdump) {
        return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
        return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
            doread ? B_READ : B_WRITE));
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
    spa_t *spa = dmu_objset_spa(zv->zv_objset);

    /* Must be sector aligned, and not straddle a block boundary. */
    if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
        P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
    ASSERT(size <= zv->zv_volblocksize);

    /* Locate the extent this belongs to */
    ze = list_head(&zv->zv_extents);
    while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
        offset -= ze->ze_nblks * zv->zv_volblocksize;
        ze = list_next(&zv->zv_extents, ze);

    if (!ddi_in_panic())
        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

    vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
    offset += DVA_GET_OFFSET(&ze->ze_dva);
    error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);

    if (!ddi_in_panic())
        spa_config_exit(spa, SCL_STATE, FTAG);
zvol_strategy(buf_t *bp)
    zfs_soft_state_t *zs = NULL;
    uint64_t off, volsize;
    boolean_t doread = bp->b_flags & B_READ;

    if (getminor(bp->b_edev) == 0) {
    zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
    else if (zs->zss_type != ZSST_ZVOL)
        bioerror(bp, error);

    if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
        bioerror(bp, EROFS);

    off = ldbtob(bp->b_blkno);
    volsize = zv->zv_volsize;

    addr = bp->b_un.b_addr;
    resid = bp->b_bcount;

    if (resid > 0 && (off < 0 || off >= volsize)) {

    is_dump = zv->zv_flags & ZVOL_DUMPIFIED;
    sync = ((!(bp->b_flags & B_ASYNC) &&
        !(zv->zv_flags & ZVOL_WCE)) ||
        (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
        !doread && !is_dump;

    /*
     * There must be no buffer changes when doing a dmu_sync() because
     * we can't change the data whilst calculating the checksum.
     */
    rl = zfs_range_lock(&zv->zv_znode, off, resid,
        doread ? RL_READER : RL_WRITER);

    while (resid != 0 && off < volsize) {
        size_t size = MIN(resid, zvol_maxphys);
            size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
            error = zvol_dumpio(zv, addr, off, size,
        } else if (doread) {
            error = dmu_read(os, ZVOL_OBJ, off, size, addr,
            dmu_tx_t *tx = dmu_tx_create(os);
            dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
            error = dmu_tx_assign(tx, TXG_WAIT);
            dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
            zvol_log_write(zv, tx, off, size, sync);
        /* convert checksum errors into IO errors */
        if (error == ECKSUM)

    zfs_range_unlock(rl);

    if ((bp->b_resid = resid) == bp->b_bcount)
        bioerror(bp, off > volsize ? EINVAL : error);

    zil_commit(zv->zv_zilog, ZVOL_OBJ);
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
zvol_minphys(struct buf *bp)
    if (bp->b_bcount > zvol_maxphys)
        bp->b_bcount = zvol_maxphys;
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
    minor_t minor = getminor(dev);

    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);

    boff = ldbtob(blkno);
    resid = ldbtob(nblocks);

    VERIFY3U(boff + resid, <=, zv->zv_volsize);

        size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
        error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
    minor_t minor = getminor(dev);

    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);

    volsize = zv->zv_volsize;
    if (uio->uio_resid > 0 &&
        (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))

    if (zv->zv_flags & ZVOL_DUMPIFIED) {
        error = physio(zvol_strategy, NULL, dev, B_READ,

    rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,

    while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
        uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

        /* don't read past the end */
        if (bytes > volsize - uio->uio_loffset)
            bytes = volsize - uio->uio_loffset;

        error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);

        /* convert checksum errors into IO errors */
        if (error == ECKSUM)

    zfs_range_unlock(rl);
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
    minor_t minor = getminor(dev);

    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);

    volsize = zv->zv_volsize;
    if (uio->uio_resid > 0 &&
        (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))

    if (zv->zv_flags & ZVOL_DUMPIFIED) {
        error = physio(zvol_strategy, NULL, dev, B_WRITE,

    sync = !(zv->zv_flags & ZVOL_WCE) ||
        (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

    rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,

    while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
        uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
        uint64_t off = uio->uio_loffset;
        dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

        if (bytes > volsize - off)	/* don't write past the end */
            bytes = volsize - off;

        dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
        error = dmu_tx_assign(tx, TXG_WAIT);
        error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
        zvol_log_write(zv, tx, off, bytes, sync);

    zfs_range_unlock(rl);

    zil_commit(zv->zv_zilog, ZVOL_OBJ);
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
    struct uuid uuid = EFI_RESERVED;
    efi_gpe_t gpe = { 0 };

    if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))

    ptr = (char *)(uintptr_t)efi.dki_data_64;
    length = efi.dki_length;

    /*
     * Some clients may attempt to request a PMBR for the
     * zvol.  Currently this interface will return EINVAL to
     * such requests.  These requests could be supported by
     * adding a check for lba == 0 and consing up an appropriate
     * fake PMBR and returning it.
     */
    if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)

    gpe.efi_gpe_StartingLBA = LE_64(34ULL);
    gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
    UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

    if (efi.dki_lba == 1) {
        efi_gpt_t gpt = { 0 };

        gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
        gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
        gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
        gpt.efi_gpt_MyLBA = LE_64(1ULL);
        gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
        gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
        gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
        gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
        gpt.efi_gpt_SizeOfPartitionEntry =
            LE_32(sizeof (efi_gpe_t));
        CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
        gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
        CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
        gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
        if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),

        ptr += sizeof (gpt);
        length -= sizeof (gpt);

    if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);

    if (zv->zv_flags & ZVOL_DUMPIFIED)

    ASSERT(blksize && max_xfer_len && minor_hdl &&
        objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

    *blksize = zv->zv_volblocksize;
    *max_xfer_len = (uint64_t)zvol_maxphys;
    *objset_hdl = zv->zv_objset;
    *zil_hdl = zv->zv_zilog;
    *rl_hdl = &zv->zv_znode;
    *bonus_hdl = zv->zv_dbuf;
/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
zvol_get_volume_size(void *minor_hdl)
    zvol_state_t *zv = minor_hdl;

    return (zv->zv_volsize);

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
zvol_get_volume_wce(void *minor_hdl)
    zvol_state_t *zv = minor_hdl;

    return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);

/*
 * Entry point for external callers to zvol_log_write
 */
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    zvol_state_t *zv = minor_hdl;

    zvol_log_write(zv, tx, off, resid, sync);

/*
 * END entry points to allow external callers access to the volume.
 */
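/*
 * Illustration only (hypothetical external consumer): after opening the
 * minor, a caller could fetch the invariant handles once and reuse them:
 *
 *	uint64_t blksize, maxxfer;
 *	void *minor_hdl, *os_hdl, *zil_hdl, *rl_hdl, *bonus_hdl;
 *
 *	if (zvol_get_volume_params(minor, &blksize, &maxxfer, &minor_hdl,
 *	    &os_hdl, &zil_hdl, &rl_hdl, &bonus_hdl) == 0) {
 *		uint64_t cursize = zvol_get_volume_size(minor_hdl);
 *		int wce = zvol_get_volume_wce(minor_hdl);
 *	}
 */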
/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    zilog_t *zilog = zv->zv_zilog;

    if (zil_replaying(zilog, tx))

    itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
    lr = (lr_truncate_t *)&itx->itx_lr;
    lr->lr_foid = ZVOL_OBJ;
    lr->lr_offset = off;
    lr->lr_length = len;

    itx->itx_sync = sync;
    zil_itx_assign(zilog, itx, tx);
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
    struct dk_cinfo dki;
    struct dk_minfo dkm;
    struct dk_callback *dkc;

    mutex_enter(&zfsdev_state_lock);

    zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
        mutex_exit(&zfsdev_state_lock);

    ASSERT(zv->zv_total_opens > 0);

        bzero(&dki, sizeof (dki));
        (void) strcpy(dki.dki_cname, "zvol");
        (void) strcpy(dki.dki_dname, "zvol");
        dki.dki_ctype = DKC_UNKNOWN;
        dki.dki_unit = getminor(dev);
        dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
        mutex_exit(&zfsdev_state_lock);
        if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))

    case DKIOCGMEDIAINFO:
        bzero(&dkm, sizeof (dkm));
        dkm.dki_lbsize = 1U << zv->zv_min_bs;
        dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
        dkm.dki_media_type = DK_UNKNOWN;
        mutex_exit(&zfsdev_state_lock);
        if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))

        uint64_t vs = zv->zv_volsize;
        uint8_t bs = zv->zv_min_bs;

        mutex_exit(&zfsdev_state_lock);
        error = zvol_getefi((void *)arg, flag, vs, bs);

    case DKIOCFLUSHWRITECACHE:
        dkc = (struct dk_callback *)arg;

        mutex_exit(&zfsdev_state_lock);
        zil_commit(zv->zv_zilog, ZVOL_OBJ);
        if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
            (*dkc->dkc_callback)(dkc->dkc_cookie, error);

        int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
        if (ddi_copyout(&wce, (void *)arg, sizeof (int),

        if (ddi_copyin((void *)arg, &wce, sizeof (int),

            zv->zv_flags |= ZVOL_WCE;
            mutex_exit(&zfsdev_state_lock);
            zv->zv_flags &= ~ZVOL_WCE;
            mutex_exit(&zfsdev_state_lock);
            zil_commit(zv->zv_zilog, ZVOL_OBJ);

        /*
         * commands using these (like prtvtoc) expect ENOTSUP
         * since we're emulating an EFI label
         */

        rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
        error = zvol_dumpify(zv);
        zfs_range_unlock(rl);

        if (!(zv->zv_flags & ZVOL_DUMPIFIED))
        rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
        error = zvol_dump_fini(zv);
        zfs_range_unlock(rl);

        if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {

        /*
         * Apply Postel's Law to length-checking.  If they overshoot,
         * just blank out until the end, if there's a need to blank
         */
        if (df.df_start >= zv->zv_volsize)
            break;	/* No need to do anything... */
        if (df.df_start + df.df_length > zv->zv_volsize)
            df.df_length = DMU_OBJECT_END;

        rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
        tx = dmu_tx_create(zv->zv_objset);
        error = dmu_tx_assign(tx, TXG_WAIT);
            zvol_log_truncate(zv, tx, df.df_start,
                df.df_length, B_TRUE);
            error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
                df.df_start, df.df_length);

        zfs_range_unlock(rl);

        /*
         * If the write-cache is disabled or 'sync' property
         * is set to 'always' then treat this as a synchronous
         * operation (i.e. commit to zil).
         */
        if (!(zv->zv_flags & ZVOL_WCE) ||
            (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
            zil_commit(zv->zv_zilog, ZVOL_OBJ);

        /*
         * If the caller really wants synchronous writes, and
         * can't wait for them, don't return until the write
         */
        if (df.df_flags & DF_WAIT_SYNC) {
            dmu_objset_pool(zv->zv_objset), 0);

    mutex_exit(&zfsdev_state_lock);
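/*
 * Illustration only (hypothetical userland caller): DKIOCGMEDIAINFO above is
 * what utilities typically use to size a zvol:
 *
 *	struct dk_minfo m;
 *	int fd = open("/dev/zvol/rdsk/tank/vol1", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, DKIOCGMEDIAINFO, &m) == 0)
 *		nbytes = (uint64_t)m.dki_capacity * m.dki_lbsize;
 */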
    return (zvol_minors != 0);

    VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),

    mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);

    mutex_destroy(&zfsdev_state_lock);
    ddi_soft_state_fini(&zfsdev_state);
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
    objset_t *os = zv->zv_objset;
    nvlist_t *nv = NULL;
    uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

    ASSERT(MUTEX_HELD(&zfsdev_state_lock));
    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,

    /* wait for dmu_free_long_range to actually free the blocks */
    txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    dmu_tx_hold_bonus(tx, ZVOL_OBJ);
    error = dmu_tx_assign(tx, TXG_WAIT);

    /*
     * If we are resizing the dump device then we only need to
     * update the refreservation to match the newly updated
     * zvolsize.  Otherwise, we save off the original state of the
     * zvol so that we can restore them if the zvol is ever undumpified.
     */
        error = zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
            &zv->zv_volsize, tx);

        uint64_t checksum, compress, refresrv, vbs, dedup;

        error = dsl_prop_get_integer(zv->zv_name,
            zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
        error = error ? error : dsl_prop_get_integer(zv->zv_name,
            zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
        error = error ? error : dsl_prop_get_integer(zv->zv_name,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
        error = error ? error : dsl_prop_get_integer(zv->zv_name,
            zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
        if (version >= SPA_VERSION_DEDUP) {
            error = error ? error :
                dsl_prop_get_integer(zv->zv_name,
                zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);

        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
        error = error ? error : dmu_object_set_blocksize(
            os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
        if (version >= SPA_VERSION_DEDUP) {
            error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
                zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,

        zv->zv_volblocksize = SPA_MAXBLOCKSIZE;

    /*
     * We only need to update the zvol's property if we are initializing
     * the dump area for the first time.
     */
        VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        VERIFY(nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
        VERIFY(nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_COMPRESSION),
            ZIO_COMPRESS_OFF) == 0);
        VERIFY(nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_CHECKSUM),
            ZIO_CHECKSUM_OFF) == 0);
        if (version >= SPA_VERSION_DEDUP) {
            VERIFY(nvlist_add_uint64(nv,
                zfs_prop_to_name(ZFS_PROP_DEDUP),
                ZIO_CHECKSUM_OFF) == 0);

        error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,

    /* Allocate the space for the dump */
    error = zvol_prealloc(zv);
zvol_dumpify(zvol_state_t *zv)
    uint64_t dumpsize = 0;
    objset_t *os = zv->zv_objset;

    if (zv->zv_flags & ZVOL_RDONLY)

    if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
        8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
        boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;

        if ((error = zvol_dump_init(zv, resize)) != 0) {
            (void) zvol_dump_fini(zv);

    /*
     * Build up our lba mapping.
     */
    error = zvol_get_lbas(zv);
        (void) zvol_dump_fini(zv);

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    error = dmu_tx_assign(tx, TXG_WAIT);
        (void) zvol_dump_fini(zv);

    zv->zv_flags |= ZVOL_DUMPIFIED;
    error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
        &zv->zv_volsize, tx);
        (void) zvol_dump_fini(zv);

    txg_wait_synced(dmu_objset_pool(os), 0);
zvol_dump_fini(zvol_state_t *zv)
    objset_t *os = zv->zv_objset;
    uint64_t checksum, compress, refresrv, vbs, dedup;
    uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

    /*
     * Attempt to restore the zvol back to its pre-dumpified state.
     * This is a best-effort attempt as it's possible that not all
     * of these properties were initialized during the dumpify process
     * (i.e. error during zvol_dump_init).
     */
    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    error = dmu_tx_assign(tx, TXG_WAIT);
    (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);

    (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
    (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
    (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
    (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

    VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
    (void) nvlist_add_uint64(nv,
        zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
    (void) nvlist_add_uint64(nv,
        zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
    (void) nvlist_add_uint64(nv,
        zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
    if (version >= SPA_VERSION_DEDUP &&
        zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
        (void) nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);

    (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,

    zvol_free_extents(zv);
    zv->zv_flags &= ~ZVOL_DUMPIFIED;
    (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
    /* wait for dmu_free_long_range to actually free the blocks */
    txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
    tx = dmu_tx_create(os);
    dmu_tx_hold_bonus(tx, ZVOL_OBJ);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
        zv->zv_volblocksize = vbs;