/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2010 Robert Milkowski
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */
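/*
 * Illustrative usage (not part of this driver): a volume is typically
 * created with "zfs create -V <size> <pool>/<volume>"; it can then be
 * opened directly through /dev/zvol/rdsk/<pool>/<volume> (character) or
 * /dev/zvol/dsk/<pool>/<volume> (block) with no further setup.
 */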
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/spa_impl.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/zil_impl.h>
#include "zfs_namecheck.h"
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"
/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;
/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN];	/* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;
/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8
/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS / 2;
/*
 * Toggle unmap functionality.
 */
boolean_t zvol_unmap_enabled = B_TRUE;

/*
 * If true, unmaps requested as synchronous are executed synchronously,
 * otherwise all unmaps are asynchronous.
 */
boolean_t zvol_unmap_sync_enabled = B_FALSE;
extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));

	return (0);
}
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}
/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};
/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}
static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}
static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg ma;
	int err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}
/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}
/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
	zvol_state_t *zv = arg1;
	lr_truncate_t *lr = arg2;
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
	zvol_state_t *zv = arg1;
	lr_write_t *lr = arg2;
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}
/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};
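/*
 * Return the minor number for the volume with the given name, or -1 if
 * no minor node exists for it.
 */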
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}
483 * Create a minor node (plus a whole lot more) for the specified volume.
486 zvol_create_minor(const char *name
)
488 zfs_soft_state_t
*zs
;
491 dmu_object_info_t doi
;
493 char chrbuf
[30], blkbuf
[30];
496 mutex_enter(&zfsdev_state_lock
);
498 if (zvol_minor_lookup(name
) != NULL
) {
499 mutex_exit(&zfsdev_state_lock
);
500 return (SET_ERROR(EEXIST
));
503 /* lie and say we're read-only */
504 error
= dmu_objset_own(name
, DMU_OST_ZVOL
, B_TRUE
, FTAG
, &os
);
507 mutex_exit(&zfsdev_state_lock
);
511 if ((minor
= zfsdev_minor_alloc()) == 0) {
512 dmu_objset_disown(os
, FTAG
);
513 mutex_exit(&zfsdev_state_lock
);
514 return (SET_ERROR(ENXIO
));
517 if (ddi_soft_state_zalloc(zfsdev_state
, minor
) != DDI_SUCCESS
) {
518 dmu_objset_disown(os
, FTAG
);
519 mutex_exit(&zfsdev_state_lock
);
520 return (SET_ERROR(EAGAIN
));
522 (void) ddi_prop_update_string(minor
, zfs_dip
, ZVOL_PROP_NAME
,
525 (void) snprintf(chrbuf
, sizeof (chrbuf
), "%u,raw", minor
);
527 if (ddi_create_minor_node(zfs_dip
, chrbuf
, S_IFCHR
,
528 minor
, DDI_PSEUDO
, 0) == DDI_FAILURE
) {
529 ddi_soft_state_free(zfsdev_state
, minor
);
530 dmu_objset_disown(os
, FTAG
);
531 mutex_exit(&zfsdev_state_lock
);
532 return (SET_ERROR(EAGAIN
));
535 (void) snprintf(blkbuf
, sizeof (blkbuf
), "%u", minor
);
537 if (ddi_create_minor_node(zfs_dip
, blkbuf
, S_IFBLK
,
538 minor
, DDI_PSEUDO
, 0) == DDI_FAILURE
) {
539 ddi_remove_minor_node(zfs_dip
, chrbuf
);
540 ddi_soft_state_free(zfsdev_state
, minor
);
541 dmu_objset_disown(os
, FTAG
);
542 mutex_exit(&zfsdev_state_lock
);
543 return (SET_ERROR(EAGAIN
));
546 zs
= ddi_get_soft_state(zfsdev_state
, minor
);
547 zs
->zss_type
= ZSST_ZVOL
;
548 zv
= zs
->zss_data
= kmem_zalloc(sizeof (zvol_state_t
), KM_SLEEP
);
549 (void) strlcpy(zv
->zv_name
, name
, MAXPATHLEN
);
550 zv
->zv_min_bs
= DEV_BSHIFT
;
551 zv
->zv_minor
= minor
;
553 if (dmu_objset_is_snapshot(os
) || !spa_writeable(dmu_objset_spa(os
)))
554 zv
->zv_flags
|= ZVOL_RDONLY
;
555 mutex_init(&zv
->zv_znode
.z_range_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
556 avl_create(&zv
->zv_znode
.z_range_avl
, zfs_range_compare
,
557 sizeof (rl_t
), offsetof(rl_t
, r_node
));
558 list_create(&zv
->zv_extents
, sizeof (zvol_extent_t
),
559 offsetof(zvol_extent_t
, ze_node
));
560 /* get and cache the blocksize */
561 error
= dmu_object_info(os
, ZVOL_OBJ
, &doi
);
563 zv
->zv_volblocksize
= doi
.doi_data_block_size
;
565 if (spa_writeable(dmu_objset_spa(os
))) {
566 if (zil_replay_disable
)
567 zil_destroy(dmu_objset_zil(os
), B_FALSE
);
569 zil_replay(os
, zv
, zvol_replay_vector
);
571 dmu_objset_disown(os
, FTAG
);
572 zv
->zv_objset
= NULL
;
576 mutex_exit(&zfsdev_state_lock
);
582 * Remove minor node for the specified volume.
585 zvol_remove_zv(zvol_state_t
*zv
)
588 minor_t minor
= zv
->zv_minor
;
590 ASSERT(MUTEX_HELD(&zfsdev_state_lock
));
591 if (zv
->zv_total_opens
!= 0)
592 return (SET_ERROR(EBUSY
));
594 (void) snprintf(nmbuf
, sizeof (nmbuf
), "%u,raw", minor
);
595 ddi_remove_minor_node(zfs_dip
, nmbuf
);
597 (void) snprintf(nmbuf
, sizeof (nmbuf
), "%u", minor
);
598 ddi_remove_minor_node(zfs_dip
, nmbuf
);
600 avl_destroy(&zv
->zv_znode
.z_range_avl
);
601 mutex_destroy(&zv
->zv_znode
.z_range_lock
);
603 kmem_free(zv
, sizeof (zvol_state_t
));
605 ddi_soft_state_free(zfsdev_state
, minor
);
612 zvol_remove_minor(const char *name
)
617 mutex_enter(&zfsdev_state_lock
);
618 if ((zv
= zvol_minor_lookup(name
)) == NULL
) {
619 mutex_exit(&zfsdev_state_lock
);
620 return (SET_ERROR(ENXIO
));
622 rc
= zvol_remove_zv(zv
);
623 mutex_exit(&zfsdev_state_lock
);
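/*
 * First open of a minor: own the underlying objset, look up the volume
 * size, hold the bonus buffer and open the ZIL.
 */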
628 zvol_first_open(zvol_state_t
*zv
)
635 /* lie and say we're read-only */
636 error
= dmu_objset_own(zv
->zv_name
, DMU_OST_ZVOL
, B_TRUE
,
642 error
= zap_lookup(os
, ZVOL_ZAP_OBJ
, "size", 8, 1, &volsize
);
645 dmu_objset_disown(os
, zvol_tag
);
649 error
= dmu_bonus_hold(os
, ZVOL_OBJ
, zvol_tag
, &zv
->zv_dbuf
);
651 dmu_objset_disown(os
, zvol_tag
);
655 zvol_size_changed(zv
, volsize
);
656 zv
->zv_zilog
= zil_open(os
, zvol_get_data
);
658 VERIFY(dsl_prop_get_integer(zv
->zv_name
, "readonly", &readonly
,
660 if (readonly
|| dmu_objset_is_snapshot(os
) ||
661 !spa_writeable(dmu_objset_spa(os
)))
662 zv
->zv_flags
|= ZVOL_RDONLY
;
664 zv
->zv_flags
&= ~ZVOL_RDONLY
;
669 zvol_last_close(zvol_state_t
*zv
)
671 zil_close(zv
->zv_zilog
);
674 dmu_buf_rele(zv
->zv_dbuf
, zvol_tag
);
680 if (dsl_dataset_is_dirty(dmu_objset_ds(zv
->zv_objset
)) &&
681 !(zv
->zv_flags
& ZVOL_RDONLY
))
682 txg_wait_synced(dmu_objset_pool(zv
->zv_objset
), 0);
683 dmu_objset_evict_dbufs(zv
->zv_objset
);
685 dmu_objset_disown(zv
->zv_objset
, zvol_tag
);
686 zv
->zv_objset
= NULL
;
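/*
 * Preallocate every block of the volume so it has a fixed on-disk layout;
 * required before the zvol can be used as a dump device.
 */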
690 zvol_prealloc(zvol_state_t
*zv
)
692 objset_t
*os
= zv
->zv_objset
;
694 uint64_t refd
, avail
, usedobjs
, availobjs
;
695 uint64_t resid
= zv
->zv_volsize
;
698 /* Check the space usage before attempting to allocate the space */
699 dmu_objset_space(os
, &refd
, &avail
, &usedobjs
, &availobjs
);
700 if (avail
< zv
->zv_volsize
)
701 return (SET_ERROR(ENOSPC
));
703 /* Free old extents if they exist */
704 zvol_free_extents(zv
);
708 uint64_t bytes
= MIN(resid
, SPA_OLD_MAXBLOCKSIZE
);
710 tx
= dmu_tx_create(os
);
711 dmu_tx_hold_write(tx
, ZVOL_OBJ
, off
, bytes
);
712 error
= dmu_tx_assign(tx
, TXG_WAIT
);
715 (void) dmu_free_long_range(os
, ZVOL_OBJ
, 0, off
);
718 dmu_prealloc(os
, ZVOL_OBJ
, off
, bytes
, tx
);
723 txg_wait_synced(dmu_objset_pool(os
), 0);
729 zvol_update_volsize(objset_t
*os
, uint64_t volsize
)
734 ASSERT(MUTEX_HELD(&zfsdev_state_lock
));
736 tx
= dmu_tx_create(os
);
737 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
738 dmu_tx_mark_netfree(tx
);
739 error
= dmu_tx_assign(tx
, TXG_WAIT
);
745 error
= zap_update(os
, ZVOL_ZAP_OBJ
, "size", 8, 1,
750 error
= dmu_free_long_range(os
,
751 ZVOL_OBJ
, volsize
, DMU_OBJECT_END
);
756 zvol_remove_minors(const char *name
)
762 namebuf
= kmem_zalloc(strlen(name
) + 2, KM_SLEEP
);
763 (void) strncpy(namebuf
, name
, strlen(name
));
764 (void) strcat(namebuf
, "/");
765 mutex_enter(&zfsdev_state_lock
);
766 for (minor
= 1; minor
<= ZFSDEV_MAX_MINOR
; minor
++) {
768 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
771 if (strncmp(namebuf
, zv
->zv_name
, strlen(namebuf
)) == 0)
772 (void) zvol_remove_zv(zv
);
774 kmem_free(namebuf
, strlen(name
) + 2);
776 mutex_exit(&zfsdev_state_lock
);
780 zvol_update_live_volsize(zvol_state_t
*zv
, uint64_t volsize
)
782 uint64_t old_volsize
= 0ULL;
785 ASSERT(MUTEX_HELD(&zfsdev_state_lock
));
788 * Reinitialize the dump area to the new size. If we
789 * failed to resize the dump area then restore it back to
790 * its original size. We must set the new volsize prior
791 * to calling dumpvp_resize() to ensure that the devices'
792 * size(9P) is not visible by the dump subsystem.
794 old_volsize
= zv
->zv_volsize
;
795 zvol_size_changed(zv
, volsize
);
797 if (zv
->zv_flags
& ZVOL_DUMPIFIED
) {
798 if ((error
= zvol_dumpify(zv
)) != 0 ||
799 (error
= dumpvp_resize()) != 0) {
802 (void) zvol_update_volsize(zv
->zv_objset
, old_volsize
);
803 zvol_size_changed(zv
, old_volsize
);
804 dumpify_error
= zvol_dumpify(zv
);
805 error
= dumpify_error
? dumpify_error
: error
;
810 * Generate a LUN expansion event.
815 char *physpath
= kmem_zalloc(MAXPATHLEN
, KM_SLEEP
);
817 (void) snprintf(physpath
, MAXPATHLEN
, "%s%u", ZVOL_PSEUDO_DEV
,
820 VERIFY(nvlist_alloc(&attr
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
821 VERIFY(nvlist_add_string(attr
, DEV_PHYS_PATH
, physpath
) == 0);
823 (void) ddi_log_sysevent(zfs_dip
, SUNW_VENDOR
, EC_DEV_STATUS
,
824 ESC_DEV_DLE
, attr
, &eid
, DDI_SLEEP
);
827 kmem_free(physpath
, MAXPATHLEN
);
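/*
 * Set a new volume size: validate it against the volume block size,
 * update the on-disk size and, if a minor exists, the live in-core state.
 */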
833 zvol_set_volsize(const char *name
, uint64_t volsize
)
835 zvol_state_t
*zv
= NULL
;
838 dmu_object_info_t doi
;
840 boolean_t owned
= B_FALSE
;
842 error
= dsl_prop_get_integer(name
,
843 zfs_prop_to_name(ZFS_PROP_READONLY
), &readonly
, NULL
);
847 return (SET_ERROR(EROFS
));
849 mutex_enter(&zfsdev_state_lock
);
850 zv
= zvol_minor_lookup(name
);
852 if (zv
== NULL
|| zv
->zv_objset
== NULL
) {
853 if ((error
= dmu_objset_own(name
, DMU_OST_ZVOL
, B_FALSE
,
855 mutex_exit(&zfsdev_state_lock
);
865 if ((error
= dmu_object_info(os
, ZVOL_OBJ
, &doi
)) != 0 ||
866 (error
= zvol_check_volsize(volsize
, doi
.doi_data_block_size
)) != 0)
869 error
= zvol_update_volsize(os
, volsize
);
871 if (error
== 0 && zv
!= NULL
)
872 error
= zvol_update_live_volsize(zv
, volsize
);
875 dmu_objset_disown(os
, FTAG
);
877 zv
->zv_objset
= NULL
;
879 mutex_exit(&zfsdev_state_lock
);
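/* open(9E) entry point for the zvol pseudo-device driver */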
885 zvol_open(dev_t
*devp
, int flag
, int otyp
, cred_t
*cr
)
890 mutex_enter(&zfsdev_state_lock
);
892 zv
= zfsdev_get_soft_state(getminor(*devp
), ZSST_ZVOL
);
894 mutex_exit(&zfsdev_state_lock
);
895 return (SET_ERROR(ENXIO
));
898 if (zv
->zv_total_opens
== 0)
899 err
= zvol_first_open(zv
);
901 mutex_exit(&zfsdev_state_lock
);
904 if ((flag
& FWRITE
) && (zv
->zv_flags
& ZVOL_RDONLY
)) {
905 err
= SET_ERROR(EROFS
);
908 if (zv
->zv_flags
& ZVOL_EXCL
) {
909 err
= SET_ERROR(EBUSY
);
913 if (zv
->zv_total_opens
!= 0) {
914 err
= SET_ERROR(EBUSY
);
917 zv
->zv_flags
|= ZVOL_EXCL
;
920 if (zv
->zv_open_count
[otyp
] == 0 || otyp
== OTYP_LYR
) {
921 zv
->zv_open_count
[otyp
]++;
922 zv
->zv_total_opens
++;
924 mutex_exit(&zfsdev_state_lock
);
928 if (zv
->zv_total_opens
== 0)
930 mutex_exit(&zfsdev_state_lock
);
936 zvol_close(dev_t dev
, int flag
, int otyp
, cred_t
*cr
)
938 minor_t minor
= getminor(dev
);
942 mutex_enter(&zfsdev_state_lock
);
944 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
946 mutex_exit(&zfsdev_state_lock
);
947 return (SET_ERROR(ENXIO
));
950 if (zv
->zv_flags
& ZVOL_EXCL
) {
951 ASSERT(zv
->zv_total_opens
== 1);
952 zv
->zv_flags
&= ~ZVOL_EXCL
;
956 * If the open count is zero, this is a spurious close.
957 * That indicates a bug in the kernel / DDI framework.
959 ASSERT(zv
->zv_open_count
[otyp
] != 0);
960 ASSERT(zv
->zv_total_opens
!= 0);
963 * You may get multiple opens, but only one close.
965 zv
->zv_open_count
[otyp
]--;
966 zv
->zv_total_opens
--;
968 if (zv
->zv_total_opens
== 0)
971 mutex_exit(&zfsdev_state_lock
);
976 zvol_get_done(zgd_t
*zgd
, int error
)
979 dmu_buf_rele(zgd
->zgd_db
, zgd
);
981 zfs_range_unlock(zgd
->zgd_rl
);
983 if (error
== 0 && zgd
->zgd_bp
)
984 zil_lwb_add_block(zgd
->zgd_lwb
, zgd
->zgd_bp
);
986 kmem_free(zgd
, sizeof (zgd_t
));
990 * Get data to generate a TX_WRITE intent log record.
993 zvol_get_data(void *arg
, lr_write_t
*lr
, char *buf
, struct lwb
*lwb
, zio_t
*zio
)
995 zvol_state_t
*zv
= arg
;
996 objset_t
*os
= zv
->zv_objset
;
997 uint64_t object
= ZVOL_OBJ
;
998 uint64_t offset
= lr
->lr_offset
;
999 uint64_t size
= lr
->lr_length
; /* length of user data */
1004 ASSERT3P(lwb
, !=, NULL
);
1005 ASSERT3P(zio
, !=, NULL
);
1006 ASSERT3U(size
, !=, 0);
1008 zgd
= kmem_zalloc(sizeof (zgd_t
), KM_SLEEP
);
1012 * Write records come in two flavors: immediate and indirect.
1013 * For small writes it's cheaper to store the data with the
1014 * log record (immediate); for large writes it's cheaper to
1015 * sync the data and get a pointer to it (indirect) so that
1016 * we don't have to write the data twice.
1018 if (buf
!= NULL
) { /* immediate write */
1019 zgd
->zgd_rl
= zfs_range_lock(&zv
->zv_znode
, offset
, size
,
1021 error
= dmu_read(os
, object
, offset
, size
, buf
,
1022 DMU_READ_NO_PREFETCH
);
1023 } else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's written out
		 * and its checksum is being calculated that no one can change
		 * the data.  Unlike zfs_get_data, we need not re-check the
		 * blocksize after we get the lock because it cannot change.
		 */
1030 size
= zv
->zv_volblocksize
;
1031 offset
= P2ALIGN(offset
, size
);
1032 zgd
->zgd_rl
= zfs_range_lock(&zv
->zv_znode
, offset
, size
,
1034 error
= dmu_buf_hold(os
, object
, offset
, zgd
, &db
,
1035 DMU_READ_NO_PREFETCH
);
1037 blkptr_t
*bp
= &lr
->lr_blkptr
;
1042 ASSERT(db
->db_offset
== offset
);
1043 ASSERT(db
->db_size
== size
);
1045 error
= dmu_sync(zio
, lr
->lr_common
.lrc_txg
,
1046 zvol_get_done
, zgd
);
1053 zvol_get_done(zgd
, error
);
1059 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1061 * We store data in the log buffers if it's small enough.
1062 * Otherwise we will later flush the data out via dmu_sync().
1064 ssize_t zvol_immediate_write_sz
= 32768;
1067 zvol_log_write(zvol_state_t
*zv
, dmu_tx_t
*tx
, offset_t off
, ssize_t resid
,
1070 uint32_t blocksize
= zv
->zv_volblocksize
;
1071 zilog_t
*zilog
= zv
->zv_zilog
;
1072 itx_wr_state_t write_state
;
1074 if (zil_replaying(zilog
, tx
))
1077 if (zilog
->zl_logbias
== ZFS_LOGBIAS_THROUGHPUT
)
1078 write_state
= WR_INDIRECT
;
1079 else if (!spa_has_slogs(zilog
->zl_spa
) &&
1080 resid
>= blocksize
&& blocksize
> zvol_immediate_write_sz
)
1081 write_state
= WR_INDIRECT
;
1083 write_state
= WR_COPIED
;
1085 write_state
= WR_NEED_COPY
;
1090 itx_wr_state_t wr_state
= write_state
;
1091 ssize_t len
= resid
;
1093 if (wr_state
== WR_COPIED
&& resid
> ZIL_MAX_COPIED_DATA
)
1094 wr_state
= WR_NEED_COPY
;
1095 else if (wr_state
== WR_INDIRECT
)
1096 len
= MIN(blocksize
- P2PHASE(off
, blocksize
), resid
);
1098 itx
= zil_itx_create(TX_WRITE
, sizeof (*lr
) +
1099 (wr_state
== WR_COPIED
? len
: 0));
1100 lr
= (lr_write_t
*)&itx
->itx_lr
;
1101 if (wr_state
== WR_COPIED
&& dmu_read(zv
->zv_objset
,
1102 ZVOL_OBJ
, off
, len
, lr
+ 1, DMU_READ_NO_PREFETCH
) != 0) {
1103 zil_itx_destroy(itx
);
1104 itx
= zil_itx_create(TX_WRITE
, sizeof (*lr
));
1105 lr
= (lr_write_t
*)&itx
->itx_lr
;
1106 wr_state
= WR_NEED_COPY
;
1109 itx
->itx_wr_state
= wr_state
;
1110 lr
->lr_foid
= ZVOL_OBJ
;
1111 lr
->lr_offset
= off
;
1112 lr
->lr_length
= len
;
1114 BP_ZERO(&lr
->lr_blkptr
);
1116 itx
->itx_private
= zv
;
1117 itx
->itx_sync
= sync
;
1119 zil_itx_assign(zilog
, itx
, tx
);
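/*
 * Perform raw I/O against the leaf vdevs backing a dumpified zvol,
 * recursing through mirror/replacing/spare vdevs and handing raidz off
 * to vdev_raidz_physio(); used in the dump and panic paths.
 */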
1127 zvol_dumpio_vdev(vdev_t
*vd
, void *addr
, uint64_t offset
, uint64_t origoffset
,
1128 uint64_t size
, boolean_t doread
, boolean_t isdump
)
1134 if (vd
->vdev_ops
== &vdev_mirror_ops
||
1135 vd
->vdev_ops
== &vdev_replacing_ops
||
1136 vd
->vdev_ops
== &vdev_spare_ops
) {
1137 for (c
= 0; c
< vd
->vdev_children
; c
++) {
1138 int err
= zvol_dumpio_vdev(vd
->vdev_child
[c
],
1139 addr
, offset
, origoffset
, size
, doread
, isdump
);
1142 } else if (doread
) {
1148 if (!vd
->vdev_ops
->vdev_op_leaf
&& vd
->vdev_ops
!= &vdev_raidz_ops
)
1149 return (numerrors
< vd
->vdev_children
? 0 : EIO
);
1151 if (doread
&& !vdev_readable(vd
))
1152 return (SET_ERROR(EIO
));
1153 else if (!doread
&& !vdev_writeable(vd
))
1154 return (SET_ERROR(EIO
));
1156 if (vd
->vdev_ops
== &vdev_raidz_ops
) {
1157 return (vdev_raidz_physio(vd
,
1158 addr
, size
, offset
, origoffset
, doread
, isdump
));
1161 offset
+= VDEV_LABEL_START_SIZE
;
1163 if (ddi_in_panic() || isdump
) {
1166 return (SET_ERROR(EIO
));
1168 ASSERT3P(dvd
, !=, NULL
);
1169 return (ldi_dump(dvd
->vd_lh
, addr
, lbtodb(offset
),
1173 ASSERT3P(dvd
, !=, NULL
);
1174 return (vdev_disk_ldi_physio(dvd
->vd_lh
, addr
, size
,
1175 offset
, doread
? B_READ
: B_WRITE
));
1180 zvol_dumpio(zvol_state_t
*zv
, void *addr
, uint64_t offset
, uint64_t size
,
1181 boolean_t doread
, boolean_t isdump
)
1186 spa_t
*spa
= dmu_objset_spa(zv
->zv_objset
);
	/* Must be sector aligned, and not straddle a block boundary. */
1189 if (P2PHASE(offset
, DEV_BSIZE
) || P2PHASE(size
, DEV_BSIZE
) ||
1190 P2BOUNDARY(offset
, size
, zv
->zv_volblocksize
)) {
1191 return (SET_ERROR(EINVAL
));
1193 ASSERT(size
<= zv
->zv_volblocksize
);
1195 /* Locate the extent this belongs to */
1196 ze
= list_head(&zv
->zv_extents
);
1197 while (offset
>= ze
->ze_nblks
* zv
->zv_volblocksize
) {
1198 offset
-= ze
->ze_nblks
* zv
->zv_volblocksize
;
1199 ze
= list_next(&zv
->zv_extents
, ze
);
1203 return (SET_ERROR(EINVAL
));
1205 if (!ddi_in_panic())
1206 spa_config_enter(spa
, SCL_STATE
, FTAG
, RW_READER
);
1208 vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&ze
->ze_dva
));
1209 offset
+= DVA_GET_OFFSET(&ze
->ze_dva
);
1210 error
= zvol_dumpio_vdev(vd
, addr
, offset
, DVA_GET_OFFSET(&ze
->ze_dva
),
1211 size
, doread
, isdump
);
1213 if (!ddi_in_panic())
1214 spa_config_exit(spa
, SCL_STATE
, FTAG
);
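/* strategy(9E) entry point: service buf(9S) I/O requests on the volume */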
1220 zvol_strategy(buf_t
*bp
)
1222 zfs_soft_state_t
*zs
= NULL
;
1224 uint64_t off
, volsize
;
1230 boolean_t doread
= bp
->b_flags
& B_READ
;
1231 boolean_t is_dumpified
;
1234 if (getminor(bp
->b_edev
) == 0) {
1235 error
= SET_ERROR(EINVAL
);
1237 zs
= ddi_get_soft_state(zfsdev_state
, getminor(bp
->b_edev
));
1239 error
= SET_ERROR(ENXIO
);
1240 else if (zs
->zss_type
!= ZSST_ZVOL
)
1241 error
= SET_ERROR(EINVAL
);
1245 bioerror(bp
, error
);
1252 if (!(bp
->b_flags
& B_READ
) && (zv
->zv_flags
& ZVOL_RDONLY
)) {
1253 bioerror(bp
, EROFS
);
1258 off
= ldbtob(bp
->b_blkno
);
1259 volsize
= zv
->zv_volsize
;
1265 addr
= bp
->b_un
.b_addr
;
1266 resid
= bp
->b_bcount
;
1268 if (resid
> 0 && (off
< 0 || off
>= volsize
)) {
1274 is_dumpified
= zv
->zv_flags
& ZVOL_DUMPIFIED
;
1275 sync
= ((!(bp
->b_flags
& B_ASYNC
) &&
1276 !(zv
->zv_flags
& ZVOL_WCE
)) ||
1277 (zv
->zv_objset
->os_sync
== ZFS_SYNC_ALWAYS
)) &&
1278 !doread
&& !is_dumpified
;
1281 * There must be no buffer changes when doing a dmu_sync() because
1282 * we can't change the data whilst calculating the checksum.
1284 rl
= zfs_range_lock(&zv
->zv_znode
, off
, resid
,
1285 doread
? RL_READER
: RL_WRITER
);
1287 while (resid
!= 0 && off
< volsize
) {
1288 size_t size
= MIN(resid
, zvol_maxphys
);
1290 size
= MIN(size
, P2END(off
, zv
->zv_volblocksize
) - off
);
1291 error
= zvol_dumpio(zv
, addr
, off
, size
,
1293 } else if (doread
) {
1294 error
= dmu_read(os
, ZVOL_OBJ
, off
, size
, addr
,
1297 dmu_tx_t
*tx
= dmu_tx_create(os
);
1298 dmu_tx_hold_write(tx
, ZVOL_OBJ
, off
, size
);
1299 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1303 dmu_write(os
, ZVOL_OBJ
, off
, size
, addr
, tx
);
1304 zvol_log_write(zv
, tx
, off
, size
, sync
);
1309 /* convert checksum errors into IO errors */
1310 if (error
== ECKSUM
)
1311 error
= SET_ERROR(EIO
);
1318 zfs_range_unlock(rl
);
1320 if ((bp
->b_resid
= resid
) == bp
->b_bcount
)
1321 bioerror(bp
, off
> volsize
? EINVAL
: error
);
1324 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
1331 * Set the buffer count to the zvol maximum transfer.
1332 * Using our own routine instead of the default minphys()
1333 * means that for larger writes we write bigger buffers on X86
1334 * (128K instead of 56K) and flush the disk write cache less often
1335 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1336 * 56K on X86 and 128K on sparc).
1339 zvol_minphys(struct buf
*bp
)
1341 if (bp
->b_bcount
> zvol_maxphys
)
1342 bp
->b_bcount
= zvol_maxphys
;
1346 zvol_dump(dev_t dev
, caddr_t addr
, daddr_t blkno
, int nblocks
)
1348 minor_t minor
= getminor(dev
);
1355 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
1357 return (SET_ERROR(ENXIO
));
1359 if ((zv
->zv_flags
& ZVOL_DUMPIFIED
) == 0)
1360 return (SET_ERROR(EINVAL
));
1362 boff
= ldbtob(blkno
);
1363 resid
= ldbtob(nblocks
);
1365 VERIFY3U(boff
+ resid
, <=, zv
->zv_volsize
);
1368 size
= MIN(resid
, P2END(boff
, zv
->zv_volblocksize
) - boff
);
1369 error
= zvol_dumpio(zv
, addr
, boff
, size
, B_FALSE
, B_TRUE
);
1382 zvol_read(dev_t dev
, uio_t
*uio
, cred_t
*cr
)
1384 minor_t minor
= getminor(dev
);
1390 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
1392 return (SET_ERROR(ENXIO
));
1394 volsize
= zv
->zv_volsize
;
1395 if (uio
->uio_resid
> 0 &&
1396 (uio
->uio_loffset
< 0 || uio
->uio_loffset
>= volsize
))
1397 return (SET_ERROR(EIO
));
1399 if (zv
->zv_flags
& ZVOL_DUMPIFIED
) {
1400 error
= physio(zvol_strategy
, NULL
, dev
, B_READ
,
1405 rl
= zfs_range_lock(&zv
->zv_znode
, uio
->uio_loffset
, uio
->uio_resid
,
1407 while (uio
->uio_resid
> 0 && uio
->uio_loffset
< volsize
) {
1408 uint64_t bytes
= MIN(uio
->uio_resid
, DMU_MAX_ACCESS
>> 1);
1410 /* don't read past the end */
1411 if (bytes
> volsize
- uio
->uio_loffset
)
1412 bytes
= volsize
- uio
->uio_loffset
;
1414 error
= dmu_read_uio(zv
->zv_objset
, ZVOL_OBJ
, uio
, bytes
);
1416 /* convert checksum errors into IO errors */
1417 if (error
== ECKSUM
)
1418 error
= SET_ERROR(EIO
);
1422 zfs_range_unlock(rl
);
1428 zvol_write(dev_t dev
, uio_t
*uio
, cred_t
*cr
)
1430 minor_t minor
= getminor(dev
);
1437 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
1439 return (SET_ERROR(ENXIO
));
1441 volsize
= zv
->zv_volsize
;
1442 if (uio
->uio_resid
> 0 &&
1443 (uio
->uio_loffset
< 0 || uio
->uio_loffset
>= volsize
))
1444 return (SET_ERROR(EIO
));
1446 if (zv
->zv_flags
& ZVOL_DUMPIFIED
) {
1447 error
= physio(zvol_strategy
, NULL
, dev
, B_WRITE
,
1452 sync
= !(zv
->zv_flags
& ZVOL_WCE
) ||
1453 (zv
->zv_objset
->os_sync
== ZFS_SYNC_ALWAYS
);
1455 rl
= zfs_range_lock(&zv
->zv_znode
, uio
->uio_loffset
, uio
->uio_resid
,
1457 while (uio
->uio_resid
> 0 && uio
->uio_loffset
< volsize
) {
1458 uint64_t bytes
= MIN(uio
->uio_resid
, DMU_MAX_ACCESS
>> 1);
1459 uint64_t off
= uio
->uio_loffset
;
1460 dmu_tx_t
*tx
= dmu_tx_create(zv
->zv_objset
);
1462 if (bytes
> volsize
- off
) /* don't write past the end */
1463 bytes
= volsize
- off
;
1465 dmu_tx_hold_write(tx
, ZVOL_OBJ
, off
, bytes
);
1466 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1471 error
= dmu_write_uio_dbuf(zv
->zv_dbuf
, uio
, bytes
, tx
);
1473 zvol_log_write(zv
, tx
, off
, bytes
, sync
);
1479 zfs_range_unlock(rl
);
1481 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
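/*
 * Fabricate a minimal EFI label for the volume: a GPT header at LBA 1
 * and a single reserved partition entry covering the usable range.
 */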
1486 zvol_getefi(void *arg
, int flag
, uint64_t vs
, uint8_t bs
)
1488 struct uuid uuid
= EFI_RESERVED
;
1489 efi_gpe_t gpe
= { 0 };
1495 if (ddi_copyin(arg
, &efi
, sizeof (dk_efi_t
), flag
))
1496 return (SET_ERROR(EFAULT
));
1497 ptr
= (char *)(uintptr_t)efi
.dki_data_64
;
1498 length
= efi
.dki_length
;
1500 * Some clients may attempt to request a PMBR for the
1501 * zvol. Currently this interface will return EINVAL to
1502 * such requests. These requests could be supported by
1503 * adding a check for lba == 0 and consing up an appropriate
1506 if (efi
.dki_lba
< 1 || efi
.dki_lba
> 2 || length
<= 0)
1507 return (SET_ERROR(EINVAL
));
1509 gpe
.efi_gpe_StartingLBA
= LE_64(34ULL);
1510 gpe
.efi_gpe_EndingLBA
= LE_64((vs
>> bs
) - 1);
1511 UUID_LE_CONVERT(gpe
.efi_gpe_PartitionTypeGUID
, uuid
);
1513 if (efi
.dki_lba
== 1) {
1514 efi_gpt_t gpt
= { 0 };
1516 gpt
.efi_gpt_Signature
= LE_64(EFI_SIGNATURE
);
1517 gpt
.efi_gpt_Revision
= LE_32(EFI_VERSION_CURRENT
);
1518 gpt
.efi_gpt_HeaderSize
= LE_32(sizeof (gpt
));
1519 gpt
.efi_gpt_MyLBA
= LE_64(1ULL);
1520 gpt
.efi_gpt_FirstUsableLBA
= LE_64(34ULL);
1521 gpt
.efi_gpt_LastUsableLBA
= LE_64((vs
>> bs
) - 1);
1522 gpt
.efi_gpt_PartitionEntryLBA
= LE_64(2ULL);
1523 gpt
.efi_gpt_NumberOfPartitionEntries
= LE_32(1);
1524 gpt
.efi_gpt_SizeOfPartitionEntry
=
1525 LE_32(sizeof (efi_gpe_t
));
1526 CRC32(crc
, &gpe
, sizeof (gpe
), -1U, crc32_table
);
1527 gpt
.efi_gpt_PartitionEntryArrayCRC32
= LE_32(~crc
);
1528 CRC32(crc
, &gpt
, sizeof (gpt
), -1U, crc32_table
);
1529 gpt
.efi_gpt_HeaderCRC32
= LE_32(~crc
);
1530 if (ddi_copyout(&gpt
, ptr
, MIN(sizeof (gpt
), length
),
1532 return (SET_ERROR(EFAULT
));
1533 ptr
+= sizeof (gpt
);
1534 length
-= sizeof (gpt
);
1536 if (length
> 0 && ddi_copyout(&gpe
, ptr
, MIN(sizeof (gpe
),
1538 return (SET_ERROR(EFAULT
));
1543 * BEGIN entry points to allow external callers access to the volume.
1546 * Return the volume parameters needed for access from an external caller.
1547 * These values are invariant as long as the volume is held open.
1550 zvol_get_volume_params(minor_t minor
, uint64_t *blksize
,
1551 uint64_t *max_xfer_len
, void **minor_hdl
, void **objset_hdl
, void **zil_hdl
,
1552 void **rl_hdl
, void **bonus_hdl
)
1556 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
1558 return (SET_ERROR(ENXIO
));
1559 if (zv
->zv_flags
& ZVOL_DUMPIFIED
)
1560 return (SET_ERROR(ENXIO
));
1562 ASSERT(blksize
&& max_xfer_len
&& minor_hdl
&&
1563 objset_hdl
&& zil_hdl
&& rl_hdl
&& bonus_hdl
);
1565 *blksize
= zv
->zv_volblocksize
;
1566 *max_xfer_len
= (uint64_t)zvol_maxphys
;
1568 *objset_hdl
= zv
->zv_objset
;
1569 *zil_hdl
= zv
->zv_zilog
;
1570 *rl_hdl
= &zv
->zv_znode
;
1571 *bonus_hdl
= zv
->zv_dbuf
;
1576 * Return the current volume size to an external caller.
1577 * The size can change while the volume is open.
1580 zvol_get_volume_size(void *minor_hdl
)
1582 zvol_state_t
*zv
= minor_hdl
;
1584 return (zv
->zv_volsize
);
1588 * Return the current WCE setting to an external caller.
1589 * The WCE setting can change while the volume is open.
1592 zvol_get_volume_wce(void *minor_hdl
)
1594 zvol_state_t
*zv
= minor_hdl
;
1596 return ((zv
->zv_flags
& ZVOL_WCE
) ? 1 : 0);
1600 * Entry point for external callers to zvol_log_write
1603 zvol_log_write_minor(void *minor_hdl
, dmu_tx_t
*tx
, offset_t off
, ssize_t resid
,
1606 zvol_state_t
*zv
= minor_hdl
;
1608 zvol_log_write(zv
, tx
, off
, resid
, sync
);
1611 * END entry points to allow external callers access to the volume.
1615 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1618 zvol_log_truncate(zvol_state_t
*zv
, dmu_tx_t
*tx
, uint64_t off
, uint64_t len
,
1623 zilog_t
*zilog
= zv
->zv_zilog
;
1625 if (zil_replaying(zilog
, tx
))
1628 itx
= zil_itx_create(TX_TRUNCATE
, sizeof (*lr
));
1629 lr
= (lr_truncate_t
*)&itx
->itx_lr
;
1630 lr
->lr_foid
= ZVOL_OBJ
;
1631 lr
->lr_offset
= off
;
1632 lr
->lr_length
= len
;
1634 itx
->itx_sync
= sync
;
1635 zil_itx_assign(zilog
, itx
, tx
);
1639 * Dirtbag ioctls to support mkfs(8) for UFS filesystems. See dkio(7I).
1640 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1644 zvol_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int flag
, cred_t
*cr
, int *rvalp
)
1647 struct dk_callback
*dkc
;
1651 mutex_enter(&zfsdev_state_lock
);
1653 zv
= zfsdev_get_soft_state(getminor(dev
), ZSST_ZVOL
);
1656 mutex_exit(&zfsdev_state_lock
);
1657 return (SET_ERROR(ENXIO
));
1659 ASSERT(zv
->zv_total_opens
> 0);
1665 struct dk_cinfo dki
;
1667 bzero(&dki
, sizeof (dki
));
1668 (void) strcpy(dki
.dki_cname
, "zvol");
1669 (void) strcpy(dki
.dki_dname
, "zvol");
1670 dki
.dki_ctype
= DKC_UNKNOWN
;
1671 dki
.dki_unit
= getminor(dev
);
1672 dki
.dki_maxtransfer
=
1673 1 << (SPA_OLD_MAXBLOCKSHIFT
- zv
->zv_min_bs
);
1674 mutex_exit(&zfsdev_state_lock
);
1675 if (ddi_copyout(&dki
, (void *)arg
, sizeof (dki
), flag
))
1676 error
= SET_ERROR(EFAULT
);
1680 case DKIOCGMEDIAINFO
:
1682 struct dk_minfo dkm
;
1684 bzero(&dkm
, sizeof (dkm
));
1685 dkm
.dki_lbsize
= 1U << zv
->zv_min_bs
;
1686 dkm
.dki_capacity
= zv
->zv_volsize
>> zv
->zv_min_bs
;
1687 dkm
.dki_media_type
= DK_UNKNOWN
;
1688 mutex_exit(&zfsdev_state_lock
);
1689 if (ddi_copyout(&dkm
, (void *)arg
, sizeof (dkm
), flag
))
1690 error
= SET_ERROR(EFAULT
);
1694 case DKIOCGMEDIAINFOEXT
:
1696 struct dk_minfo_ext dkmext
;
1698 bzero(&dkmext
, sizeof (dkmext
));
1699 dkmext
.dki_lbsize
= 1U << zv
->zv_min_bs
;
1700 dkmext
.dki_pbsize
= zv
->zv_volblocksize
;
1701 dkmext
.dki_capacity
= zv
->zv_volsize
>> zv
->zv_min_bs
;
1702 dkmext
.dki_media_type
= DK_UNKNOWN
;
1703 mutex_exit(&zfsdev_state_lock
);
1704 if (ddi_copyout(&dkmext
, (void *)arg
, sizeof (dkmext
), flag
))
1705 error
= SET_ERROR(EFAULT
);
1711 uint64_t vs
= zv
->zv_volsize
;
1712 uint8_t bs
= zv
->zv_min_bs
;
1714 mutex_exit(&zfsdev_state_lock
);
1715 error
= zvol_getefi((void *)arg
, flag
, vs
, bs
);
1719 case DKIOCFLUSHWRITECACHE
:
1720 dkc
= (struct dk_callback
*)arg
;
1721 mutex_exit(&zfsdev_state_lock
);
1722 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
1723 if ((flag
& FKIOCTL
) && dkc
!= NULL
&& dkc
->dkc_callback
) {
1724 (*dkc
->dkc_callback
)(dkc
->dkc_cookie
, error
);
1731 int wce
= (zv
->zv_flags
& ZVOL_WCE
) ? 1 : 0;
1732 if (ddi_copyout(&wce
, (void *)arg
, sizeof (int),
1734 error
= SET_ERROR(EFAULT
);
1740 if (ddi_copyin((void *)arg
, &wce
, sizeof (int),
1742 error
= SET_ERROR(EFAULT
);
1746 zv
->zv_flags
|= ZVOL_WCE
;
1747 mutex_exit(&zfsdev_state_lock
);
1749 zv
->zv_flags
&= ~ZVOL_WCE
;
1750 mutex_exit(&zfsdev_state_lock
);
1751 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
1759 * commands using these (like prtvtoc) expect ENOTSUP
1760 * since we're emulating an EFI label
1762 error
= SET_ERROR(ENOTSUP
);
1766 rl
= zfs_range_lock(&zv
->zv_znode
, 0, zv
->zv_volsize
,
1768 error
= zvol_dumpify(zv
);
1769 zfs_range_unlock(rl
);
1773 if (!(zv
->zv_flags
& ZVOL_DUMPIFIED
))
1775 rl
= zfs_range_lock(&zv
->zv_znode
, 0, zv
->zv_volsize
,
1777 error
= zvol_dump_fini(zv
);
1778 zfs_range_unlock(rl
);
1786 if (!zvol_unmap_enabled
)
1789 if (ddi_copyin((void *)arg
, &df
, sizeof (df
), flag
)) {
1790 error
= SET_ERROR(EFAULT
);
1795 * Apply Postel's Law to length-checking. If they overshoot,
1796 * just blank out until the end, if there's a need to blank
1799 if (df
.df_start
>= zv
->zv_volsize
)
1800 break; /* No need to do anything... */
1802 mutex_exit(&zfsdev_state_lock
);
1804 rl
= zfs_range_lock(&zv
->zv_znode
, df
.df_start
, df
.df_length
,
1806 tx
= dmu_tx_create(zv
->zv_objset
);
1807 dmu_tx_mark_netfree(tx
);
1808 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1812 zvol_log_truncate(zv
, tx
, df
.df_start
,
1813 df
.df_length
, B_TRUE
);
1815 error
= dmu_free_long_range(zv
->zv_objset
, ZVOL_OBJ
,
1816 df
.df_start
, df
.df_length
);
1819 zfs_range_unlock(rl
);
1822 * If the write-cache is disabled, 'sync' property
1823 * is set to 'always', or if the caller is asking for
1824 * a synchronous free, commit this operation to the zil.
1825 * This will sync any previous uncommitted writes to the
1827 * Can be overridden by the zvol_unmap_sync_enabled tunable.
1829 if ((error
== 0) && zvol_unmap_sync_enabled
&&
1830 (!(zv
->zv_flags
& ZVOL_WCE
) ||
1831 (zv
->zv_objset
->os_sync
== ZFS_SYNC_ALWAYS
) ||
1832 (df
.df_flags
& DF_WAIT_SYNC
))) {
1833 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
1840 error
= SET_ERROR(ENOTTY
);
1844 mutex_exit(&zfsdev_state_lock
);
1851 return (zvol_minors
!= 0);
1857 VERIFY(ddi_soft_state_init(&zfsdev_state
, sizeof (zfs_soft_state_t
),
1859 mutex_init(&zfsdev_state_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1865 mutex_destroy(&zfsdev_state_lock
);
1866 ddi_soft_state_fini(&zfsdev_state
);
1871 zfs_mvdev_dump_feature_check(void *arg
, dmu_tx_t
*tx
)
1873 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
1875 if (spa_feature_is_active(spa
, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP
))
1882 zfs_mvdev_dump_activate_feature_sync(void *arg
, dmu_tx_t
*tx
)
1884 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
1886 spa_feature_incr(spa
, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP
, tx
);
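/*
 * Prepare a zvol for use as a dump device: free its existing data,
 * activate MULTI_VDEV_CRASH_DUMP if the pool layout requires it, save the
 * current property values in the ZAP, force dump-friendly settings, and
 * preallocate the volume.
 */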
1890 zvol_dump_init(zvol_state_t
*zv
, boolean_t resize
)
1894 objset_t
*os
= zv
->zv_objset
;
1895 spa_t
*spa
= dmu_objset_spa(os
);
1896 vdev_t
*vd
= spa
->spa_root_vdev
;
1897 nvlist_t
*nv
= NULL
;
1898 uint64_t version
= spa_version(spa
);
1899 uint64_t checksum
, compress
, refresrv
, vbs
, dedup
;
1901 ASSERT(MUTEX_HELD(&zfsdev_state_lock
));
1902 ASSERT(vd
->vdev_ops
== &vdev_root_ops
);
1904 error
= dmu_free_long_range(zv
->zv_objset
, ZVOL_OBJ
, 0,
1908 /* wait for dmu_free_long_range to actually free the blocks */
1909 txg_wait_synced(dmu_objset_pool(zv
->zv_objset
), 0);
1912 * If the pool on which the dump device is being initialized has more
1913 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1914 * enabled. If so, bump that feature's counter to indicate that the
1915 * feature is active. We also check the vdev type to handle the
1917 * # zpool create test raidz disk1 disk2 disk3
1918 * Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
1919 * the raidz vdev itself has 3 children.
1921 if (vd
->vdev_children
> 1 || vd
->vdev_ops
== &vdev_raidz_ops
) {
1922 if (!spa_feature_is_enabled(spa
,
1923 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP
))
1924 return (SET_ERROR(ENOTSUP
));
1925 (void) dsl_sync_task(spa_name(spa
),
1926 zfs_mvdev_dump_feature_check
,
1927 zfs_mvdev_dump_activate_feature_sync
, NULL
,
1928 2, ZFS_SPACE_CHECK_RESERVED
);
1932 error
= dsl_prop_get_integer(zv
->zv_name
,
1933 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), &compress
, NULL
);
1935 error
= dsl_prop_get_integer(zv
->zv_name
,
1936 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), &checksum
,
1940 error
= dsl_prop_get_integer(zv
->zv_name
,
1941 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
),
1945 error
= dsl_prop_get_integer(zv
->zv_name
,
1946 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE
), &vbs
,
1949 if (version
>= SPA_VERSION_DEDUP
&& error
== 0) {
1950 error
= dsl_prop_get_integer(zv
->zv_name
,
1951 zfs_prop_to_name(ZFS_PROP_DEDUP
), &dedup
, NULL
);
1957 tx
= dmu_tx_create(os
);
1958 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
1959 dmu_tx_hold_bonus(tx
, ZVOL_OBJ
);
1960 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1967 * If we are resizing the dump device then we only need to
1968 * update the refreservation to match the newly updated
1969 * zvolsize. Otherwise, we save off the original state of the
1970 * zvol so that we can restore them if the zvol is ever undumpified.
1973 error
= zap_update(os
, ZVOL_ZAP_OBJ
,
1974 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 8, 1,
1975 &zv
->zv_volsize
, tx
);
1977 error
= zap_update(os
, ZVOL_ZAP_OBJ
,
1978 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), 8, 1,
1981 error
= zap_update(os
, ZVOL_ZAP_OBJ
,
1982 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), 8, 1,
1986 error
= zap_update(os
, ZVOL_ZAP_OBJ
,
1987 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 8, 1,
1991 error
= zap_update(os
, ZVOL_ZAP_OBJ
,
1992 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE
), 8, 1,
1996 error
= dmu_object_set_blocksize(
1997 os
, ZVOL_OBJ
, SPA_OLD_MAXBLOCKSIZE
, 0, tx
);
1999 if (version
>= SPA_VERSION_DEDUP
&& error
== 0) {
2000 error
= zap_update(os
, ZVOL_ZAP_OBJ
,
2001 zfs_prop_to_name(ZFS_PROP_DEDUP
), 8, 1,
2005 zv
->zv_volblocksize
= SPA_OLD_MAXBLOCKSIZE
;
2010 * We only need update the zvol's property if we are initializing
2011 * the dump area for the first time.
2013 if (error
== 0 && !resize
) {
2015 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2016 * function. Otherwise, use the old default -- OFF.
2018 checksum
= spa_feature_is_active(spa
,
2019 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP
) ? ZIO_CHECKSUM_NOPARITY
:
2022 VERIFY(nvlist_alloc(&nv
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2023 VERIFY(nvlist_add_uint64(nv
,
2024 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 0) == 0);
2025 VERIFY(nvlist_add_uint64(nv
,
2026 zfs_prop_to_name(ZFS_PROP_COMPRESSION
),
2027 ZIO_COMPRESS_OFF
) == 0);
2028 VERIFY(nvlist_add_uint64(nv
,
2029 zfs_prop_to_name(ZFS_PROP_CHECKSUM
),
2031 if (version
>= SPA_VERSION_DEDUP
) {
2032 VERIFY(nvlist_add_uint64(nv
,
2033 zfs_prop_to_name(ZFS_PROP_DEDUP
),
2034 ZIO_CHECKSUM_OFF
) == 0);
2037 error
= zfs_set_prop_nvlist(zv
->zv_name
, ZPROP_SRC_LOCAL
,
2042 /* Allocate the space for the dump */
2044 error
= zvol_prealloc(zv
);
2049 zvol_dumpify(zvol_state_t
*zv
)
2052 uint64_t dumpsize
= 0;
2054 objset_t
*os
= zv
->zv_objset
;
2056 if (zv
->zv_flags
& ZVOL_RDONLY
)
2057 return (SET_ERROR(EROFS
));
2059 if (zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
,
2060 8, 1, &dumpsize
) != 0 || dumpsize
!= zv
->zv_volsize
) {
2061 boolean_t resize
= (dumpsize
> 0);
2063 if ((error
= zvol_dump_init(zv
, resize
)) != 0) {
2064 (void) zvol_dump_fini(zv
);
2070 * Build up our lba mapping.
2072 error
= zvol_get_lbas(zv
);
2074 (void) zvol_dump_fini(zv
);
2078 tx
= dmu_tx_create(os
);
2079 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
2080 error
= dmu_tx_assign(tx
, TXG_WAIT
);
2083 (void) zvol_dump_fini(zv
);
2087 zv
->zv_flags
|= ZVOL_DUMPIFIED
;
2088 error
= zap_update(os
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
, 8, 1,
2089 &zv
->zv_volsize
, tx
);
2093 (void) zvol_dump_fini(zv
);
2097 txg_wait_synced(dmu_objset_pool(os
), 0);
2102 zvol_dump_fini(zvol_state_t
*zv
)
2105 objset_t
*os
= zv
->zv_objset
;
2108 uint64_t checksum
, compress
, refresrv
, vbs
, dedup
;
2109 uint64_t version
= spa_version(dmu_objset_spa(zv
->zv_objset
));
2112 * Attempt to restore the zvol back to its pre-dumpified state.
2113 * This is a best-effort attempt as it's possible that not all
2114 * of these properties were initialized during the dumpify process
2115 * (i.e. error during zvol_dump_init).
2118 tx
= dmu_tx_create(os
);
2119 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
2120 error
= dmu_tx_assign(tx
, TXG_WAIT
);
2125 (void) zap_remove(os
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
, tx
);
2128 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2129 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), 8, 1, &checksum
);
2130 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2131 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), 8, 1, &compress
);
2132 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2133 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 8, 1, &refresrv
);
2134 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2135 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE
), 8, 1, &vbs
);
2137 VERIFY(nvlist_alloc(&nv
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2138 (void) nvlist_add_uint64(nv
,
2139 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), checksum
);
2140 (void) nvlist_add_uint64(nv
,
2141 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), compress
);
2142 (void) nvlist_add_uint64(nv
,
2143 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), refresrv
);
2144 if (version
>= SPA_VERSION_DEDUP
&&
2145 zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2146 zfs_prop_to_name(ZFS_PROP_DEDUP
), 8, 1, &dedup
) == 0) {
2147 (void) nvlist_add_uint64(nv
,
2148 zfs_prop_to_name(ZFS_PROP_DEDUP
), dedup
);
2150 (void) zfs_set_prop_nvlist(zv
->zv_name
, ZPROP_SRC_LOCAL
,
2154 zvol_free_extents(zv
);
2155 zv
->zv_flags
&= ~ZVOL_DUMPIFIED
;
2156 (void) dmu_free_long_range(os
, ZVOL_OBJ
, 0, DMU_OBJECT_END
);
2157 /* wait for dmu_free_long_range to actually free the blocks */
2158 txg_wait_synced(dmu_objset_pool(zv
->zv_objset
), 0);
2159 tx
= dmu_tx_create(os
);
2160 dmu_tx_hold_bonus(tx
, ZVOL_OBJ
);
2161 error
= dmu_tx_assign(tx
, TXG_WAIT
);
2166 if (dmu_object_set_blocksize(os
, ZVOL_OBJ
, vbs
, 0, tx
) == 0)
2167 zv
->zv_volblocksize
= vbs
;