 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/spa_impl.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include "zfs_namecheck.h"
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;
/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

/*
 * Toggle unmap functionality.
 */
boolean_t zvol_unmap_enabled = B_TRUE;
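/*
 * zvol_maxphys bounds any single transfer: zvol_minphys() clamps strategy
 * buffers to it and it is reported to external callers as the maximum
 * transfer length.  zvol_unmap_enabled gates handling of DKIOCFREE requests.
 */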
extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);

static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}
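/*
 * zvol_get_stats() above feeds property reporting: it folds the persistent
 * "size" ZAP entry and the data object's block size into the nvlist as the
 * volsize and volblocksize properties.
 */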
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			break;
	}

	return (zv);
}
/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}
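/*
 * The extent list built by zvol_map_block() above is what lets a dumpified
 * zvol be accessed without going through the DMU: zvol_dumpio() walks
 * zv_extents to translate a volume offset directly into a vdev offset at
 * crash-dump time.
 */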
static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}
static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}
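/*
 * zvol_create_cb() below is the dataset-creation callback for zvols: it
 * claims the single data object (ZVOL_OBJ) with the requested block size
 * and a ZAP object (ZVOL_ZAP_OBJ) that records the volume's "size".
 */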
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}
/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};
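/*
 * zvol_create_minor() hands this vector to zil_replay() when the pool is
 * writeable, so any TX_WRITE/TX_TRUNCATE records left in the log after a
 * crash are applied before the minor node is exposed.
 */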
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}
470 * Create a minor node (plus a whole lot more) for the specified volume.
473 zvol_create_minor(const char *name
)
475 zfs_soft_state_t
*zs
;
478 dmu_object_info_t doi
;
480 char chrbuf
[30], blkbuf
[30];
483 mutex_enter(&zfsdev_state_lock
);
485 if (zvol_minor_lookup(name
) != NULL
) {
486 mutex_exit(&zfsdev_state_lock
);
487 return (SET_ERROR(EEXIST
));
490 /* lie and say we're read-only */
491 error
= dmu_objset_own(name
, DMU_OST_ZVOL
, B_TRUE
, FTAG
, &os
);
494 mutex_exit(&zfsdev_state_lock
);
498 if ((minor
= zfsdev_minor_alloc()) == 0) {
499 dmu_objset_disown(os
, FTAG
);
500 mutex_exit(&zfsdev_state_lock
);
501 return (SET_ERROR(ENXIO
));
504 if (ddi_soft_state_zalloc(zfsdev_state
, minor
) != DDI_SUCCESS
) {
505 dmu_objset_disown(os
, FTAG
);
506 mutex_exit(&zfsdev_state_lock
);
507 return (SET_ERROR(EAGAIN
));
509 (void) ddi_prop_update_string(minor
, zfs_dip
, ZVOL_PROP_NAME
,
512 (void) snprintf(chrbuf
, sizeof (chrbuf
), "%u,raw", minor
);
514 if (ddi_create_minor_node(zfs_dip
, chrbuf
, S_IFCHR
,
515 minor
, DDI_PSEUDO
, 0) == DDI_FAILURE
) {
516 ddi_soft_state_free(zfsdev_state
, minor
);
517 dmu_objset_disown(os
, FTAG
);
518 mutex_exit(&zfsdev_state_lock
);
519 return (SET_ERROR(EAGAIN
));
522 (void) snprintf(blkbuf
, sizeof (blkbuf
), "%u", minor
);
524 if (ddi_create_minor_node(zfs_dip
, blkbuf
, S_IFBLK
,
525 minor
, DDI_PSEUDO
, 0) == DDI_FAILURE
) {
526 ddi_remove_minor_node(zfs_dip
, chrbuf
);
527 ddi_soft_state_free(zfsdev_state
, minor
);
528 dmu_objset_disown(os
, FTAG
);
529 mutex_exit(&zfsdev_state_lock
);
530 return (SET_ERROR(EAGAIN
));
533 zs
= ddi_get_soft_state(zfsdev_state
, minor
);
534 zs
->zss_type
= ZSST_ZVOL
;
535 zv
= zs
->zss_data
= kmem_zalloc(sizeof (zvol_state_t
), KM_SLEEP
);
536 (void) strlcpy(zv
->zv_name
, name
, MAXPATHLEN
);
537 zv
->zv_min_bs
= DEV_BSHIFT
;
538 zv
->zv_minor
= minor
;
540 if (dmu_objset_is_snapshot(os
) || !spa_writeable(dmu_objset_spa(os
)))
541 zv
->zv_flags
|= ZVOL_RDONLY
;
542 mutex_init(&zv
->zv_znode
.z_range_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
543 avl_create(&zv
->zv_znode
.z_range_avl
, zfs_range_compare
,
544 sizeof (rl_t
), offsetof(rl_t
, r_node
));
545 list_create(&zv
->zv_extents
, sizeof (zvol_extent_t
),
546 offsetof(zvol_extent_t
, ze_node
));
547 /* get and cache the blocksize */
548 error
= dmu_object_info(os
, ZVOL_OBJ
, &doi
);
550 zv
->zv_volblocksize
= doi
.doi_data_block_size
;
552 if (spa_writeable(dmu_objset_spa(os
))) {
553 if (zil_replay_disable
)
554 zil_destroy(dmu_objset_zil(os
), B_FALSE
);
556 zil_replay(os
, zv
, zvol_replay_vector
);
558 dmu_objset_disown(os
, FTAG
);
559 zv
->zv_objset
= NULL
;
563 mutex_exit(&zfsdev_state_lock
);
569 * Remove minor node for the specified volume.
572 zvol_remove_zv(zvol_state_t
*zv
)
575 minor_t minor
= zv
->zv_minor
;
577 ASSERT(MUTEX_HELD(&zfsdev_state_lock
));
578 if (zv
->zv_total_opens
!= 0)
579 return (SET_ERROR(EBUSY
));
581 (void) snprintf(nmbuf
, sizeof (nmbuf
), "%u,raw", minor
);
582 ddi_remove_minor_node(zfs_dip
, nmbuf
);
584 (void) snprintf(nmbuf
, sizeof (nmbuf
), "%u", minor
);
585 ddi_remove_minor_node(zfs_dip
, nmbuf
);
587 avl_destroy(&zv
->zv_znode
.z_range_avl
);
588 mutex_destroy(&zv
->zv_znode
.z_range_lock
);
590 kmem_free(zv
, sizeof (zvol_state_t
));
592 ddi_soft_state_free(zfsdev_state
, minor
);
599 zvol_remove_minor(const char *name
)
604 mutex_enter(&zfsdev_state_lock
);
605 if ((zv
= zvol_minor_lookup(name
)) == NULL
) {
606 mutex_exit(&zfsdev_state_lock
);
607 return (SET_ERROR(ENXIO
));
609 rc
= zvol_remove_zv(zv
);
610 mutex_exit(&zfsdev_state_lock
);
615 zvol_first_open(zvol_state_t
*zv
)
622 /* lie and say we're read-only */
623 error
= dmu_objset_own(zv
->zv_name
, DMU_OST_ZVOL
, B_TRUE
,
629 error
= zap_lookup(os
, ZVOL_ZAP_OBJ
, "size", 8, 1, &volsize
);
632 dmu_objset_disown(os
, zvol_tag
);
636 error
= dmu_bonus_hold(os
, ZVOL_OBJ
, zvol_tag
, &zv
->zv_dbuf
);
638 dmu_objset_disown(os
, zvol_tag
);
642 zvol_size_changed(zv
, volsize
);
643 zv
->zv_zilog
= zil_open(os
, zvol_get_data
);
645 VERIFY(dsl_prop_get_integer(zv
->zv_name
, "readonly", &readonly
,
647 if (readonly
|| dmu_objset_is_snapshot(os
) ||
648 !spa_writeable(dmu_objset_spa(os
)))
649 zv
->zv_flags
|= ZVOL_RDONLY
;
651 zv
->zv_flags
&= ~ZVOL_RDONLY
;
656 zvol_last_close(zvol_state_t
*zv
)
658 zil_close(zv
->zv_zilog
);
661 dmu_buf_rele(zv
->zv_dbuf
, zvol_tag
);
667 if (dsl_dataset_is_dirty(dmu_objset_ds(zv
->zv_objset
)) &&
668 !(zv
->zv_flags
& ZVOL_RDONLY
))
669 txg_wait_synced(dmu_objset_pool(zv
->zv_objset
), 0);
670 dmu_objset_evict_dbufs(zv
->zv_objset
);
672 dmu_objset_disown(zv
->zv_objset
, zvol_tag
);
673 zv
->zv_objset
= NULL
;
677 zvol_prealloc(zvol_state_t
*zv
)
679 objset_t
*os
= zv
->zv_objset
;
681 uint64_t refd
, avail
, usedobjs
, availobjs
;
682 uint64_t resid
= zv
->zv_volsize
;
685 /* Check the space usage before attempting to allocate the space */
686 dmu_objset_space(os
, &refd
, &avail
, &usedobjs
, &availobjs
);
687 if (avail
< zv
->zv_volsize
)
688 return (SET_ERROR(ENOSPC
));
690 /* Free old extents if they exist */
691 zvol_free_extents(zv
);
695 uint64_t bytes
= MIN(resid
, SPA_OLD_MAXBLOCKSIZE
);
697 tx
= dmu_tx_create(os
);
698 dmu_tx_hold_write(tx
, ZVOL_OBJ
, off
, bytes
);
699 error
= dmu_tx_assign(tx
, TXG_WAIT
);
702 (void) dmu_free_long_range(os
, ZVOL_OBJ
, 0, off
);
705 dmu_prealloc(os
, ZVOL_OBJ
, off
, bytes
, tx
);
710 txg_wait_synced(dmu_objset_pool(os
), 0);
716 zvol_update_volsize(objset_t
*os
, uint64_t volsize
)
721 ASSERT(MUTEX_HELD(&zfsdev_state_lock
));
723 tx
= dmu_tx_create(os
);
724 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
725 dmu_tx_mark_netfree(tx
);
726 error
= dmu_tx_assign(tx
, TXG_WAIT
);
732 error
= zap_update(os
, ZVOL_ZAP_OBJ
, "size", 8, 1,
737 error
= dmu_free_long_range(os
,
738 ZVOL_OBJ
, volsize
, DMU_OBJECT_END
);
743 zvol_remove_minors(const char *name
)
749 namebuf
= kmem_zalloc(strlen(name
) + 2, KM_SLEEP
);
750 (void) strncpy(namebuf
, name
, strlen(name
));
751 (void) strcat(namebuf
, "/");
752 mutex_enter(&zfsdev_state_lock
);
753 for (minor
= 1; minor
<= ZFSDEV_MAX_MINOR
; minor
++) {
755 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
758 if (strncmp(namebuf
, zv
->zv_name
, strlen(namebuf
)) == 0)
759 (void) zvol_remove_zv(zv
);
761 kmem_free(namebuf
, strlen(name
) + 2);
763 mutex_exit(&zfsdev_state_lock
);
767 zvol_update_live_volsize(zvol_state_t
*zv
, uint64_t volsize
)
769 uint64_t old_volsize
= 0ULL;
772 ASSERT(MUTEX_HELD(&zfsdev_state_lock
));
775 * Reinitialize the dump area to the new size. If we
776 * failed to resize the dump area then restore it back to
777 * its original size. We must set the new volsize prior
778 * to calling dumpvp_resize() to ensure that the devices'
779 * size(9P) is not visible by the dump subsystem.
781 old_volsize
= zv
->zv_volsize
;
782 zvol_size_changed(zv
, volsize
);
784 if (zv
->zv_flags
& ZVOL_DUMPIFIED
) {
785 if ((error
= zvol_dumpify(zv
)) != 0 ||
786 (error
= dumpvp_resize()) != 0) {
789 (void) zvol_update_volsize(zv
->zv_objset
, old_volsize
);
790 zvol_size_changed(zv
, old_volsize
);
791 dumpify_error
= zvol_dumpify(zv
);
792 error
= dumpify_error
? dumpify_error
: error
;
797 * Generate a LUN expansion event.
802 char *physpath
= kmem_zalloc(MAXPATHLEN
, KM_SLEEP
);
804 (void) snprintf(physpath
, MAXPATHLEN
, "%s%u", ZVOL_PSEUDO_DEV
,
807 VERIFY(nvlist_alloc(&attr
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
808 VERIFY(nvlist_add_string(attr
, DEV_PHYS_PATH
, physpath
) == 0);
810 (void) ddi_log_sysevent(zfs_dip
, SUNW_VENDOR
, EC_DEV_STATUS
,
811 ESC_DEV_DLE
, attr
, &eid
, DDI_SLEEP
);
814 kmem_free(physpath
, MAXPATHLEN
);
820 zvol_set_volsize(const char *name
, uint64_t volsize
)
822 zvol_state_t
*zv
= NULL
;
825 dmu_object_info_t doi
;
827 boolean_t owned
= B_FALSE
;
829 error
= dsl_prop_get_integer(name
,
830 zfs_prop_to_name(ZFS_PROP_READONLY
), &readonly
, NULL
);
834 return (SET_ERROR(EROFS
));
836 mutex_enter(&zfsdev_state_lock
);
837 zv
= zvol_minor_lookup(name
);
839 if (zv
== NULL
|| zv
->zv_objset
== NULL
) {
840 if ((error
= dmu_objset_own(name
, DMU_OST_ZVOL
, B_FALSE
,
842 mutex_exit(&zfsdev_state_lock
);
852 if ((error
= dmu_object_info(os
, ZVOL_OBJ
, &doi
)) != 0 ||
853 (error
= zvol_check_volsize(volsize
, doi
.doi_data_block_size
)) != 0)
856 error
= zvol_update_volsize(os
, volsize
);
858 if (error
== 0 && zv
!= NULL
)
859 error
= zvol_update_live_volsize(zv
, volsize
);
862 dmu_objset_disown(os
, FTAG
);
864 zv
->zv_objset
= NULL
;
866 mutex_exit(&zfsdev_state_lock
);
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
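/*
 * zvol_get_done() above is the callback handed to dmu_sync() by
 * zvol_get_data(): it drops the dbuf hold and range lock taken for an
 * indirect write and, on success, records the block in the ZIL.
 */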
977 * Get data to generate a TX_WRITE intent log record.
980 zvol_get_data(void *arg
, lr_write_t
*lr
, char *buf
, zio_t
*zio
)
982 zvol_state_t
*zv
= arg
;
983 objset_t
*os
= zv
->zv_objset
;
984 uint64_t object
= ZVOL_OBJ
;
985 uint64_t offset
= lr
->lr_offset
;
986 uint64_t size
= lr
->lr_length
; /* length of user data */
987 blkptr_t
*bp
= &lr
->lr_blkptr
;
995 zgd
= kmem_zalloc(sizeof (zgd_t
), KM_SLEEP
);
996 zgd
->zgd_zilog
= zv
->zv_zilog
;
997 zgd
->zgd_rl
= zfs_range_lock(&zv
->zv_znode
, offset
, size
, RL_READER
);
1000 * Write records come in two flavors: immediate and indirect.
1001 * For small writes it's cheaper to store the data with the
1002 * log record (immediate); for large writes it's cheaper to
1003 * sync the data and get a pointer to it (indirect) so that
1004 * we don't have to write the data twice.
1006 if (buf
!= NULL
) { /* immediate write */
1007 error
= dmu_read(os
, object
, offset
, size
, buf
,
1008 DMU_READ_NO_PREFETCH
);
1010 size
= zv
->zv_volblocksize
;
1011 offset
= P2ALIGN(offset
, size
);
1012 error
= dmu_buf_hold(os
, object
, offset
, zgd
, &db
,
1013 DMU_READ_NO_PREFETCH
);
1015 blkptr_t
*obp
= dmu_buf_get_blkptr(db
);
1017 ASSERT(BP_IS_HOLE(bp
));
1024 ASSERT(db
->db_offset
== offset
);
1025 ASSERT(db
->db_size
== size
);
1027 error
= dmu_sync(zio
, lr
->lr_common
.lrc_txg
,
1028 zvol_get_done
, zgd
);
1035 zvol_get_done(zgd
, error
);
1041 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1043 * We store data in the log buffers if it's small enough.
1044 * Otherwise we will later flush the data out via dmu_sync().
1046 ssize_t zvol_immediate_write_sz
= 32768;
1049 zvol_log_write(zvol_state_t
*zv
, dmu_tx_t
*tx
, offset_t off
, ssize_t resid
,
1052 uint32_t blocksize
= zv
->zv_volblocksize
;
1053 zilog_t
*zilog
= zv
->zv_zilog
;
1055 ssize_t immediate_write_sz
;
1057 if (zil_replaying(zilog
, tx
))
1060 immediate_write_sz
= (zilog
->zl_logbias
== ZFS_LOGBIAS_THROUGHPUT
)
1061 ? 0 : zvol_immediate_write_sz
;
1063 slogging
= spa_has_slogs(zilog
->zl_spa
) &&
1064 (zilog
->zl_logbias
== ZFS_LOGBIAS_LATENCY
);
1070 itx_wr_state_t write_state
;
1073 * Unlike zfs_log_write() we can be called with
1074 * upto DMU_MAX_ACCESS/2 (5MB) writes.
1076 if (blocksize
> immediate_write_sz
&& !slogging
&&
1077 resid
>= blocksize
&& off
% blocksize
== 0) {
1078 write_state
= WR_INDIRECT
; /* uses dmu_sync */
1081 write_state
= WR_COPIED
;
1082 len
= MIN(ZIL_MAX_LOG_DATA
, resid
);
1084 write_state
= WR_NEED_COPY
;
1085 len
= MIN(ZIL_MAX_LOG_DATA
, resid
);
1088 itx
= zil_itx_create(TX_WRITE
, sizeof (*lr
) +
1089 (write_state
== WR_COPIED
? len
: 0));
1090 lr
= (lr_write_t
*)&itx
->itx_lr
;
1091 if (write_state
== WR_COPIED
&& dmu_read(zv
->zv_objset
,
1092 ZVOL_OBJ
, off
, len
, lr
+ 1, DMU_READ_NO_PREFETCH
) != 0) {
1093 zil_itx_destroy(itx
);
1094 itx
= zil_itx_create(TX_WRITE
, sizeof (*lr
));
1095 lr
= (lr_write_t
*)&itx
->itx_lr
;
1096 write_state
= WR_NEED_COPY
;
1099 itx
->itx_wr_state
= write_state
;
1100 if (write_state
== WR_NEED_COPY
)
1101 itx
->itx_sod
+= len
;
1102 lr
->lr_foid
= ZVOL_OBJ
;
1103 lr
->lr_offset
= off
;
1104 lr
->lr_length
= len
;
1106 BP_ZERO(&lr
->lr_blkptr
);
1108 itx
->itx_private
= zv
;
1109 itx
->itx_sync
= sync
;
1111 zil_itx_assign(zilog
, itx
, tx
);
1119 zvol_dumpio_vdev(vdev_t
*vd
, void *addr
, uint64_t offset
, uint64_t origoffset
,
1120 uint64_t size
, boolean_t doread
, boolean_t isdump
)
1126 if (vd
->vdev_ops
== &vdev_mirror_ops
||
1127 vd
->vdev_ops
== &vdev_replacing_ops
||
1128 vd
->vdev_ops
== &vdev_spare_ops
) {
1129 for (c
= 0; c
< vd
->vdev_children
; c
++) {
1130 int err
= zvol_dumpio_vdev(vd
->vdev_child
[c
],
1131 addr
, offset
, origoffset
, size
, doread
, isdump
);
1134 } else if (doread
) {
1140 if (!vd
->vdev_ops
->vdev_op_leaf
&& vd
->vdev_ops
!= &vdev_raidz_ops
)
1141 return (numerrors
< vd
->vdev_children
? 0 : EIO
);
1143 if (doread
&& !vdev_readable(vd
))
1144 return (SET_ERROR(EIO
));
1145 else if (!doread
&& !vdev_writeable(vd
))
1146 return (SET_ERROR(EIO
));
1148 if (vd
->vdev_ops
== &vdev_raidz_ops
) {
1149 return (vdev_raidz_physio(vd
,
1150 addr
, size
, offset
, origoffset
, doread
, isdump
));
1153 offset
+= VDEV_LABEL_START_SIZE
;
1155 if (ddi_in_panic() || isdump
) {
1158 return (SET_ERROR(EIO
));
1160 ASSERT3P(dvd
, !=, NULL
);
1161 return (ldi_dump(dvd
->vd_lh
, addr
, lbtodb(offset
),
1165 ASSERT3P(dvd
, !=, NULL
);
1166 return (vdev_disk_ldi_physio(dvd
->vd_lh
, addr
, size
,
1167 offset
, doread
? B_READ
: B_WRITE
));
1172 zvol_dumpio(zvol_state_t
*zv
, void *addr
, uint64_t offset
, uint64_t size
,
1173 boolean_t doread
, boolean_t isdump
)
1178 spa_t
*spa
= dmu_objset_spa(zv
->zv_objset
);
1180 /* Must be sector aligned, and not stradle a block boundary. */
1181 if (P2PHASE(offset
, DEV_BSIZE
) || P2PHASE(size
, DEV_BSIZE
) ||
1182 P2BOUNDARY(offset
, size
, zv
->zv_volblocksize
)) {
1183 return (SET_ERROR(EINVAL
));
1185 ASSERT(size
<= zv
->zv_volblocksize
);
1187 /* Locate the extent this belongs to */
1188 ze
= list_head(&zv
->zv_extents
);
1189 while (offset
>= ze
->ze_nblks
* zv
->zv_volblocksize
) {
1190 offset
-= ze
->ze_nblks
* zv
->zv_volblocksize
;
1191 ze
= list_next(&zv
->zv_extents
, ze
);
1195 return (SET_ERROR(EINVAL
));
1197 if (!ddi_in_panic())
1198 spa_config_enter(spa
, SCL_STATE
, FTAG
, RW_READER
);
1200 vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&ze
->ze_dva
));
1201 offset
+= DVA_GET_OFFSET(&ze
->ze_dva
);
1202 error
= zvol_dumpio_vdev(vd
, addr
, offset
, DVA_GET_OFFSET(&ze
->ze_dva
),
1203 size
, doread
, isdump
);
1205 if (!ddi_in_panic())
1206 spa_config_exit(spa
, SCL_STATE
, FTAG
);
1212 zvol_strategy(buf_t
*bp
)
1214 zfs_soft_state_t
*zs
= NULL
;
1216 uint64_t off
, volsize
;
1222 boolean_t doread
= bp
->b_flags
& B_READ
;
1223 boolean_t is_dumpified
;
1226 if (getminor(bp
->b_edev
) == 0) {
1227 error
= SET_ERROR(EINVAL
);
1229 zs
= ddi_get_soft_state(zfsdev_state
, getminor(bp
->b_edev
));
1231 error
= SET_ERROR(ENXIO
);
1232 else if (zs
->zss_type
!= ZSST_ZVOL
)
1233 error
= SET_ERROR(EINVAL
);
1237 bioerror(bp
, error
);
1244 if (!(bp
->b_flags
& B_READ
) && (zv
->zv_flags
& ZVOL_RDONLY
)) {
1245 bioerror(bp
, EROFS
);
1250 off
= ldbtob(bp
->b_blkno
);
1251 volsize
= zv
->zv_volsize
;
1257 addr
= bp
->b_un
.b_addr
;
1258 resid
= bp
->b_bcount
;
1260 if (resid
> 0 && (off
< 0 || off
>= volsize
)) {
1266 is_dumpified
= zv
->zv_flags
& ZVOL_DUMPIFIED
;
1267 sync
= ((!(bp
->b_flags
& B_ASYNC
) &&
1268 !(zv
->zv_flags
& ZVOL_WCE
)) ||
1269 (zv
->zv_objset
->os_sync
== ZFS_SYNC_ALWAYS
)) &&
1270 !doread
&& !is_dumpified
;
1273 * There must be no buffer changes when doing a dmu_sync() because
1274 * we can't change the data whilst calculating the checksum.
1276 rl
= zfs_range_lock(&zv
->zv_znode
, off
, resid
,
1277 doread
? RL_READER
: RL_WRITER
);
1279 while (resid
!= 0 && off
< volsize
) {
1280 size_t size
= MIN(resid
, zvol_maxphys
);
1282 size
= MIN(size
, P2END(off
, zv
->zv_volblocksize
) - off
);
1283 error
= zvol_dumpio(zv
, addr
, off
, size
,
1285 } else if (doread
) {
1286 error
= dmu_read(os
, ZVOL_OBJ
, off
, size
, addr
,
1289 dmu_tx_t
*tx
= dmu_tx_create(os
);
1290 dmu_tx_hold_write(tx
, ZVOL_OBJ
, off
, size
);
1291 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1295 dmu_write(os
, ZVOL_OBJ
, off
, size
, addr
, tx
);
1296 zvol_log_write(zv
, tx
, off
, size
, sync
);
1301 /* convert checksum errors into IO errors */
1302 if (error
== ECKSUM
)
1303 error
= SET_ERROR(EIO
);
1310 zfs_range_unlock(rl
);
1312 if ((bp
->b_resid
= resid
) == bp
->b_bcount
)
1313 bioerror(bp
, off
> volsize
? EINVAL
: error
);
1316 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}
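/*
 * zvol_read() and zvol_write() go through physio() when the volume is
 * dumpified; zvol_minphys() above acts as the minphys routine in that path,
 * so each physical transfer is capped at zvol_maxphys.
 */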
1338 zvol_dump(dev_t dev
, caddr_t addr
, daddr_t blkno
, int nblocks
)
1340 minor_t minor
= getminor(dev
);
1347 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
1349 return (SET_ERROR(ENXIO
));
1351 if ((zv
->zv_flags
& ZVOL_DUMPIFIED
) == 0)
1352 return (SET_ERROR(EINVAL
));
1354 boff
= ldbtob(blkno
);
1355 resid
= ldbtob(nblocks
);
1357 VERIFY3U(boff
+ resid
, <=, zv
->zv_volsize
);
1360 size
= MIN(resid
, P2END(boff
, zv
->zv_volblocksize
) - boff
);
1361 error
= zvol_dumpio(zv
, addr
, boff
, size
, B_FALSE
, B_TRUE
);
1374 zvol_read(dev_t dev
, uio_t
*uio
, cred_t
*cr
)
1376 minor_t minor
= getminor(dev
);
1382 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
1384 return (SET_ERROR(ENXIO
));
1386 volsize
= zv
->zv_volsize
;
1387 if (uio
->uio_resid
> 0 &&
1388 (uio
->uio_loffset
< 0 || uio
->uio_loffset
>= volsize
))
1389 return (SET_ERROR(EIO
));
1391 if (zv
->zv_flags
& ZVOL_DUMPIFIED
) {
1392 error
= physio(zvol_strategy
, NULL
, dev
, B_READ
,
1397 rl
= zfs_range_lock(&zv
->zv_znode
, uio
->uio_loffset
, uio
->uio_resid
,
1399 while (uio
->uio_resid
> 0 && uio
->uio_loffset
< volsize
) {
1400 uint64_t bytes
= MIN(uio
->uio_resid
, DMU_MAX_ACCESS
>> 1);
1402 /* don't read past the end */
1403 if (bytes
> volsize
- uio
->uio_loffset
)
1404 bytes
= volsize
- uio
->uio_loffset
;
1406 error
= dmu_read_uio(zv
->zv_objset
, ZVOL_OBJ
, uio
, bytes
);
1408 /* convert checksum errors into IO errors */
1409 if (error
== ECKSUM
)
1410 error
= SET_ERROR(EIO
);
1414 zfs_range_unlock(rl
);
1420 zvol_write(dev_t dev
, uio_t
*uio
, cred_t
*cr
)
1422 minor_t minor
= getminor(dev
);
1429 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
1431 return (SET_ERROR(ENXIO
));
1433 volsize
= zv
->zv_volsize
;
1434 if (uio
->uio_resid
> 0 &&
1435 (uio
->uio_loffset
< 0 || uio
->uio_loffset
>= volsize
))
1436 return (SET_ERROR(EIO
));
1438 if (zv
->zv_flags
& ZVOL_DUMPIFIED
) {
1439 error
= physio(zvol_strategy
, NULL
, dev
, B_WRITE
,
1444 sync
= !(zv
->zv_flags
& ZVOL_WCE
) ||
1445 (zv
->zv_objset
->os_sync
== ZFS_SYNC_ALWAYS
);
1447 rl
= zfs_range_lock(&zv
->zv_znode
, uio
->uio_loffset
, uio
->uio_resid
,
1449 while (uio
->uio_resid
> 0 && uio
->uio_loffset
< volsize
) {
1450 uint64_t bytes
= MIN(uio
->uio_resid
, DMU_MAX_ACCESS
>> 1);
1451 uint64_t off
= uio
->uio_loffset
;
1452 dmu_tx_t
*tx
= dmu_tx_create(zv
->zv_objset
);
1454 if (bytes
> volsize
- off
) /* don't write past the end */
1455 bytes
= volsize
- off
;
1457 dmu_tx_hold_write(tx
, ZVOL_OBJ
, off
, bytes
);
1458 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1463 error
= dmu_write_uio_dbuf(zv
->zv_dbuf
, uio
, bytes
, tx
);
1465 zvol_log_write(zv
, tx
, off
, bytes
, sync
);
1471 zfs_range_unlock(rl
);
1473 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
1478 zvol_getefi(void *arg
, int flag
, uint64_t vs
, uint8_t bs
)
1480 struct uuid uuid
= EFI_RESERVED
;
1481 efi_gpe_t gpe
= { 0 };
1487 if (ddi_copyin(arg
, &efi
, sizeof (dk_efi_t
), flag
))
1488 return (SET_ERROR(EFAULT
));
1489 ptr
= (char *)(uintptr_t)efi
.dki_data_64
;
1490 length
= efi
.dki_length
;
1492 * Some clients may attempt to request a PMBR for the
1493 * zvol. Currently this interface will return EINVAL to
1494 * such requests. These requests could be supported by
1495 * adding a check for lba == 0 and consing up an appropriate
1498 if (efi
.dki_lba
< 1 || efi
.dki_lba
> 2 || length
<= 0)
1499 return (SET_ERROR(EINVAL
));
1501 gpe
.efi_gpe_StartingLBA
= LE_64(34ULL);
1502 gpe
.efi_gpe_EndingLBA
= LE_64((vs
>> bs
) - 1);
1503 UUID_LE_CONVERT(gpe
.efi_gpe_PartitionTypeGUID
, uuid
);
1505 if (efi
.dki_lba
== 1) {
1506 efi_gpt_t gpt
= { 0 };
1508 gpt
.efi_gpt_Signature
= LE_64(EFI_SIGNATURE
);
1509 gpt
.efi_gpt_Revision
= LE_32(EFI_VERSION_CURRENT
);
1510 gpt
.efi_gpt_HeaderSize
= LE_32(sizeof (gpt
));
1511 gpt
.efi_gpt_MyLBA
= LE_64(1ULL);
1512 gpt
.efi_gpt_FirstUsableLBA
= LE_64(34ULL);
1513 gpt
.efi_gpt_LastUsableLBA
= LE_64((vs
>> bs
) - 1);
1514 gpt
.efi_gpt_PartitionEntryLBA
= LE_64(2ULL);
1515 gpt
.efi_gpt_NumberOfPartitionEntries
= LE_32(1);
1516 gpt
.efi_gpt_SizeOfPartitionEntry
=
1517 LE_32(sizeof (efi_gpe_t
));
1518 CRC32(crc
, &gpe
, sizeof (gpe
), -1U, crc32_table
);
1519 gpt
.efi_gpt_PartitionEntryArrayCRC32
= LE_32(~crc
);
1520 CRC32(crc
, &gpt
, sizeof (gpt
), -1U, crc32_table
);
1521 gpt
.efi_gpt_HeaderCRC32
= LE_32(~crc
);
1522 if (ddi_copyout(&gpt
, ptr
, MIN(sizeof (gpt
), length
),
1524 return (SET_ERROR(EFAULT
));
1525 ptr
+= sizeof (gpt
);
1526 length
-= sizeof (gpt
);
1528 if (length
> 0 && ddi_copyout(&gpe
, ptr
, MIN(sizeof (gpe
),
1530 return (SET_ERROR(EFAULT
));
1535 * BEGIN entry points to allow external callers access to the volume.
1538 * Return the volume parameters needed for access from an external caller.
1539 * These values are invariant as long as the volume is held open.
1542 zvol_get_volume_params(minor_t minor
, uint64_t *blksize
,
1543 uint64_t *max_xfer_len
, void **minor_hdl
, void **objset_hdl
, void **zil_hdl
,
1544 void **rl_hdl
, void **bonus_hdl
)
1548 zv
= zfsdev_get_soft_state(minor
, ZSST_ZVOL
);
1550 return (SET_ERROR(ENXIO
));
1551 if (zv
->zv_flags
& ZVOL_DUMPIFIED
)
1552 return (SET_ERROR(ENXIO
));
1554 ASSERT(blksize
&& max_xfer_len
&& minor_hdl
&&
1555 objset_hdl
&& zil_hdl
&& rl_hdl
&& bonus_hdl
);
1557 *blksize
= zv
->zv_volblocksize
;
1558 *max_xfer_len
= (uint64_t)zvol_maxphys
;
1560 *objset_hdl
= zv
->zv_objset
;
1561 *zil_hdl
= zv
->zv_zilog
;
1562 *rl_hdl
= &zv
->zv_znode
;
1563 *bonus_hdl
= zv
->zv_dbuf
;
1568 * Return the current volume size to an external caller.
1569 * The size can change while the volume is open.
1572 zvol_get_volume_size(void *minor_hdl
)
1574 zvol_state_t
*zv
= minor_hdl
;
1576 return (zv
->zv_volsize
);
1580 * Return the current WCE setting to an external caller.
1581 * The WCE setting can change while the volume is open.
1584 zvol_get_volume_wce(void *minor_hdl
)
1586 zvol_state_t
*zv
= minor_hdl
;
1588 return ((zv
->zv_flags
& ZVOL_WCE
) ? 1 : 0);
1592 * Entry point for external callers to zvol_log_write
1595 zvol_log_write_minor(void *minor_hdl
, dmu_tx_t
*tx
, offset_t off
, ssize_t resid
,
1598 zvol_state_t
*zv
= minor_hdl
;
1600 zvol_log_write(zv
, tx
, off
, resid
, sync
);
1603 * END entry points to allow external callers access to the volume.
1607 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1610 zvol_log_truncate(zvol_state_t
*zv
, dmu_tx_t
*tx
, uint64_t off
, uint64_t len
,
1615 zilog_t
*zilog
= zv
->zv_zilog
;
1617 if (zil_replaying(zilog
, tx
))
1620 itx
= zil_itx_create(TX_TRUNCATE
, sizeof (*lr
));
1621 lr
= (lr_truncate_t
*)&itx
->itx_lr
;
1622 lr
->lr_foid
= ZVOL_OBJ
;
1623 lr
->lr_offset
= off
;
1624 lr
->lr_length
= len
;
1626 itx
->itx_sync
= sync
;
1627 zil_itx_assign(zilog
, itx
, tx
);
1631 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I).
1632 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1636 zvol_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int flag
, cred_t
*cr
, int *rvalp
)
1639 struct dk_callback
*dkc
;
1643 mutex_enter(&zfsdev_state_lock
);
1645 zv
= zfsdev_get_soft_state(getminor(dev
), ZSST_ZVOL
);
1648 mutex_exit(&zfsdev_state_lock
);
1649 return (SET_ERROR(ENXIO
));
1651 ASSERT(zv
->zv_total_opens
> 0);
1657 struct dk_cinfo dki
;
1659 bzero(&dki
, sizeof (dki
));
1660 (void) strcpy(dki
.dki_cname
, "zvol");
1661 (void) strcpy(dki
.dki_dname
, "zvol");
1662 dki
.dki_ctype
= DKC_UNKNOWN
;
1663 dki
.dki_unit
= getminor(dev
);
1664 dki
.dki_maxtransfer
=
1665 1 << (SPA_OLD_MAXBLOCKSHIFT
- zv
->zv_min_bs
);
1666 mutex_exit(&zfsdev_state_lock
);
1667 if (ddi_copyout(&dki
, (void *)arg
, sizeof (dki
), flag
))
1668 error
= SET_ERROR(EFAULT
);
1672 case DKIOCGMEDIAINFO
:
1674 struct dk_minfo dkm
;
1676 bzero(&dkm
, sizeof (dkm
));
1677 dkm
.dki_lbsize
= 1U << zv
->zv_min_bs
;
1678 dkm
.dki_capacity
= zv
->zv_volsize
>> zv
->zv_min_bs
;
1679 dkm
.dki_media_type
= DK_UNKNOWN
;
1680 mutex_exit(&zfsdev_state_lock
);
1681 if (ddi_copyout(&dkm
, (void *)arg
, sizeof (dkm
), flag
))
1682 error
= SET_ERROR(EFAULT
);
1686 case DKIOCGMEDIAINFOEXT
:
1688 struct dk_minfo_ext dkmext
;
1690 bzero(&dkmext
, sizeof (dkmext
));
1691 dkmext
.dki_lbsize
= 1U << zv
->zv_min_bs
;
1692 dkmext
.dki_pbsize
= zv
->zv_volblocksize
;
1693 dkmext
.dki_capacity
= zv
->zv_volsize
>> zv
->zv_min_bs
;
1694 dkmext
.dki_media_type
= DK_UNKNOWN
;
1695 mutex_exit(&zfsdev_state_lock
);
1696 if (ddi_copyout(&dkmext
, (void *)arg
, sizeof (dkmext
), flag
))
1697 error
= SET_ERROR(EFAULT
);
1703 uint64_t vs
= zv
->zv_volsize
;
1704 uint8_t bs
= zv
->zv_min_bs
;
1706 mutex_exit(&zfsdev_state_lock
);
1707 error
= zvol_getefi((void *)arg
, flag
, vs
, bs
);
1711 case DKIOCFLUSHWRITECACHE
:
1712 dkc
= (struct dk_callback
*)arg
;
1713 mutex_exit(&zfsdev_state_lock
);
1714 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
1715 if ((flag
& FKIOCTL
) && dkc
!= NULL
&& dkc
->dkc_callback
) {
1716 (*dkc
->dkc_callback
)(dkc
->dkc_cookie
, error
);
1723 int wce
= (zv
->zv_flags
& ZVOL_WCE
) ? 1 : 0;
1724 if (ddi_copyout(&wce
, (void *)arg
, sizeof (int),
1726 error
= SET_ERROR(EFAULT
);
1732 if (ddi_copyin((void *)arg
, &wce
, sizeof (int),
1734 error
= SET_ERROR(EFAULT
);
1738 zv
->zv_flags
|= ZVOL_WCE
;
1739 mutex_exit(&zfsdev_state_lock
);
1741 zv
->zv_flags
&= ~ZVOL_WCE
;
1742 mutex_exit(&zfsdev_state_lock
);
1743 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
1751 * commands using these (like prtvtoc) expect ENOTSUP
1752 * since we're emulating an EFI label
1754 error
= SET_ERROR(ENOTSUP
);
1758 rl
= zfs_range_lock(&zv
->zv_znode
, 0, zv
->zv_volsize
,
1760 error
= zvol_dumpify(zv
);
1761 zfs_range_unlock(rl
);
1765 if (!(zv
->zv_flags
& ZVOL_DUMPIFIED
))
1767 rl
= zfs_range_lock(&zv
->zv_znode
, 0, zv
->zv_volsize
,
1769 error
= zvol_dump_fini(zv
);
1770 zfs_range_unlock(rl
);
1778 if (!zvol_unmap_enabled
)
1781 if (ddi_copyin((void *)arg
, &df
, sizeof (df
), flag
)) {
1782 error
= SET_ERROR(EFAULT
);
1787 * Apply Postel's Law to length-checking. If they overshoot,
1788 * just blank out until the end, if there's a need to blank
1791 if (df
.df_start
>= zv
->zv_volsize
)
1792 break; /* No need to do anything... */
1794 mutex_exit(&zfsdev_state_lock
);
1796 rl
= zfs_range_lock(&zv
->zv_znode
, df
.df_start
, df
.df_length
,
1798 tx
= dmu_tx_create(zv
->zv_objset
);
1799 dmu_tx_mark_netfree(tx
);
1800 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1804 zvol_log_truncate(zv
, tx
, df
.df_start
,
1805 df
.df_length
, B_TRUE
);
1807 error
= dmu_free_long_range(zv
->zv_objset
, ZVOL_OBJ
,
1808 df
.df_start
, df
.df_length
);
1811 zfs_range_unlock(rl
);
1815 * If the write-cache is disabled or 'sync' property
1816 * is set to 'always' then treat this as a synchronous
1817 * operation (i.e. commit to zil).
1819 if (!(zv
->zv_flags
& ZVOL_WCE
) ||
1820 (zv
->zv_objset
->os_sync
== ZFS_SYNC_ALWAYS
))
1821 zil_commit(zv
->zv_zilog
, ZVOL_OBJ
);
1824 * If the caller really wants synchronous writes, and
1825 * can't wait for them, don't return until the write
1828 if (df
.df_flags
& DF_WAIT_SYNC
) {
1830 dmu_objset_pool(zv
->zv_objset
), 0);
1837 error
= SET_ERROR(ENOTTY
);
1841 mutex_exit(&zfsdev_state_lock
);
1848 return (zvol_minors
!= 0);
1854 VERIFY(ddi_soft_state_init(&zfsdev_state
, sizeof (zfs_soft_state_t
),
1856 mutex_init(&zfsdev_state_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1862 mutex_destroy(&zfsdev_state_lock
);
1863 ddi_soft_state_fini(&zfsdev_state
);
1868 zfs_mvdev_dump_feature_check(void *arg
, dmu_tx_t
*tx
)
1870 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
1872 if (spa_feature_is_active(spa
, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP
))
1879 zfs_mvdev_dump_activate_feature_sync(void *arg
, dmu_tx_t
*tx
)
1881 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
1883 spa_feature_incr(spa
, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP
, tx
);
1887 zvol_dump_init(zvol_state_t
*zv
, boolean_t resize
)
1891 objset_t
*os
= zv
->zv_objset
;
1892 spa_t
*spa
= dmu_objset_spa(os
);
1893 vdev_t
*vd
= spa
->spa_root_vdev
;
1894 nvlist_t
*nv
= NULL
;
1895 uint64_t version
= spa_version(spa
);
1896 enum zio_checksum checksum
;
1898 ASSERT(MUTEX_HELD(&zfsdev_state_lock
));
1899 ASSERT(vd
->vdev_ops
== &vdev_root_ops
);
1901 error
= dmu_free_long_range(zv
->zv_objset
, ZVOL_OBJ
, 0,
1903 /* wait for dmu_free_long_range to actually free the blocks */
1904 txg_wait_synced(dmu_objset_pool(zv
->zv_objset
), 0);
1907 * If the pool on which the dump device is being initialized has more
1908 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1909 * enabled. If so, bump that feature's counter to indicate that the
1910 * feature is active. We also check the vdev type to handle the
1912 * # zpool create test raidz disk1 disk2 disk3
1913 * Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
1914 * the raidz vdev itself has 3 children.
1916 if (vd
->vdev_children
> 1 || vd
->vdev_ops
== &vdev_raidz_ops
) {
1917 if (!spa_feature_is_enabled(spa
,
1918 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP
))
1919 return (SET_ERROR(ENOTSUP
));
1920 (void) dsl_sync_task(spa_name(spa
),
1921 zfs_mvdev_dump_feature_check
,
1922 zfs_mvdev_dump_activate_feature_sync
, NULL
,
1923 2, ZFS_SPACE_CHECK_RESERVED
);
1926 tx
= dmu_tx_create(os
);
1927 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
1928 dmu_tx_hold_bonus(tx
, ZVOL_OBJ
);
1929 error
= dmu_tx_assign(tx
, TXG_WAIT
);
1936 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
1937 * function. Otherwise, use the old default -- OFF.
1939 checksum
= spa_feature_is_active(spa
,
1940 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP
) ? ZIO_CHECKSUM_NOPARITY
:
1944 * If we are resizing the dump device then we only need to
1945 * update the refreservation to match the newly updated
1946 * zvolsize. Otherwise, we save off the original state of the
1947 * zvol so that we can restore them if the zvol is ever undumpified.
1950 error
= zap_update(os
, ZVOL_ZAP_OBJ
,
1951 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 8, 1,
1952 &zv
->zv_volsize
, tx
);
1954 uint64_t checksum
, compress
, refresrv
, vbs
, dedup
;
1956 error
= dsl_prop_get_integer(zv
->zv_name
,
1957 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), &compress
, NULL
);
1958 error
= error
? error
: dsl_prop_get_integer(zv
->zv_name
,
1959 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), &checksum
, NULL
);
1960 error
= error
? error
: dsl_prop_get_integer(zv
->zv_name
,
1961 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), &refresrv
, NULL
);
1962 error
= error
? error
: dsl_prop_get_integer(zv
->zv_name
,
1963 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE
), &vbs
, NULL
);
1964 if (version
>= SPA_VERSION_DEDUP
) {
1965 error
= error
? error
:
1966 dsl_prop_get_integer(zv
->zv_name
,
1967 zfs_prop_to_name(ZFS_PROP_DEDUP
), &dedup
, NULL
);
1970 error
= error
? error
: zap_update(os
, ZVOL_ZAP_OBJ
,
1971 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), 8, 1,
1973 error
= error
? error
: zap_update(os
, ZVOL_ZAP_OBJ
,
1974 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), 8, 1, &checksum
, tx
);
1975 error
= error
? error
: zap_update(os
, ZVOL_ZAP_OBJ
,
1976 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 8, 1,
1978 error
= error
? error
: zap_update(os
, ZVOL_ZAP_OBJ
,
1979 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE
), 8, 1,
1981 error
= error
? error
: dmu_object_set_blocksize(
1982 os
, ZVOL_OBJ
, SPA_OLD_MAXBLOCKSIZE
, 0, tx
);
1983 if (version
>= SPA_VERSION_DEDUP
) {
1984 error
= error
? error
: zap_update(os
, ZVOL_ZAP_OBJ
,
1985 zfs_prop_to_name(ZFS_PROP_DEDUP
), 8, 1,
1989 zv
->zv_volblocksize
= SPA_OLD_MAXBLOCKSIZE
;
1994 * We only need update the zvol's property if we are initializing
1995 * the dump area for the first time.
1998 VERIFY(nvlist_alloc(&nv
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
1999 VERIFY(nvlist_add_uint64(nv
,
2000 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 0) == 0);
2001 VERIFY(nvlist_add_uint64(nv
,
2002 zfs_prop_to_name(ZFS_PROP_COMPRESSION
),
2003 ZIO_COMPRESS_OFF
) == 0);
2004 VERIFY(nvlist_add_uint64(nv
,
2005 zfs_prop_to_name(ZFS_PROP_CHECKSUM
),
2007 if (version
>= SPA_VERSION_DEDUP
) {
2008 VERIFY(nvlist_add_uint64(nv
,
2009 zfs_prop_to_name(ZFS_PROP_DEDUP
),
2010 ZIO_CHECKSUM_OFF
) == 0);
2013 error
= zfs_set_prop_nvlist(zv
->zv_name
, ZPROP_SRC_LOCAL
,
2021 /* Allocate the space for the dump */
2022 error
= zvol_prealloc(zv
);
2027 zvol_dumpify(zvol_state_t
*zv
)
2030 uint64_t dumpsize
= 0;
2032 objset_t
*os
= zv
->zv_objset
;
2034 if (zv
->zv_flags
& ZVOL_RDONLY
)
2035 return (SET_ERROR(EROFS
));
2037 if (zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
,
2038 8, 1, &dumpsize
) != 0 || dumpsize
!= zv
->zv_volsize
) {
2039 boolean_t resize
= (dumpsize
> 0);
2041 if ((error
= zvol_dump_init(zv
, resize
)) != 0) {
2042 (void) zvol_dump_fini(zv
);
2048 * Build up our lba mapping.
2050 error
= zvol_get_lbas(zv
);
2052 (void) zvol_dump_fini(zv
);
2056 tx
= dmu_tx_create(os
);
2057 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
2058 error
= dmu_tx_assign(tx
, TXG_WAIT
);
2061 (void) zvol_dump_fini(zv
);
2065 zv
->zv_flags
|= ZVOL_DUMPIFIED
;
2066 error
= zap_update(os
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
, 8, 1,
2067 &zv
->zv_volsize
, tx
);
2071 (void) zvol_dump_fini(zv
);
2075 txg_wait_synced(dmu_objset_pool(os
), 0);
2080 zvol_dump_fini(zvol_state_t
*zv
)
2083 objset_t
*os
= zv
->zv_objset
;
2086 uint64_t checksum
, compress
, refresrv
, vbs
, dedup
;
2087 uint64_t version
= spa_version(dmu_objset_spa(zv
->zv_objset
));
2090 * Attempt to restore the zvol back to its pre-dumpified state.
2091 * This is a best-effort attempt as it's possible that not all
2092 * of these properties were initialized during the dumpify process
2093 * (i.e. error during zvol_dump_init).
2096 tx
= dmu_tx_create(os
);
2097 dmu_tx_hold_zap(tx
, ZVOL_ZAP_OBJ
, TRUE
, NULL
);
2098 error
= dmu_tx_assign(tx
, TXG_WAIT
);
2103 (void) zap_remove(os
, ZVOL_ZAP_OBJ
, ZVOL_DUMPSIZE
, tx
);
2106 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2107 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), 8, 1, &checksum
);
2108 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2109 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), 8, 1, &compress
);
2110 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2111 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), 8, 1, &refresrv
);
2112 (void) zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2113 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE
), 8, 1, &vbs
);
2115 VERIFY(nvlist_alloc(&nv
, NV_UNIQUE_NAME
, KM_SLEEP
) == 0);
2116 (void) nvlist_add_uint64(nv
,
2117 zfs_prop_to_name(ZFS_PROP_CHECKSUM
), checksum
);
2118 (void) nvlist_add_uint64(nv
,
2119 zfs_prop_to_name(ZFS_PROP_COMPRESSION
), compress
);
2120 (void) nvlist_add_uint64(nv
,
2121 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
), refresrv
);
2122 if (version
>= SPA_VERSION_DEDUP
&&
2123 zap_lookup(zv
->zv_objset
, ZVOL_ZAP_OBJ
,
2124 zfs_prop_to_name(ZFS_PROP_DEDUP
), 8, 1, &dedup
) == 0) {
2125 (void) nvlist_add_uint64(nv
,
2126 zfs_prop_to_name(ZFS_PROP_DEDUP
), dedup
);
2128 (void) zfs_set_prop_nvlist(zv
->zv_name
, ZPROP_SRC_LOCAL
,
2132 zvol_free_extents(zv
);
2133 zv
->zv_flags
&= ~ZVOL_DUMPIFIED
;
2134 (void) dmu_free_long_range(os
, ZVOL_OBJ
, 0, DMU_OBJECT_END
);
2135 /* wait for dmu_free_long_range to actually free the blocks */
2136 txg_wait_synced(dmu_objset_pool(zv
->zv_objset
), 0);
2137 tx
= dmu_tx_create(os
);
2138 dmu_tx_hold_bonus(tx
, ZVOL_OBJ
);
2139 error
= dmu_tx_assign(tx
, TXG_WAIT
);
2144 if (dmu_object_set_blocksize(os
, ZVOL_OBJ
, vbs
, 0, tx
) == 0)
2145 zv
->zv_volblocksize
= vbs
;