1949 crash during reguid causes stale config
usr/src/uts/common/fs/zfs/zvol.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */

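/*
 * For example, "zfs create -V 1g tank/vol" creates a volume dataset; once
 * a minor node exists for it (see zvol_create_minor() below), the nodes
 * /dev/zvol/dsk/tank/vol and /dev/zvol/rdsk/tank/vol can be opened like
 * any other block or raw disk device.
 */
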
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>

#include "zfs_namecheck.h"

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS / 2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t **);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(uint64_t volsize, major_t maj, minor_t min)
{
	dev_t dev = makedevice(maj, min);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (EINVAL);

	if (volsize % blocksize != 0)
		return (EINVAL);

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (EOVERFLOW);
#endif
	return (0);
}

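/*
 * For example, a 1 GiB volume with the default 8 KiB volblocksize passes
 * (2^30 is a multiple of 2^13), while volsize=1000000 with the same block
 * size fails with EINVAL because it is not a whole number of blocks.
 */
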
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (EDOM);

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (EFRAGS);

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

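/*
 * The traversal above coalesces physically contiguous blocks: if block n
 * ends at vdev offset X and block n+1 starts at X on the same vdev, the
 * existing extent simply grows by one block.  A fully preallocated,
 * unfragmented volume therefore maps to very few extents, which is what
 * keeps the panic-time dump path cheap.
 */
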
static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (ENOTSUP);
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (EEXIST);
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (EAGAIN);
	}

	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (EAGAIN);
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (EAGAIN);
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);

	return (0);
}

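/*
 * The minor nodes created above are named "<minor>,raw" (the character
 * device) and "<minor>" (the block device); sdev_zvolops.c surfaces them
 * as /dev/zvol/rdsk/<pool>/<dataset> and /dev/zvol/dsk/<pool>/<dataset>
 * respectively.
 */
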
/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
	char nmbuf[20];
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (EBUSY);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));

	ddi_soft_state_free(zfsdev_state, minor);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	zvol_size_changed(zv->zv_volsize, ddi_driver_major(zfs_dip),
	    zv->zv_minor);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;
	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;
	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

static int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (ENOSPC);

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}

static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
}

int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	uint64_t readonly;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly) {
		error = EROFS;
		goto out;
	}

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (zv && error == 0) {
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
			}
		}
		if (error == 0) {
			zv->zv_volsize = volsize;
			zvol_size_changed(volsize, maj, zv->zv_minor);
		}
	}

	/*
	 * Generate a LUN expansion event.
	 */
	if (zv && error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}

out:
	dmu_objset_rele(os, FTAG);

	mutex_exit(&zfsdev_state_lock);

	return (error);
}

/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = EROFS;
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = EBUSY;
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = EBUSY;
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

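/*
 * Note the asymmetric cleanup above: on a successful dmu_sync() the zgd_t
 * is freed later by zvol_get_done() running as the dmu_sync() callback,
 * while on any failure (or for an immediate write) it is freed
 * synchronously right here.
 */
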
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

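/*
 * Concretely, with the default zvol_immediate_write_sz (32K), no separate
 * log device, and a 128K volblocksize: a block-aligned 128K synchronous
 * write goes out as WR_INDIRECT (one dmu_sync() per block), a 4K
 * synchronous write is WR_COPIED (the data rides inside the log record),
 * and an asynchronous write is WR_NEED_COPY (copied only if a later
 * zil_commit() actually needs it).
 */
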
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	for (c = 0; c < vd->vdev_children; c++) {
		ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
		    vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		int err = zvol_dumpio_vdev(vd->vdev_child[c],
		    addr, offset, size, doread, isdump);
		if (err != 0) {
			numerrors++;
		} else if (doread) {
			break;
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (EIO);
	else if (!doread && !vdev_writeable(vd))
		return (EIO);

	dvd = vd->vdev_tsd;
	ASSERT3P(dvd, !=, NULL);
	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (EIO);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
		    doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (EINVAL);
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dump;
	boolean_t sync;

	if (getminor(bp->b_edev) == 0) {
		error = EINVAL;
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = ENXIO;
		else if (zs->zss_type != ZSST_ZVOL)
			error = EINVAL;
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dump = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dump;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dump) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

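/*
 * zvol_minphys() is handed to physio() by zvol_read()/zvol_write() below
 * for dumpified volumes, so each physio pass is clamped to zvol_maxphys
 * (DMU_MAX_ACCESS / 2) bytes before zvol_strategy() ever runs.
 */
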
int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (EFAULT);
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (EINVAL);

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (EFAULT);
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (EFAULT);
	return (0);
}

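/*
 * LBA 34 is the conventional first usable LBA of a GPT disk (protective
 * MBR + GPT header + 32 sectors of partition entries).  For a 1 GiB
 * volume with zv_min_bs = DEV_BSHIFT (9, i.e. 512-byte sectors),
 * vs >> bs is 2097152, so the single reserved partition spans LBA 34
 * through 2097151 and the same value bounds efi_gpt_LastUsableLBA.
 */
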
/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (ENXIO);

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

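/*
 * In-kernel block consumers (COMSTAR's sbd zvol backend, for example)
 * cache these handles for the life of their open and then drive I/O
 * directly through the DMU/ZIL rather than through zvol_read() and
 * zvol_write().
 */
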
/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}

/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
		{
			uint64_t vs = zv->zv_volsize;
			uint8_t bs = zv->zv_min_bs;

			mutex_exit(&zfsdev_state_lock);
			error = zvol_getefi((void *)arg, flag, vs, bs);
			return (error);
		}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
		{
			int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
			if (ddi_copyout(&wce, (void *)arg, sizeof (int),
			    flag))
				error = EFAULT;
			break;
		}
	case DKIOCSETWCE:
		{
			int wce;
			if (ddi_copyin((void *)arg, &wce, sizeof (int),
			    flag)) {
				error = EFAULT;
				break;
			}
			if (wce) {
				zv->zv_flags |= ZVOL_WCE;
				mutex_exit(&zfsdev_state_lock);
			} else {
				zv->zv_flags &= ~ZVOL_WCE;
				mutex_exit(&zfsdev_state_lock);
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			}
			return (0);
		}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = ENOTSUP;
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = EFAULT;
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
			dmu_tx_commit(tx);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

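/*
 * DKIOCFREE is the block-device analogue of SCSI UNMAP/ATA TRIM: the freed
 * range is first logged as TX_TRUNCATE (so ZIL replay can redo it after a
 * crash) and then punched out of the DMU object with dmu_free_long_range().
 */
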
int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zfsdev_state_lock);
	ddi_soft_state_fini(&zfsdev_state);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error = 0;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize.  Otherwise, we save off the original state of the
	 * zvol so that we can restore it if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (EROFS);

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

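/*
 * After a successful dumpify, panic-time writes bypass the DMU entirely:
 * the kernel's dump path calls our dump(9E) entry point, zvol_dump(),
 * which resolves each chunk against the extent list built by
 * zvol_get_lbas() and writes to the vdev with ldi_dump().  This is why
 * zvol_dump_init() pins down compression, checksums and the block size,
 * and why zvol_map_block() refuses gang blocks.
 */
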
static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}