 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */
#include <sys/efi_partition.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define DISK_ROOT	"/dev/dsk"
#define RDISK_ROOT	"/dev/rdsk"
#define BACKUP_SLICE	"s2"
typedef struct prop_flags {
    int create:1;	/* Validate property on creation */
    int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
zpool_get_all_props(zpool_handle_t *zhp)
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)

    while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
        if (errno == ENOMEM) {
            if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                zcmd_free_nvlists(&zc);
            zcmd_free_nvlists(&zc);

    if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
        zcmd_free_nvlists(&zc);
    zcmd_free_nvlists(&zc);
zpool_props_refresh(zpool_handle_t *zhp)
    old_props = zhp->zpool_props;

    if (zpool_get_all_props(zhp) != 0)

    nvlist_free(old_props);
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t source;

    nvl = zhp->zpool_props;
    if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
        verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
        source = ZPROP_SRC_DEFAULT;
        if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
    zprop_source_t source;

    if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
        /*
         * zpool_get_all_props() has most likely failed because
         * the pool is faulted, but if all we need is the top level
         * vdev's guid then get it from the zhp config nvlist.
         */
        if ((prop == ZPOOL_PROP_GUID) &&
            (nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
            (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
        return (zpool_prop_default_numeric(prop));

    nvl = zhp->zpool_props;
    if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
        verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
        source = ZPROP_SRC_DEFAULT;
        value = zpool_prop_default_numeric(prop);
/*
 * Map VDEV STATE to printed strings.
 */
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
    case VDEV_STATE_CLOSED:
    case VDEV_STATE_OFFLINE:
        return (gettext("OFFLINE"));
    case VDEV_STATE_REMOVED:
        return (gettext("REMOVED"));
    case VDEV_STATE_CANT_OPEN:
        if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
            return (gettext("FAULTED"));
        else if (aux == VDEV_AUX_SPLIT_POOL)
            return (gettext("SPLIT"));
        return (gettext("UNAVAIL"));
    case VDEV_STATE_FAULTED:
        return (gettext("FAULTED"));
    case VDEV_STATE_DEGRADED:
        return (gettext("DEGRADED"));
    case VDEV_STATE_HEALTHY:
        return (gettext("ONLINE"));

    return (gettext("UNKNOWN"));
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype)
    zprop_source_t src = ZPROP_SRC_NONE;

    if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
        case ZPOOL_PROP_NAME:
            (void) strlcpy(buf, zpool_get_name(zhp), len);
        case ZPOOL_PROP_HEALTH:
            (void) strlcpy(buf, "FAULTED", len);
        case ZPOOL_PROP_GUID:
            intval = zpool_get_prop_int(zhp, prop, &src);
            (void) snprintf(buf, len, "%llu", intval);
        case ZPOOL_PROP_ALTROOT:
        case ZPOOL_PROP_CACHEFILE:
        case ZPOOL_PROP_COMMENT:
            if (zhp->zpool_props != NULL ||
                zpool_get_all_props(zhp) == 0) {
                    zpool_get_prop_string(zhp, prop, &src),
            (void) strlcpy(buf, "-", len);

    if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
        prop != ZPOOL_PROP_NAME)

    switch (zpool_prop_get_type(prop)) {
    case PROP_TYPE_STRING:
        (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
    case PROP_TYPE_NUMBER:
        intval = zpool_get_prop_int(zhp, prop, &src);
        case ZPOOL_PROP_SIZE:
        case ZPOOL_PROP_ALLOCATED:
        case ZPOOL_PROP_FREE:
            (void) zfs_nicenum(intval, buf, len);
        case ZPOOL_PROP_CAPACITY:
            (void) snprintf(buf, len, "%llu%%",
                (u_longlong_t)intval);
        case ZPOOL_PROP_DEDUPRATIO:
            (void) snprintf(buf, len, "%llu.%02llux",
                (u_longlong_t)(intval / 100),
                (u_longlong_t)(intval % 100));
        case ZPOOL_PROP_HEALTH:
            verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
            verify(nvlist_lookup_uint64_array(nvroot,
                ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
            (void) strlcpy(buf, zpool_state_to_name(intval,
            (void) snprintf(buf, len, "%llu", intval);
    case PROP_TYPE_INDEX:
        intval = zpool_get_prop_int(zhp, prop, &src);
        if (zpool_prop_index_to_string(prop, intval, &strval)
            (void) strlcpy(buf, strval, len);
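
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * fetch pool properties into a caller-supplied buffer with zpool_get_prop().
 * 'zhp' is assumed to be an already-open pool handle; the guard keeps the
 * example out of the build.
 */
#if 0
    char buf[ZFS_MAXPROPLEN];
    zprop_source_t src;

    /* Print the pool health string ("ONLINE", "DEGRADED", ...). */
    if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf), &src) == 0)
        (void) printf("health: %s\n", buf);

    /* Capacity is rendered as a percentage by zpool_get_prop(). */
    if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf), NULL) == 0)
        (void) printf("capacity: %s\n", buf);
#endif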
/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
bootfs_name_valid(const char *pool, char *bootfs)
    int len = strlen(pool);

    if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))

    if (strncmp(pool, bootfs, len) == 0 &&
        (bootfs[len] == '/' || bootfs[len] == '\0'))
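
/*
 * Example (illustrative, not in the original file): for pool "rpool",
 * bootfs_name_valid() accepts "rpool" or "rpool/ROOT/be1", but rejects
 * "tank/ROOT/be1" because the dataset does not live in the same pool.
 */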
/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
pool_uses_efi(nvlist_t *config)
    if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)
        return (read_efi_label(config, NULL) >= 0);

    for (c = 0; c < children; c++) {
        if (pool_uses_efi(child[c]))
pool_is_bootable(zpool_handle_t *zhp)
    char bootfs[ZPOOL_MAXNAMELEN];

    return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
        sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
        sizeof (bootfs)) != 0);
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
    struct stat64 statbuf;

    if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
        (void) no_memory(hdl);

    while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
        const char *propname = nvpair_name(elem);

        /*
         * Make sure this property is valid and applies to this type.
         */
        if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "invalid property '%s'"), propname);
            (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

        if (zpool_prop_readonly(prop)) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                "is readonly"), propname);
            (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);

        if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
            &strval, &intval, errbuf) != 0)

        /*
         * Perform additional checking for specific properties.
         */
        case ZPOOL_PROP_VERSION:
            if (intval < version || intval > SPA_VERSION) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' number %d is invalid."),
                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

        case ZPOOL_PROP_BOOTFS:
            if (flags.create || flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' cannot be set at creation "
                    "or import time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

            if (version < SPA_VERSION_BOOTFS) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "pool must be upgraded to support "
                    "'%s' property"), propname);
                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

            /*
             * bootfs property value has to be a dataset name and
             * the dataset has to be in the same pool as it sets to.
             */
            if (strval[0] != '\0' && !bootfs_name_valid(poolname,
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                    "is an invalid name"), strval);
                (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);

            if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "could not open pool '%s'"), poolname);
                (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);

            verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

            /*
             * bootfs property cannot be set on a disk which has
             * been EFI labeled.
             */
            if (pool_uses_efi(nvroot)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' not supported on "
                    "EFI labeled devices"), propname);
                (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
        case ZPOOL_PROP_ALTROOT:
            if (!flags.create && !flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set during pool "
                    "creation or import"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

            if (strval[0] != '/') {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "bad alternate root '%s'"), strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);

        case ZPOOL_PROP_CACHEFILE:
            if (strval[0] == '\0')

            if (strcmp(strval, "none") == 0)

            if (strval[0] != '/') {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' must be empty, an "
                    "absolute path, or 'none'"), propname);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);

            slash = strrchr(strval, '/');

            if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                strcmp(slash, "/..") == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' is not a valid file"), strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);

            if (strval[0] != '\0' &&
                (stat64(strval, &statbuf) != 0 ||
                !S_ISDIR(statbuf.st_mode))) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' is not a valid directory"),
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);

        case ZPOOL_PROP_COMMENT:
            for (check = strval; *check != '\0'; check++) {
                if (!isprint(*check)) {
                    zfs_error_aux(hdl,
                        dgettext(TEXT_DOMAIN,
                        "comment may only have printable "
                        "characters"));
                    (void) zfs_error(hdl, EZFS_BADPROP,
                        errbuf);
            if (strlen(strval) > ZPROP_MAX_COMMENT) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "comment must not exceed %d characters"),
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

        case ZPOOL_PROP_READONLY:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "property '%s' can only be set at "
                "import time"), propname);
            (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

    nvlist_free(retprops);
/*
 * Set zpool property : propname=propval.
 */
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
    zfs_cmd_t zc = { 0 };
    nvlist_t *nvl = NULL;
    prop_flags_t flags = { 0 };

    (void) snprintf(errbuf, sizeof (errbuf),
        dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),

    if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
        return (no_memory(zhp->zpool_hdl));

    if (nvlist_add_string(nvl, propname, propval) != 0) {
        return (no_memory(zhp->zpool_hdl));

    version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
    if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
        zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {

    /*
     * Execute the corresponding ioctl() to set this property.
     */
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {

    ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

    zcmd_free_nvlists(&zc);

    (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

    (void) zpool_props_refresh(zhp);
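
/*
 * Illustrative sketch (not part of the original file): setting a single pool
 * property through zpool_set_prop(). The property name and value shown here
 * are assumptions for the example only.
 */
#if 0
    /* Equivalent of: zpool set comment="scratch pool" <pool> */
    if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
        (void) fprintf(stderr, "failed to set pool comment\n");
#endif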
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    char buf[ZFS_MAXPROPLEN];

    if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)

    for (entry = *plp; entry != NULL; entry = entry->pl_next) {
        if (entry->pl_prop != ZPROP_INVAL &&
            zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
            if (strlen(buf) > entry->pl_width)
                entry->pl_width = strlen(buf);
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define NEW_START_BLOCK	256
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
    ret = pool_namecheck(pool, &why, &what);

    /*
     * The rules for reserved pool names were extended at a later point.
     * But we need to support users with existing pools that may now be
     * invalid. So we only check for this expanded set of names during a
     * create (or import), and only in userland.
     */
    if (ret == 0 && !isopen &&
        (strncmp(pool, "mirror", 6) == 0 ||
        strncmp(pool, "raidz", 5) == 0 ||
        strncmp(pool, "spare", 5) == 0 ||
        strcmp(pool, "log") == 0)) {
            dgettext(TEXT_DOMAIN, "name is reserved"));

        case NAME_ERR_TOOLONG:
            dgettext(TEXT_DOMAIN, "name is too long"));

        case NAME_ERR_INVALCHAR:
            dgettext(TEXT_DOMAIN, "invalid character "
            "'%c' in pool name"), what);

        case NAME_ERR_NOLETTER:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "name must begin with a letter"));

        case NAME_ERR_RESERVED:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "name is reserved"));

        case NAME_ERR_DISKLIKE:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "pool name is reserved"));

        case NAME_ERR_LEADING_SLASH:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "leading slash in name"));

        case NAME_ERR_EMPTY_COMPONENT:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "empty component in name"));

        case NAME_ERR_TRAILING_SLASH:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "trailing slash in name"));

        case NAME_ERR_MULTIPLE_AT:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "multiple '@' delimiters in name"));
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
    /*
     * Make sure the pool name is valid.
     */
    if (!zpool_name_valid(hdl, B_TRUE, pool)) {
        (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"),

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
        (void) zfs_error_fmt(hdl, EZFS_NOENT,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_open(libzfs_handle_t *hdl, const char *pool)
    if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)

    if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
        (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
zpool_close(zpool_handle_t *zhp)
    if (zhp->zpool_config)
        nvlist_free(zhp->zpool_config);
    if (zhp->zpool_old_config)
        nvlist_free(zhp->zpool_old_config);
    if (zhp->zpool_props)
        nvlist_free(zhp->zpool_props);
/*
 * Return the name of the pool.
 */
zpool_get_name(zpool_handle_t *zhp)
    return (zhp->zpool_name);
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
zpool_get_state(zpool_handle_t *zhp)
    return (zhp->zpool_state);
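
/*
 * Illustrative sketch (not part of the original file): the usual
 * open/query/close pattern for a pool handle. The handle 'g_zfs' and the
 * pool name "tank" are assumptions for the example.
 */
#if 0
    zpool_handle_t *zhp = zpool_open_canfail(g_zfs, "tank");

    if (zhp != NULL) {
        if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
            (void) printf("%s is unavailable\n", zpool_get_name(zhp));
        zpool_close(zhp);
    }
#endif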
/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
    zfs_cmd_t zc = { 0 };
    nvlist_t *zc_fsprops = NULL;
    nvlist_t *zc_props = NULL;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot create '%s'"), pool);

    if (!zpool_name_valid(hdl, B_FALSE, pool))
        return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

        prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

        if ((zc_props = zpool_valid_proplist(hdl, pool, props,
            SPA_VERSION_1, flags, msg)) == NULL) {

        zoned = ((nvlist_lookup_string(fsprops,
            zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
            strcmp(zonestr, "on") == 0);

        if ((zc_fsprops = zfs_valid_proplist(hdl,
            ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
            (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
        if (nvlist_add_nvlist(zc_props,
            ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {

    if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)

    (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

    if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
        zcmd_free_nvlists(&zc);
        nvlist_free(zc_props);
        nvlist_free(zc_fsprops);

            /*
             * This can happen if the user has specified the same
             * device multiple times. We can't reliably detect this
             * until we try to add it and see we already have a
             * label.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE. Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is less than the "
                "minimum size (%s)"), buf);
            return (zfs_error(hdl, EZFS_BADDEV, msg));

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is out of space"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cache device must be a disk or disk slice"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

            return (zpool_standard_error(hdl, errno, msg));

    /*
     * If this is an alternate root pool, then we automatically set the
     * mountpoint of the root dataset to be '/'.
     */
    if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
        verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
        verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),

    zcmd_free_nvlists(&zc);
    nvlist_free(zc_props);
    nvlist_free(zc_fsprops);
/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
zpool_destroy(zpool_handle_t *zhp)
    zfs_cmd_t zc = { 0 };
    zfs_handle_t *zfp = NULL;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    if (zhp->zpool_state == POOL_STATE_ACTIVE &&
        (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot destroy '%s'"), zhp->zpool_name);

        if (errno == EROFS) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is read only"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            (void) zpool_standard_error(hdl, errno, msg);

    remove_mountpoint(zfp);
/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
    zfs_cmd_t zc = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    nvlist_t **spares, **l2cache;
    uint_t nspares, nl2cache;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot add to '%s'"), zhp->zpool_name);

    if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
        SPA_VERSION_SPARES &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add hot spares"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));

    if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
        ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
        for (s = 0; s < nspares; s++) {
            if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
                &path) == 0 && pool_uses_efi(spares[s])) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "device '%s' contains an EFI label and "
                    "cannot be used on root pools."),
                    zpool_vdev_name(hdl, NULL, spares[s],
                return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));

    if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
        SPA_VERSION_L2CACHE &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
        &l2cache, &nl2cache) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add cache devices"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
            /*
             * This can happen if the user has specified the same
             * device multiple times. We can't reliably detect this
             * until we try to add it and see we already have a
             * label.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);

            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE. Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "device is less than the minimum "
            (void) zfs_error(hdl, EZFS_BADDEV, msg);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "pool must be upgraded to add these vdevs"));
            (void) zfs_error(hdl, EZFS_BADVERSION, msg);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "root pool can not have multiple vdevs"
                " or separate logs"));
            (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cache device must be a disk or disk slice"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);

            (void) zpool_standard_error(hdl, errno, msg);

    zcmd_free_nvlists(&zc);
/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
    zfs_cmd_t zc = { 0 };

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot export '%s'"), zhp->zpool_name);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = force;
    zc.zc_guid = hardforce;

    if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
            zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
                "use '-f' to override the following errors:\n"
                "'%s' has an active shared spare which could be"
                " used by other pools once '%s' is exported."),
                zhp->zpool_name, zhp->zpool_name);
            return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,

        return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,

zpool_export(zpool_handle_t *zhp, boolean_t force)
    return (zpool_export_common(zhp, force, B_FALSE));

zpool_export_force(zpool_handle_t *zhp)
    return (zpool_export_common(zhp, B_TRUE, B_TRUE));
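
/*
 * Illustrative sketch (not part of the original file): exporting a pool,
 * escalating to a hard-forced export only when the caller asked for it.
 * The 'do_hard_force' flag is an assumption for the example.
 */
#if 0
    if (zpool_export(zhp, B_FALSE) != 0 && do_hard_force)
        (void) zpool_export_force(zhp);
#endif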
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *nv = NULL;

    if (!hdl->libzfs_printerr || config == NULL)

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0)

    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)

    (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

    if (localtime_r((time_t *)&rewindto, &t) != NULL &&
        strftime(timestr, 128, 0, &t) != 0) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "Would be able to return %s "
                "to its state as of %s.\n"),
            (void) printf(dgettext(TEXT_DOMAIN,
                "Pool %s returned to its state as of %s.\n"),

            (void) printf(dgettext(TEXT_DOMAIN,
                "%s approximately %lld "),
                dryrun ? "Would discard" : "Discarded",
            (void) printf(dgettext(TEXT_DOMAIN,
                "minutes of transactions.\n"));
        } else if (loss > 0) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "%s approximately %lld "),
                dryrun ? "Would discard" : "Discarded", loss);
            (void) printf(dgettext(TEXT_DOMAIN,
                "seconds of transactions.\n"));
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *nv = NULL;
    uint64_t edata = UINT64_MAX;

    if (!hdl->libzfs_printerr)

    (void) printf(dgettext(TEXT_DOMAIN, "action: "));
    (void) printf(dgettext(TEXT_DOMAIN, "\t"));

    /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)

    (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
    (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,

        (void) printf(dgettext(TEXT_DOMAIN,
            "Recovery is possible, but will result in some data loss.\n"));

    if (localtime_r((time_t *)&rewindto, &t) != NULL &&
        strftime(timestr, 128, 0, &t) != 0) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "\tReturning the pool to its state as of %s\n"
            "\tshould correct the problem. "),
        (void) printf(dgettext(TEXT_DOMAIN,
            "\tReverting the pool to an earlier state "
            "should correct the problem.\n\t"));

        (void) printf(dgettext(TEXT_DOMAIN,
            "Approximately %lld minutes of data\n"
            "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
    } else if (loss > 0) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "Approximately %lld seconds of data\n"
            "\tmust be discarded, irreversibly. "), loss);

    if (edata != 0 && edata != UINT64_MAX) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "After rewind, at least\n"
                "\tone persistent user-data error will remain. "));
            (void) printf(dgettext(TEXT_DOMAIN,
                "After rewind, several\n"
                "\tpersistent user-data errors will remain. "));

    (void) printf(dgettext(TEXT_DOMAIN,
        "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
        reason >= 0 ? "clear" : "import", name);

    (void) printf(dgettext(TEXT_DOMAIN,
        "A scrub of the pool\n"
        "\tis strongly recommended after recovery.\n"));

    (void) printf(dgettext(TEXT_DOMAIN,
        "Destroy and re-create the pool from\n\ta backup source.\n"));
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 */
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props = NULL;

    if (altroot != NULL) {
        if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
            return (zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),

        if (nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
            nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
            return (zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),

    ret = zpool_import_props(hdl, config, newname, props,
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    uint64_t is_log = 0;

    (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
    (void) printf("\t%*s%s%s\n", indent, "", name,
        is_log ? " [log]" : "");

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)

    for (c = 0; c < children; c++) {
        vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
        print_vdev_tree(hdl, vname, child[c], indent + 2);
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
    zfs_cmd_t zc = { 0 };
    zpool_rewind_policy_t policy;
    nvlist_t *nv = NULL;
    nvlist_t *nvinfo = NULL;
    nvlist_t *missing = NULL;

    verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,

    (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
        "cannot import pool '%s'"), origname);

    if (newname != NULL) {
        if (!zpool_name_valid(hdl, B_FALSE, newname))
            return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
        thename = (char *)newname;

        prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,

        if ((props = zpool_valid_proplist(hdl, origname,
            props, version, flags, errbuf)) == NULL) {
    } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {

    (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,

    if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
    if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {

    zc.zc_cookie = flags;
    while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
        if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
            zcmd_free_nvlists(&zc);

    (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
    zpool_get_rewind_policy(config, &policy);

            /*
             * Dry-run failed, but we print out what success
             * looks like if we found a best txg
             */
            if (policy.zrp_request & ZPOOL_TRY_REWIND) {
                zpool_rewind_exclaim(hdl, newname ? origname : thename,

        if (newname == NULL)
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),

            /*
             * Unsupported version.
             */
            (void) zfs_error(hdl, EZFS_BADVERSION, desc);

            (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is read only"));
            (void) zfs_error(hdl, EZFS_BADDEV, desc);

            if (nv && nvlist_lookup_nvlist(nv,
                ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
                nvlist_lookup_nvlist(nvinfo,
                ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "The devices below are missing, use "
                    "'-m' to import the pool anyway:\n"));
                print_vdev_tree(hdl, NULL, missing, 2);
                (void) printf("\n");
            (void) zpool_standard_error(hdl, error, desc);

            (void) zpool_standard_error(hdl, error, desc);

            (void) zpool_standard_error(hdl, error, desc);
            zpool_explain_recover(hdl,
                newname ? origname : thename, -error, nv);

        zpool_handle_t *zhp;

        /*
         * This should never fail, but play it safe anyway.
         */
        if (zpool_open_silent(hdl, thename, &zhp) != 0)
        else if (zhp != NULL)

        if (policy.zrp_request &
            (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
            zpool_rewind_exclaim(hdl, newname ? origname : thename,
                ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);

    zcmd_free_nvlists(&zc);
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
    zfs_cmd_t zc = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = func;

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
        (errno == ENOENT && func != POOL_SCAN_NONE))

    if (func == POOL_SCAN_SCRUB) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
    } else if (func == POOL_SCAN_NONE) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
        assert(!"unexpected result");

    if (errno == EBUSY) {
        pool_scan_stat_t *ps = NULL;

        verify(nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
        if (ps && ps->pss_func == POOL_SCAN_SCRUB)
            return (zfs_error(hdl, EZFS_SCRUBBING, msg));
        return (zfs_error(hdl, EZFS_RESILVERING, msg));
    } else if (errno == ENOENT) {
        return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
        return (zpool_standard_error(hdl, errno, msg));
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 */
ctd_check_path(char *str) {
    /*
     * If it starts with a slash, check the last component.
     */
    if (str && str[0] == '/') {
        char *tmp = strrchr(str, '/');

        /*
         * If it ends in "/old", check the second-to-last
         * component of the string instead.
         */
        if (tmp != str && strcmp(tmp, "/old") == 0) {
            for (tmp--; *tmp != '/'; tmp--)

    return (CTD_CHECK(str));
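
/*
 * Examples (illustrative, not in the original file): CTD_CHECK() accepts
 * "c0t0d0s0" because it begins with 'c' followed by a digit;
 * ctd_check_path() additionally accepts "/dev/dsk/c0t0d0s0" and
 * "/dev/dsk/c0t0d0s0/old" by checking the last (or second-to-last) path
 * component.
 */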
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
    nvpair_t *pair = nvlist_next_nvpair(search, NULL);

    /* Nothing to look for */
    if (search == NULL || pair == NULL)

    /* Obtain the key we will use to search */
    srchkey = nvpair_name(pair);

    switch (nvpair_type(pair)) {
    case DATA_TYPE_UINT64:
        if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
            uint64_t srchval, theguid;

            verify(nvpair_value_uint64(pair, &srchval) == 0);
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
            if (theguid == srchval)

    case DATA_TYPE_STRING: {
        char *srchval, *val;

        verify(nvpair_value_string(pair, &srchval) == 0);
        if (nvlist_lookup_string(nv, srchkey, &val) != 0)

        /*
         * Search for the requested value. Special cases:
         *
         * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
         *   "s0" or "s0/old". The "s0" part is hidden from the user,
         *   but included in the string, so this matches around it.
         * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
         *
         * Otherwise, all other searches are simple string compares.
         */
        if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
            ctd_check_path(val)) {
            uint64_t wholedisk = 0;

            (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                int slen = strlen(srchval);
                int vlen = strlen(val);

                if (slen != vlen - 2)

                /*
                 * make_leaf_vdev() should only set
                 * wholedisk for ZPOOL_CONFIG_PATHs which
                 * will include "/dev/dsk/", giving plenty of
                 * room for the indices used next.
                 */

                /*
                 * strings identical except trailing "s0"
                 */
                if (strcmp(&val[vlen - 2], "s0") == 0 &&
                    strncmp(srchval, val, slen) == 0)

                /*
                 * strings identical except trailing "s0/old"
                 */
                if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
                    strcmp(&srchval[slen - 4], "/old") == 0 &&
                    strncmp(srchval, val, slen - 4) == 0)
        } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
            char *type, *idx, *end, *p;
            uint64_t id, vdev_id;

            /*
             * Determine our vdev type, keeping in mind
             * that the srchval is composed of a type and
             * vdev id pair (i.e. mirror-4).
             */
            if ((type = strdup(srchval)) == NULL)

            if ((p = strrchr(type, '-')) == NULL) {

            /*
             * If the types don't match then keep looking.
             */
            if (strncmp(val, type, strlen(val)) != 0) {

            verify(strncmp(type, VDEV_TYPE_RAIDZ,
                strlen(VDEV_TYPE_RAIDZ)) == 0 ||
                strncmp(type, VDEV_TYPE_MIRROR,
                strlen(VDEV_TYPE_MIRROR)) == 0);
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,

            vdev_id = strtoull(idx, &end, 10);

            /*
             * Now verify that we have the correct vdev id.
             */
            if (strcmp(srchval, val) == 0)

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)

    for (c = 0; c < children; c++) {
        if ((ret = vdev_to_nvlist_iter(child[c], search,
            avail_spare, l2cache, NULL)) != NULL) {
            /*
             * The 'is_log' value is only set for the toplevel
             * vdev, not the leaf vdevs. So we always lookup the
             * log device from the root of the vdev tree (where
             * 'log' is non-NULL).
             */
                nvlist_lookup_uint64(child[c],
                ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
        &child, &children) == 0) {
        for (c = 0; c < children; c++) {
            if ((ret = vdev_to_nvlist_iter(child[c], search,
                avail_spare, l2cache, NULL)) != NULL) {
                *avail_spare = B_TRUE;

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
        &child, &children) == 0) {
        for (c = 0; c < children; c++) {
            if ((ret = vdev_to_nvlist_iter(child[c], search,
                avail_spare, l2cache, NULL)) != NULL) {
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
    nvlist_t *search, *nvroot, *ret;

    verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
    verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

    verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

    *avail_spare = B_FALSE;

    ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
    nvlist_free(search);
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
zpool_vdev_is_interior(const char *name)
    if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
        strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
    char buf[MAXPATHLEN];
    nvlist_t *nvroot, *search, *ret;

    verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

    guid = strtoull(path, &end, 10);
    if (guid != 0 && *end == '\0') {
        verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
    } else if (zpool_vdev_is_interior(path)) {
        verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
    } else if (path[0] != '/') {
        (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
        verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
        verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);

    verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

    *avail_spare = B_FALSE;

    ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
    nvlist_free(search);
vdev_online(nvlist_t *nv)
    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
/*
 * Helper function for zpool_get_physpaths().
 */
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
    size_t bytes_left, pos, rsz;

    if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
        return (EZFS_NODEVICE);

    pos = *bytes_written;
    bytes_left = physpath_size - pos;
    format = (pos == 0) ? "%s" : " %s";

    rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
    *bytes_written += rsz;

    if (rsz >= bytes_left) {
        /* if physpath was not copied properly, clear it */
        if (bytes_left != 0) {
        return (EZFS_NOSPC);
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
    if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
        return (EZFS_INVALCONFIG);

    if (strcmp(type, VDEV_TYPE_DISK) == 0) {
        /*
         * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
         * For a spare vdev, we only want to boot from the active
         * spare device.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
            return (EZFS_INVALCONFIG);

        if (vdev_online(nv)) {
            if ((ret = vdev_get_one_physpath(nv, physpath,
                phypath_size, rsz)) != 0)
    } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
        strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
        (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {

        if (nvlist_lookup_nvlist_array(nv,
            ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
            return (EZFS_INVALCONFIG);

        for (i = 0; i < count; i++) {
            ret = vdev_get_physpaths(child[i], physpath,
                phypath_size, rsz, is_spare);
            if (ret == EZFS_NOSPC)

    return (EZFS_POOL_INVALARG);
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
    nvlist_t *vdev_root;

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
        return (EZFS_INVALCONFIG);

    if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
        nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
        &child, &count) != 0)
        return (EZFS_INVALCONFIG);

    /*
     * root pool can not have EFI labeled disks and can only have
     * a single top-level vdev.
     */
    if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
        pool_uses_efi(vdev_root))
        return (EZFS_POOL_INVALARG);

    (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,

    /* No online devices */
        return (EZFS_NODEVICE);
/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
    return (zpool_get_config_physpath(zhp->zpool_config, physpath,
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
    char path[MAXPATHLEN];
    int (*_efi_use_whole_disk)(int);

    if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
        "efi_use_whole_disk")) == NULL)

    (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

    if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "relabel '%s': unable to open device"), name);
        return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));

    /*
     * It's possible that we might encounter an error if the device
     * does not have any unallocated space left. If so, we simply
     * ignore that error and continue on.
     */
    error = _efi_use_whole_disk(fd);

    if (error && error != VT_ENOSPC) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "relabel '%s': unable to read disk capacity"), name);
        return (zfs_error(hdl, EZFS_NOCAP, errbuf));
/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
    zfs_cmd_t zc = { 0 };
    boolean_t avail_spare, l2cache, islog;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    if (flags & ZFS_ONLINE_EXPAND) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot online %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    if (flags & ZFS_ONLINE_EXPAND ||
        zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
        char *pathname = NULL;
        uint64_t wholedisk = 0;

        (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
        verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,

        /*
         * XXX - L2ARC 1.0 devices can't support expansion.
         */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cannot expand cache devices"));
            return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));

            pathname += strlen(DISK_ROOT) + 1;
            (void) zpool_relabel_disk(hdl, pathname);

    zc.zc_cookie = VDEV_STATE_ONLINE;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
        if (errno == EINVAL) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
                "from this pool into a new one. Use '%s' "
                "instead"), "zpool detach");
            return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
        return (zpool_standard_error(hdl, errno, msg));

    *newstate = zc.zc_cookie;
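
/*
 * Illustrative sketch (not part of the original file): bringing a device
 * back online and reporting its resulting state. The device name "c0t1d0"
 * is an assumption for the example.
 */
#if 0
    vdev_state_t newstate;

    if (zpool_vdev_online(zhp, "c0t1d0", 0, &newstate) == 0 &&
        newstate != VDEV_STATE_HEALTHY)
        (void) printf("device came back %s\n",
            zpool_state_to_name(newstate, VDEV_AUX_NONE));
#endif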
/*
 * Take the specified vdev offline
 */
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
    zfs_cmd_t zc = { 0 };
    boolean_t avail_spare, l2cache;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    zc.zc_cookie = VDEV_STATE_OFFLINE;
    zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

        /*
         * There are no other replicas of this device.
         */
        return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

        /*
         * The log device has unplayed logs
         */
        return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

        return (zpool_standard_error(hdl, errno, msg));
/*
 * Mark the given vdev faulted.
 */
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
    zfs_cmd_t zc = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = VDEV_STATE_FAULTED;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

        /*
         * There are no other replicas of this device.
         */
        return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

        return (zpool_standard_error(hdl, errno, msg));
/*
 * Mark the given vdev degraded.
 */
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
    zfs_cmd_t zc = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = VDEV_STATE_DEGRADED;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

    return (zpool_standard_error(hdl, errno, msg));
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a spare.
 */
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
    if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
        verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,

        if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
            children == 2 && child[which] == tgt)

        for (c = 0; c < children; c++)
            if (is_replacing_spare(child[c], tgt, which))
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
    zfs_cmd_t zc = { 0 };
    boolean_t avail_spare, l2cache, islog;
    nvlist_t *config_root;
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    boolean_t rootpool = pool_is_bootable(zhp);

        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot replace %s with %s"), old_disk, new_disk);
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot attach %s to %s"), new_disk, old_disk);

    /*
     * If this is a root pool, make sure that we're not attaching an
     * EFI labeled device.
     */
    if (rootpool && pool_uses_efi(nvroot)) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "EFI labeled devices are not supported on root pools."));
        return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

        return (zfs_error(hdl, EZFS_ISSPARE, msg));

        return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
    zc.zc_cookie = replacing;

    if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0 || children != 1) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "new device must be a single disk"));
        return (zfs_error(hdl, EZFS_INVALCONFIG, msg));

    verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
        ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

    if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
    /*
     * If the target is a hot spare that has been swapped in, we can only
     * replace it with another hot spare.
     */
        nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
        (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
        NULL) == NULL || !avail_spare) &&
        is_replacing_spare(config_root, tgt, 1)) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "can only be replaced by another hot spare"));
        return (zfs_error(hdl, EZFS_BADTARGET, msg));

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

    ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

    zcmd_free_nvlists(&zc);

            /*
             * XXX need a better way to prevent user from
             * booting up a half-baked vdev.
             */
            (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
                "sure to wait until resilver is done "
                "before rebooting.\n"));

        /*
         * Can't attach to or replace this type of vdev.
         */
            uint64_t version = zpool_get_prop_int(zhp,
                ZPOOL_PROP_VERSION, NULL);

                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "cannot replace a log with a spare"));
            else if (version >= SPA_VERSION_MULTI_REPLACE)
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "already in replacing/spare config; wait "
                    "for completion or use 'zpool detach'"));
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "cannot replace a replacing device"));
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "can only attach to mirrors and top-level "
        (void) zfs_error(hdl, EZFS_BADTARGET, msg);

        /*
         * The new device must be a single disk.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "new device must be a single disk"));
        (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);

        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
        (void) zfs_error(hdl, EZFS_BADDEV, msg);

        /*
         * The new device is too small.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "device is too small"));
        (void) zfs_error(hdl, EZFS_BADDEV, msg);

        /*
         * The new device has a different alignment requirement.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "devices have different sector alignment"));
        (void) zfs_error(hdl, EZFS_BADDEV, msg);

        /*
         * The resulting top-level vdev spec won't fit in the label.
         */
        (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);

        (void) zpool_standard_error(hdl, errno, msg);
/*
 * Detach the specified device.
 */
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
    zfs_cmd_t zc = { 0 };
    boolean_t avail_spare, l2cache;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

        return (zfs_error(hdl, EZFS_ISSPARE, msg));

        return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)

        /*
         * Can't detach from this type of vdev.
         */
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
            "applicable to mirror and replacing vdevs"));
        (void) zfs_error(hdl, EZFS_BADTARGET, msg);

        /*
         * There are no other replicas of this device.
         */
        (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);

        (void) zpool_standard_error(hdl, errno, msg);
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool. The schild array contains a list of disks that the
 * user specified on the command line. We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry. Otherwise we return -1.
 */
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
    for (mc = 0; mc < mchildren; mc++) {
        char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
            mchild[mc], B_FALSE);

        for (sc = 0; sc < schildren; sc++) {
            char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
                schild[sc], B_FALSE);
            boolean_t result = (strcmp(mpath, spath) == 0);
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);
		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
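
/*
 * Illustrative caller sketch (editor's addition, not part of the original
 * source): performing a dry-run split so the layout of the new pool can be
 * inspected before committing.  The pool names are hypothetical and the
 * splitflags_t members are assumed to match the definition used by this file.
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "tanksplit", &newroot, NULL, flags) == 0) {
 *		... examine newroot, then call again with flags.dryrun = 0 ...
 *		nvlist_free(newroot);
 *	}
 */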
/*
 * Remove the given device.  Currently, this is supported only for hot spares
 * and level 2 cache devices.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	/*
	 * XXX - this should just go away.
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
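
/*
 * Illustrative caller sketch (editor's addition, not part of the original
 * source): removing a cache device.  The device name and the g_zfs handle
 * are hypothetical.
 *
 *	if (zpool_vdev_remove(zhp, "c2t0d0") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(g_zfs));
 */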
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
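
/*
 * Illustrative caller sketch (editor's addition, not part of the original
 * source): clearing errors on the whole pool with no rewind requested.  An
 * empty policy nvlist is assumed to be acceptable here because
 * zpool_get_rewind_policy() falls back to its defaults for missing members.
 *
 *	nvlist_t *policy = NULL;
 *
 *	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0) {
 *		(void) zpool_clear(zhp, NULL, policy);
 *		nvlist_free(policy);
 *	}
 */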
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);

	return (path);
}
/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.
 * We ignore any failure here, since a common case is for an unprivileged user
 * to type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			int pathlen = strlen(path);
			char *tmp = zfs_strdup(hdl, path);

			/*
			 * If it starts with c#, and ends with "s0", chop
			 * the "s0" off, or if it ends with "s0/old", remove
			 * the "s0" from the middle.
			 */
			if (CTD_CHECK(tmp)) {
				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
					tmp[pathlen - 2] = '\0';
				} else if (pathlen > 6 &&
				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
					(void) strcpy(&tmp[pathlen - 6],
					    "/old");
				}
			}
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}
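
/*
 * Illustrative caller sketch (editor's addition, not part of the original
 * source): printing the display name of every child of a root vdev nvlist.
 * The returned string is allocated and must be freed by the caller.
 *
 *	nvlist_t **child;
 *	uint_t c, children;
 *
 *	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0) {
 *		for (c = 0; c < children; c++) {
 *			char *name = zpool_vdev_name(hdl, zhp, child[c],
 *			    B_FALSE);
 *			(void) printf("%s\n", name);
 *			free(name);
 *		}
 *	}
 */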
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
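
/*
 * Illustrative caller sketch (editor's addition, not part of the original
 * source): walking the uniquified error list.  Each element is an nvlist
 * holding the dataset and object numbers added above.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */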
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}
void
zpool_set_history_str(const char *subcommand, int argc, char **argv,
    char *history_str)
{
	int i;

	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
	for (i = 1; i < argc; i++) {
		if (strlen(history_str) + 1 + strlen(argv[i]) >
		    HIS_MAX_RECORD_LEN)
			break;
		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
	}
}
/*
 * Stage command history for logging.
 */
int
zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
{
	if (history_str == NULL)
		return (EINVAL);

	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
		return (EINVAL);

	if (hdl->libzfs_log_str != NULL)
		free(hdl->libzfs_log_str);

	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
		return (no_memory(hdl));

	return (0);
}
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
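
/*
 * Illustrative caller sketch (editor's addition, not part of the original
 * source): fetching the history and printing each record's command string,
 * using the ZPOOL_HIST_CMD member that the kernel logs for command lines.
 *
 *	nvlist_t *nvhis = NULL;
 *	nvlist_t **records;
 *	uint_t numrecords, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */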
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    dsobj, obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0 && mounted) {
		(void) snprintf(pathname, len, "%s%s", mntpnt,
		    zc.zc_value);
	} else {
		(void) snprintf(pathname, len, "%s:%s",
		    dsname, zc.zc_value);
	}
	free(mntpnt);
}
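
/*
 * Illustrative caller sketch (editor's addition, not part of the original
 * source): resolving one entry from the error log above into a displayable
 * path, in the style of 'zpool status -v'.
 *
 *	char pathname[MAXPATHLEN * 2];
 *
 *	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
 *	(void) printf("%s\n", pathname);
 */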
/*
 * Read the EFI label from the config, if a label does not exist then
 * pass back the error to the caller.  If the caller has passed a non-NULL
 * diskaddr argument then we set it to the starting address of the EFI
 * partition.
 */
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
	char *path;
	int fd;
	char diskname[MAXPATHLEN];
	int err = -1;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
		return (err);

	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
	    strrchr(path, '/'));
	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
		struct dk_gpt *vtoc;

		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
			if (sb != NULL)
				*sb = vtoc->efi_parts[0].p_start;
			efi_free(vtoc);
		}
		(void) close(fd);
	}
	return (err);
}
/*
 * determine where a partition starts on a disk in the current
 * configuration
 */
static diskaddr_t
find_start_block(nvlist_t *config)
{
	nvlist_t **child;
	uint_t c, children;
	diskaddr_t sb = MAXOFFSET_T;
	uint64_t wholedisk;

	if (nvlist_lookup_nvlist_array(config,
	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
		if (nvlist_lookup_uint64(config,
		    ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) != 0 || !wholedisk) {
			return (MAXOFFSET_T);
		}
		if (read_efi_label(config, &sb) < 0)
			sb = MAXOFFSET_T;
		return (sb);
	}

	for (c = 0; c < children; c++) {
		sb = find_start_block(child[c]);
		if (sb != MAXOFFSET_T) {
			return (sb);
		}
	}
	return (MAXOFFSET_T);
}
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		if (pool_is_bootable(zhp)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "EFI labeled devices are not supported on root "
			    "pools."));
			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
		}

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"), name);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a specific
		 * slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
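
/*
 * Illustrative caller sketch (editor's addition, not part of the original
 * source): writing an EFI label to a whole disk before adding it to a pool.
 * The name is the short ctd name, with no /dev/dsk prefix and no slice; the
 * device name shown here is hypothetical.
 *
 *	if (zpool_label_disk(hdl, zhp, "c3t0d0") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */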
static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}
/*
 * check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol
 */
int
zvol_check_dump_config(char *arg)
{
	zpool_handle_t *zhp = NULL;
	nvlist_t *config, *nvroot;
	char *p, *volname;
	nvlist_t **top;
	uint_t toplevels;
	libzfs_handle_t *hdl;
	char errbuf[1024];
	char poolname[ZPOOL_MAXNAMELEN];
	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
	int ret = 1;

	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
		return (-1);
	}

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "dump is not supported on device '%s'"), arg);

	if ((hdl = libzfs_init()) == NULL)
		return (1);
	libzfs_print_on_error(hdl, B_TRUE);

	volname = arg + pathlen;

	/* check the configuration of the pool */
	if ((p = strchr(volname, '/')) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		return (1);
	} else if (p - volname >= ZFS_MAXNAMELEN) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		return (1);
	} else {
		(void) strncpy(poolname, volname, p - volname);
		poolname[p - volname] = '\0';
	}

	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not open pool '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
		goto out;
	}
	config = zpool_get_config(zhp, NULL);
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "could not obtain vdev configuration for '%s'"), poolname);
		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
		goto out;
	}

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);
	if (toplevels != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "'%s' has multiple top level vdevs"), poolname);
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
		goto out;
	}

	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {