/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/efi_partition.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
/*
 * Map VDEV STATE to printed strings.
 */
char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}
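
/*
 * Usage sketch (illustrative, not part of the library): callers typically
 * pair this with the vdev stats pulled from the pool config, e.g.
 *
 *	vdev_stat_t *vs;	(from ZPOOL_CONFIG_VDEV_STATS)
 *	const char *state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
 */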
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
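
/*
 * Usage sketch (illustrative, hypothetical caller): fetching a property
 * as a formatted string.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 */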
/*
 * Check if the bootfs name has the same pool name as it is set to.
 * Assuming bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the same pool as it sets to.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
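
/*
 * Usage sketch (illustrative, hypothetical caller): setting a property is
 * a single name=value call, e.g.
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */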
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zfs_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
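
/*
 * Usage sketch (illustrative, hypothetical caller): the feature state
 * comes back as one of the ZFS_FEATURE_* / ZFS_UNSUPPORTED_* strings,
 * e.g.
 *
 *	char state[64];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */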
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead
 * (256 512-byte blocks = 128k).
 */
#define	NEW_START_BLOCK	256
/*
 * Validate the given pool name, optionally putting an extended error
 * message in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
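
/*
 * Usage sketch (illustrative, hypothetical caller): unlike
 * zpool_open_canfail(), a stale cache entry is not an error here; the
 * pool is simply skipped:
 *
 *	zpool_handle_t *zhp;
 *	if (zpool_open_silent(hdl, name, &zhp) == 0 && zhp != NULL) {
 *		... use zhp ...
 *		zpool_close(zhp);
 *	}
 */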
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
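
/*
 * zpool_export() requests an ordinary export, while zpool_export_force()
 * sets both 'force' and 'hardforce'.  The hardforce flag is interpreted
 * on the kernel side; it is intended to export the pool even when the
 * final on-disk label update cannot be written.
 */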
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "), loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}
/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string.  Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str)
{
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
			str = tmp + 1;
		}
	}
	return (CTD_CHECK(str));
}
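
/*
 * Example inputs (illustrative): CTD_CHECK accepts "c0t0d0" and
 * "c1t2d3s4"; ctd_check_path() additionally accepts full paths such as
 * "/dev/dsk/c0t0d0s0" and "/dev/dsk/c0t0d0s0/old".
 */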
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "s0" or "s0/old".  The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs.  So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
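
/*
 * Usage sketch (illustrative, hypothetical caller): the path argument may
 * be a vdev guid, an interior vdev name ("mirror-0"), a bare device name
 * ("c0t0d0"), or an absolute path:
 *
 *	boolean_t spare, l2cache, log;
 *	nvlist_t *tgt = zpool_find_vdev(zhp, "c0t0d0", &spare, &l2cache,
 *	    &log);
 */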
static int
vdev_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (0);

	return (1);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
2250 * Get phys_path for a root pool config.
2251 * Return 0 on success; non-zero on failure.
2253 static int
2254 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2256 size_t rsz;
2257 nvlist_t *vdev_root;
2258 nvlist_t **child;
2259 uint_t count;
2260 char *type;
2262 rsz = 0;
2264 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2265 &vdev_root) != 0)
2266 return (EZFS_INVALCONFIG);
2268 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2269 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2270 &child, &count) != 0)
2271 return (EZFS_INVALCONFIG);
2274 * root pool can only have a single top-level vdev.
2276 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2277 return (EZFS_POOL_INVALARG);
2279 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2280 B_FALSE);
2282 /* No online devices */
2283 if (rsz == 0)
2284 return (EZFS_NODEVICE);
2286 return (0);
2290 * Get phys_path for a root pool
2291 * Return 0 on success; non-zero on failure.
2294 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2296 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2297 phypath_size));
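/*
 * Illustrative sketch, not part of the original source: fetching the
 * space-separated physical device paths of a bootable pool, as a boot
 * configuration tool might. Buffer size and helper name are arbitrary.
 */
static void
example_print_physpaths(zpool_handle_t *zhp)
{
	char physpath[MAXPATHLEN * 4];

	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
		(void) printf("%s\n", physpath);
}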
2301 * If the device has been dynamically expanded then we need to relabel
2302 * the disk to use the new unallocated space.
2304 static int
2305 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2307 char path[MAXPATHLEN];
2308 char errbuf[1024];
2309 int fd, error;
2310 int (*_efi_use_whole_disk)(int);
2312 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2313 "efi_use_whole_disk")) == NULL)
2314 return (-1);
2316 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot relabel '%s'"), name);
	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2318 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2319 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2320 "relabel '%s': unable to open device"), name);
2321 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2325 * It's possible that we might encounter an error if the device
2326 * does not have any unallocated space left. If so, we simply
2327 * ignore that error and continue on.
2329 error = _efi_use_whole_disk(fd);
2330 (void) close(fd);
2331 if (error && error != VT_ENOSPC) {
2332 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2333 "relabel '%s': unable to read disk capacity"), name);
2334 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2336 return (0);
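/*
 * Illustrative sketch, not part of the original source: the dlsym()
 * lookup above avoids a hard link-time dependency on libefi. The same
 * pattern can probe for any optional symbol at run time; the helper
 * below is hypothetical.
 */
static int
example_call_optional(const char *symbol, int arg)
{
	int (*fn)(int);

	if ((fn = (int (*)(int))dlsym(RTLD_DEFAULT, symbol)) == NULL)
		return (-1);	/* symbol not present in this process */
	return (fn(arg));
}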
2340 * Bring the specified vdev online. The 'flags' parameter is a set of the
2341 * ZFS_ONLINE_* flags.
2344 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2345 vdev_state_t *newstate)
2347 zfs_cmd_t zc = { 0 };
2348 char msg[1024];
2349 nvlist_t *tgt;
2350 boolean_t avail_spare, l2cache, islog;
2351 libzfs_handle_t *hdl = zhp->zpool_hdl;
2353 if (flags & ZFS_ONLINE_EXPAND) {
2354 (void) snprintf(msg, sizeof (msg),
2355 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2356 } else {
2357 (void) snprintf(msg, sizeof (msg),
2358 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2361 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2362 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2363 &islog)) == NULL)
2364 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2366 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2368 if (avail_spare)
2369 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2371 if (flags & ZFS_ONLINE_EXPAND ||
2372 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2373 char *pathname = NULL;
2374 uint64_t wholedisk = 0;
2376 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2377 &wholedisk);
2378 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2379 &pathname) == 0);
2382 * XXX - L2ARC 1.0 devices can't support expansion.
2384 if (l2cache) {
2385 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2386 "cannot expand cache devices"));
2387 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2390 if (wholedisk) {
2391 pathname += strlen(DISK_ROOT) + 1;
2392 (void) zpool_relabel_disk(hdl, pathname);
2396 zc.zc_cookie = VDEV_STATE_ONLINE;
2397 zc.zc_obj = flags;
2399 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2400 if (errno == EINVAL) {
2401 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2402 "from this pool into a new one. Use '%s' "
2403 "instead"), "zpool detach");
2404 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2406 return (zpool_standard_error(hdl, errno, msg));
2409 *newstate = zc.zc_cookie;
2410 return (0);
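/*
 * Illustrative sketch, not part of the original source: onlining a device
 * and requesting expansion in one call, then checking the state it came
 * back in. The device name is a placeholder.
 */
static int
example_online_expand(zpool_handle_t *zhp)
{
	vdev_state_t newstate;

	if (zpool_vdev_online(zhp, "c1t0d0s0", ZFS_ONLINE_EXPAND,
	    &newstate) != 0)
		return (-1);
	return (newstate == VDEV_STATE_HEALTHY ? 0 : -1);
}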
2414 * Take the specified vdev offline
2417 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2419 zfs_cmd_t zc = { 0 };
2420 char msg[1024];
2421 nvlist_t *tgt;
2422 boolean_t avail_spare, l2cache;
2423 libzfs_handle_t *hdl = zhp->zpool_hdl;
2425 (void) snprintf(msg, sizeof (msg),
2426 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2428 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2429 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2430 NULL)) == NULL)
2431 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2433 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2435 if (avail_spare)
2436 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2438 zc.zc_cookie = VDEV_STATE_OFFLINE;
2439 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2441 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2442 return (0);
2444 switch (errno) {
2445 case EBUSY:
2448 * There are no other replicas of this device.
2450 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2452 case EEXIST:
2454 * The log device has unplayed logs
2456 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2458 default:
2459 return (zpool_standard_error(hdl, errno, msg));
2464 * Mark the given vdev faulted.
2467 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2469 zfs_cmd_t zc = { 0 };
2470 char msg[1024];
2471 libzfs_handle_t *hdl = zhp->zpool_hdl;
2473 (void) snprintf(msg, sizeof (msg),
2474 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2476 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2477 zc.zc_guid = guid;
2478 zc.zc_cookie = VDEV_STATE_FAULTED;
2479 zc.zc_obj = aux;
2481 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2482 return (0);
2484 switch (errno) {
2485 case EBUSY:
2488 * There are no other replicas of this device.
2490 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2492 default:
2493 return (zpool_standard_error(hdl, errno, msg));
2499 * Mark the given vdev degraded.
2502 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2504 zfs_cmd_t zc = { 0 };
2505 char msg[1024];
2506 libzfs_handle_t *hdl = zhp->zpool_hdl;
2508 (void) snprintf(msg, sizeof (msg),
2509 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2511 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2512 zc.zc_guid = guid;
2513 zc.zc_cookie = VDEV_STATE_DEGRADED;
2514 zc.zc_obj = aux;
2516 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2517 return (0);
2519 return (zpool_standard_error(hdl, errno, msg));
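/*
 * Illustrative sketch, not part of the original source: fault and degrade
 * take a vdev GUID rather than a path because their main consumer is fmd,
 * which tracks devices by GUID. A path can be converted first; the helper
 * and the choice of VDEV_AUX_EXTERNAL here are hypothetical.
 */
static int
example_fault_by_path(zpool_handle_t *zhp, const char *path)
{
	boolean_t spare, l2cache;
	uint64_t guid;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
	    NULL)) == NULL)
		return (-1);
	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
	return (zpool_vdev_fault(zhp, guid, VDEV_AUX_EXTERNAL));
}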
2523 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2524 * a hot spare.
2526 static boolean_t
2527 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2529 nvlist_t **child;
2530 uint_t c, children;
2531 char *type;
2533 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2534 &children) == 0) {
2535 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2536 &type) == 0);
2538 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2539 children == 2 && child[which] == tgt)
2540 return (B_TRUE);
2542 for (c = 0; c < children; c++)
2543 if (is_replacing_spare(child[c], tgt, which))
2544 return (B_TRUE);
2547 return (B_FALSE);
2551 * Attach new_disk (fully described by nvroot) to old_disk.
2552 * If 'replacing' is specified, the new disk will replace the old one.
2555 zpool_vdev_attach(zpool_handle_t *zhp,
2556 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2558 zfs_cmd_t zc = { 0 };
2559 char msg[1024];
2560 int ret;
2561 nvlist_t *tgt;
2562 boolean_t avail_spare, l2cache, islog;
2563 uint64_t val;
2564 char *newname;
2565 nvlist_t **child;
2566 uint_t children;
2567 nvlist_t *config_root;
2568 libzfs_handle_t *hdl = zhp->zpool_hdl;
2569 boolean_t rootpool = zpool_is_bootable(zhp);
2571 if (replacing)
2572 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2573 "cannot replace %s with %s"), old_disk, new_disk);
2574 else
2575 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2576 "cannot attach %s to %s"), new_disk, old_disk);
2578 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2579 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2580 &islog)) == NULL)
2581 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2583 if (avail_spare)
2584 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2586 if (l2cache)
2587 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2589 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2590 zc.zc_cookie = replacing;
2592 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2593 &child, &children) != 0 || children != 1) {
2594 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2595 "new device must be a single disk"));
2596 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2599 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2600 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2602 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2603 return (-1);
2606 * If the target is a hot spare that has been swapped in, we can only
2607 * replace it with another hot spare.
2609 if (replacing &&
2610 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2611 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2612 NULL) == NULL || !avail_spare) &&
2613 is_replacing_spare(config_root, tgt, 1)) {
2614 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2615 "can only be replaced by another hot spare"));
2616 free(newname);
2617 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2620 free(newname);
2622 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2623 return (-1);
2625 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2627 zcmd_free_nvlists(&zc);
2629 if (ret == 0) {
2630 if (rootpool) {
2632 * XXX need a better way to prevent user from
2633 * booting up a half-baked vdev.
2635 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2636 "sure to wait until resilver is done "
2637 "before rebooting.\n"));
2639 return (0);
2642 switch (errno) {
2643 case ENOTSUP:
2645 * Can't attach to or replace this type of vdev.
2647 if (replacing) {
2648 uint64_t version = zpool_get_prop_int(zhp,
2649 ZPOOL_PROP_VERSION, NULL);
2651 if (islog)
2652 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2653 "cannot replace a log with a spare"));
2654 else if (version >= SPA_VERSION_MULTI_REPLACE)
2655 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2656 "already in replacing/spare config; wait "
2657 "for completion or use 'zpool detach'"));
2658 else
2659 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2660 "cannot replace a replacing device"));
2661 } else {
2662 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2663 "can only attach to mirrors and top-level "
2664 "disks"));
2666 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2667 break;
2669 case EINVAL:
2671 * The new device must be a single disk.
2673 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2674 "new device must be a single disk"));
2675 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2676 break;
2678 case EBUSY:
2679 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2680 new_disk);
2681 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2682 break;
2684 case EOVERFLOW:
2686 * The new device is too small.
2688 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2689 "device is too small"));
2690 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2691 break;
2693 case EDOM:
2695 * The new device has a different alignment requirement.
2697 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2698 "devices have different sector alignment"));
2699 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2700 break;
2702 case ENAMETOOLONG:
2704 * The resulting top-level vdev spec won't fit in the label.
2706 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2707 break;
2709 default:
2710 (void) zpool_standard_error(hdl, errno, msg);
2713 return (-1);
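/*
 * Illustrative sketch, not part of the original source: the nvroot passed
 * to zpool_vdev_attach() is a root vdev with exactly one disk child, as
 * the "children != 1" check above enforces. zpool(1M) builds it with its
 * own helpers; a minimal hand-rolled version might look like this.
 */
static nvlist_t *
example_single_disk_root(const char *path)
{
	nvlist_t *root, *disk;

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, path) == 0);
	verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0) == 0);

	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);
	nvlist_free(disk);	/* the array add made a copy */
	return (root);
}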
2717 * Detach the specified device.
2720 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2722 zfs_cmd_t zc = { 0 };
2723 char msg[1024];
2724 nvlist_t *tgt;
2725 boolean_t avail_spare, l2cache;
2726 libzfs_handle_t *hdl = zhp->zpool_hdl;
2728 (void) snprintf(msg, sizeof (msg),
2729 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2731 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2732 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2733 NULL)) == NULL)
2734 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2736 if (avail_spare)
2737 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2739 if (l2cache)
2740 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2742 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2744 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2745 return (0);
2747 switch (errno) {
2749 case ENOTSUP:
2751 * Can't detach from this type of vdev.
2753 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2754 "applicable to mirror and replacing vdevs"));
2755 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2756 break;
2758 case EBUSY:
2760 * There are no other replicas of this device.
2762 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2763 break;
2765 default:
2766 (void) zpool_standard_error(hdl, errno, msg);
2769 return (-1);
2773 * Find a mirror vdev in the source nvlist.
2775 * The mchild array contains a list of disks in one of the top-level mirrors
2776 * of the source pool. The schild array contains a list of disks that the
2777 * user specified on the command line. We loop over the mchild array to
2778 * see if any entry in the schild array matches.
2780 * If a disk in the mchild array is found in the schild array, we return
2781 * the index of that entry. Otherwise we return -1.
2783 static int
2784 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2785 nvlist_t **schild, uint_t schildren)
2787 uint_t mc;
2789 for (mc = 0; mc < mchildren; mc++) {
2790 uint_t sc;
2791 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2792 mchild[mc], B_FALSE);
2794 for (sc = 0; sc < schildren; sc++) {
2795 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2796 schild[sc], B_FALSE);
2797 boolean_t result = (strcmp(mpath, spath) == 0);
2799 free(spath);
2800 if (result) {
2801 free(mpath);
2802 return (mc);
2806 free(mpath);
2809 return (-1);
2813 * Split a mirror pool. If newroot points to NULL, then a new nvlist
2814 * is generated and it is the responsibility of the caller to free it.
2817 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2818 nvlist_t *props, splitflags_t flags)
2820 zfs_cmd_t zc = { 0 };
2821 char msg[1024];
2822 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2823 nvlist_t **varray = NULL, *zc_props = NULL;
2824 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2825 libzfs_handle_t *hdl = zhp->zpool_hdl;
2826 uint64_t vers;
2827 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2828 int retval = 0;
2830 (void) snprintf(msg, sizeof (msg),
2831 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2833 if (!zpool_name_valid(hdl, B_FALSE, newname))
2834 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2836 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2837 (void) fprintf(stderr, gettext("Internal error: unable to "
2838 "retrieve pool configuration\n"));
2839 return (-1);
2842 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2843 == 0);
2844 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2846 if (props) {
2847 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2848 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2849 props, vers, flags, msg)) == NULL)
2850 return (-1);
2853 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2854 &children) != 0) {
2855 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2856 "Source pool is missing vdev tree"));
2857 if (zc_props)
2858 nvlist_free(zc_props);
2859 return (-1);
2862 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2863 vcount = 0;
2865 if (*newroot == NULL ||
2866 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2867 &newchild, &newchildren) != 0)
2868 newchildren = 0;
2870 for (c = 0; c < children; c++) {
2871 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2872 char *type;
2873 nvlist_t **mchild, *vdev;
2874 uint_t mchildren;
2875 int entry;
2878 * Unlike cache & spares, slogs are stored in the
2879 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2881 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2882 &is_log);
2883 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2884 &is_hole);
2885 if (is_log || is_hole) {
2887 * Create a hole vdev and put it in the config.
2889 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2890 goto out;
2891 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2892 VDEV_TYPE_HOLE) != 0)
2893 goto out;
2894 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2895 1) != 0)
2896 goto out;
2897 if (lastlog == 0)
2898 lastlog = vcount;
2899 varray[vcount++] = vdev;
2900 continue;
2902 lastlog = 0;
2903 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2904 == 0);
2905 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2906 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2907 "Source pool must be composed only of mirrors\n"));
2908 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2909 goto out;
2912 verify(nvlist_lookup_nvlist_array(child[c],
2913 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2915 /* find or add an entry for this top-level vdev */
2916 if (newchildren > 0 &&
2917 (entry = find_vdev_entry(zhp, mchild, mchildren,
2918 newchild, newchildren)) >= 0) {
2919 /* We found a disk that the user specified. */
2920 vdev = mchild[entry];
2921 ++found;
2922 } else {
2923 /* User didn't specify a disk for this vdev. */
2924 vdev = mchild[mchildren - 1];
2927 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2928 goto out;
2931 /* did we find every disk the user specified? */
2932 if (found != newchildren) {
2933 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2934 "include at most one disk from each mirror"));
2935 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2936 goto out;
2939 /* Prepare the nvlist for populating. */
2940 if (*newroot == NULL) {
2941 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2942 goto out;
2943 freelist = B_TRUE;
2944 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2945 VDEV_TYPE_ROOT) != 0)
2946 goto out;
2947 } else {
2948 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2951 /* Add all the children we found */
2952 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2953 lastlog == 0 ? vcount : lastlog) != 0)
2954 goto out;
2957 * If we're just doing a dry run, exit now with success.
2959 if (flags.dryrun) {
2960 memory_err = B_FALSE;
2961 freelist = B_FALSE;
2962 goto out;
2965 /* now build up the config list & call the ioctl */
2966 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2967 goto out;
2969 if (nvlist_add_nvlist(newconfig,
2970 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2971 nvlist_add_string(newconfig,
2972 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2973 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2974 goto out;
2977 * The new pool is automatically part of the namespace unless we
2978 * explicitly export it.
2980 if (!flags.import)
2981 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2982 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2983 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2984 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2985 goto out;
2986 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2987 goto out;
2989 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2990 retval = zpool_standard_error(hdl, errno, msg);
2991 goto out;
2994 freelist = B_FALSE;
2995 memory_err = B_FALSE;
2997 out:
2998 if (varray != NULL) {
2999 int v;
3001 for (v = 0; v < vcount; v++)
3002 nvlist_free(varray[v]);
3003 free(varray);
3005 zcmd_free_nvlists(&zc);
3006 if (zc_props)
3007 nvlist_free(zc_props);
3008 if (newconfig)
3009 nvlist_free(newconfig);
3010 if (freelist) {
3011 nvlist_free(*newroot);
3012 *newroot = NULL;
3015 if (retval != 0)
3016 return (retval);
3018 if (memory_err)
3019 return (no_memory(hdl));
3021 return (0);
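/*
 * Illustrative sketch, not part of the original source: a dry-run split
 * returns the would-be configuration in *newroot without touching the
 * pool, mirroring what 'zpool split -n' prints. The helper is
 * hypothetical.
 */
static int
example_split_dryrun(zpool_handle_t *zhp, char *newname)
{
	splitflags_t flags = { 0 };
	nvlist_t *newroot = NULL;
	int err;

	flags.dryrun = 1;
	err = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);
	if (err == 0 && newroot != NULL) {
		nvlist_print(stdout, newroot);
		nvlist_free(newroot);
	}
	return (err);
}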
3025 * Remove the given device. Currently, this is supported only for hot spares
3026 * and level 2 cache devices.
3029 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3031 zfs_cmd_t zc = { 0 };
3032 char msg[1024];
3033 nvlist_t *tgt;
3034 boolean_t avail_spare, l2cache, islog;
3035 libzfs_handle_t *hdl = zhp->zpool_hdl;
3036 uint64_t version;
3038 (void) snprintf(msg, sizeof (msg),
3039 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3041 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3042 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3043 &islog)) == NULL)
3044 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3046 * XXX - this should just go away.
3048 if (!avail_spare && !l2cache && !islog) {
3049 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3050 "only inactive hot spares, cache, top-level, "
3051 "or log devices can be removed"));
3052 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3055 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3056 if (islog && version < SPA_VERSION_HOLES) {
3057 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3058 "pool must be upgrade to support log removal"));
3059 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3062 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3064 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3065 return (0);
3067 return (zpool_standard_error(hdl, errno, msg));
3071 * Clear the errors for the pool, or the particular device if specified.
3074 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3076 zfs_cmd_t zc = { 0 };
3077 char msg[1024];
3078 nvlist_t *tgt;
3079 zpool_rewind_policy_t policy;
3080 boolean_t avail_spare, l2cache;
3081 libzfs_handle_t *hdl = zhp->zpool_hdl;
3082 nvlist_t *nvi = NULL;
3083 int error;
3085 if (path)
3086 (void) snprintf(msg, sizeof (msg),
3087 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3088 path);
3089 else
3090 (void) snprintf(msg, sizeof (msg),
3091 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3092 zhp->zpool_name);
3094 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3095 if (path) {
3096 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3097 &l2cache, NULL)) == NULL)
3098 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3101 * Don't allow error clearing for hot spares. Do allow
3102 * error clearing for l2cache devices.
3104 if (avail_spare)
3105 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3107 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3108 &zc.zc_guid) == 0);
3111 zpool_get_rewind_policy(rewindnvl, &policy);
3112 zc.zc_cookie = policy.zrp_request;
3114 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3115 return (-1);
3117 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3118 return (-1);
3120 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3121 errno == ENOMEM) {
3122 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3123 zcmd_free_nvlists(&zc);
3124 return (-1);
3128 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3129 errno != EPERM && errno != EACCES)) {
3130 if (policy.zrp_request &
3131 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3132 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3133 zpool_rewind_exclaim(hdl, zc.zc_name,
3134 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3135 nvi);
3136 nvlist_free(nvi);
3138 zcmd_free_nvlists(&zc);
3139 return (0);
3142 zcmd_free_nvlists(&zc);
3143 return (zpool_standard_error(hdl, errno, msg));
3147 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3150 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3152 zfs_cmd_t zc = { 0 };
3153 char msg[1024];
3154 libzfs_handle_t *hdl = zhp->zpool_hdl;
3156 (void) snprintf(msg, sizeof (msg),
3157 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3158 (u_longlong_t)guid);
3160 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3161 zc.zc_guid = guid;
3162 zc.zc_cookie = ZPOOL_NO_REWIND;
3164 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3165 return (0);
3167 return (zpool_standard_error(hdl, errno, msg));
3171 * Change the GUID for a pool.
3174 zpool_reguid(zpool_handle_t *zhp)
3176 char msg[1024];
3177 libzfs_handle_t *hdl = zhp->zpool_hdl;
3178 zfs_cmd_t zc = { 0 };
3180 (void) snprintf(msg, sizeof (msg),
3181 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3183 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3184 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3185 return (0);
3187 return (zpool_standard_error(hdl, errno, msg));
3191 * Reopen the pool.
3194 zpool_reopen(zpool_handle_t *zhp)
3196 zfs_cmd_t zc = { 0 };
3197 char msg[1024];
3198 libzfs_handle_t *hdl = zhp->zpool_hdl;
3200 (void) snprintf(msg, sizeof (msg),
3201 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3202 zhp->zpool_name);
3204 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3205 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3206 return (0);
3207 return (zpool_standard_error(hdl, errno, msg));
3211 * Convert from a devid string to a path.
3213 static char *
3214 devid_to_path(char *devid_str)
3216 ddi_devid_t devid;
3217 char *minor;
3218 char *path;
3219 devid_nmlist_t *list = NULL;
3220 int ret;
3222 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3223 return (NULL);
3225 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3227 devid_str_free(minor);
3228 devid_free(devid);
3230 if (ret != 0)
3231 return (NULL);
3234 * If strdup() fails, we will just return NULL below.
3236 path = strdup(list[0].devname);
3238 devid_free_nmlist(list);
3240 return (path);
3244 * Convert from a path to a devid string.
3246 static char *
3247 path_to_devid(const char *path)
3249 int fd;
3250 ddi_devid_t devid;
3251 char *minor, *ret;
3253 if ((fd = open(path, O_RDONLY)) < 0)
3254 return (NULL);
3256 minor = NULL;
3257 ret = NULL;
3258 if (devid_get(fd, &devid) == 0) {
3259 if (devid_get_minor_name(fd, &minor) == 0)
3260 ret = devid_str_encode(devid, minor);
3261 if (minor != NULL)
3262 devid_str_free(minor);
3263 devid_free(devid);
3265 (void) close(fd);
3267 return (ret);
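/*
 * Illustrative sketch, not part of the original source: the devid round
 * trip used by zpool_vdev_name() below -- compare the stored devid with
 * one freshly derived from the path to detect a disk that has moved.
 * The helper is hypothetical.
 */
static boolean_t
example_path_is_current(const char *path, const char *stored_devid)
{
	char *now = path_to_devid(path);
	boolean_t same = (now != NULL && strcmp(now, stored_devid) == 0);

	if (now != NULL)
		devid_str_free(now);
	return (same);
}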
3271 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3272 * ignore any failure here, since a common case is for an unprivileged user to
3273 * type 'zpool status', and we'll display the correct information anyway.
3275 static void
3276 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3278 zfs_cmd_t zc = { 0 };
3280 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3281 (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
3282 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3283 &zc.zc_guid) == 0);
3285 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3289 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3290 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3291 * We also check if this is a whole disk, in which case we strip off the
3292 * trailing 's0' slice name.
3294 * This routine is also responsible for identifying when disks have been
3295 * reconfigured in a new location. The kernel will have opened the device by
3296 * devid, but the path will still refer to the old location. To catch this, we
3297 * first do a path -> devid translation (which is fast for the common case). If
3298 * the devid matches, we're done. If not, we do a reverse devid -> path
3299 * translation and issue the appropriate ioctl() to update the path of the vdev.
3300 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3301 * of these checks.
3303 char *
3304 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3305 boolean_t verbose)
3307 char *path, *devid;
3308 uint64_t value;
3309 char buf[64];
3310 vdev_stat_t *vs;
3311 uint_t vsc;
3313 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3314 &value) == 0) {
3315 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3316 &value) == 0);
3317 (void) snprintf(buf, sizeof (buf), "%llu",
3318 (u_longlong_t)value);
3319 path = buf;
3320 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3323 * If the device is dead (faulted, offline, etc) then don't
3324 * bother opening it. Otherwise we may be forcing the user to
3325 * open a misbehaving device, which can have undesirable
3326 * effects.
3328 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3329 (uint64_t **)&vs, &vsc) != 0 ||
3330 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3331 zhp != NULL &&
3332 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3334 * Determine if the current path is correct.
3336 char *newdevid = path_to_devid(path);
3338 if (newdevid == NULL ||
3339 strcmp(devid, newdevid) != 0) {
3340 char *newpath;
3342 if ((newpath = devid_to_path(devid)) != NULL) {
3344 * Update the path appropriately.
3346 set_path(zhp, nv, newpath);
3347 if (nvlist_add_string(nv,
3348 ZPOOL_CONFIG_PATH, newpath) == 0)
3349 verify(nvlist_lookup_string(nv,
3350 ZPOOL_CONFIG_PATH,
3351 &path) == 0);
3352 free(newpath);
3356 if (newdevid)
3357 devid_str_free(newdevid);
3360 if (strncmp(path, "/dev/dsk/", 9) == 0)
3361 path += 9;
3363 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3364 &value) == 0 && value) {
3365 int pathlen = strlen(path);
3366 char *tmp = zfs_strdup(hdl, path);
3369 * If it starts with c#, and ends with "s0", chop
3370 * the "s0" off, or if it ends with "s0/old", remove
3371 * the "s0" from the middle.
3373 if (CTD_CHECK(tmp)) {
3374 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3375 tmp[pathlen - 2] = '\0';
3376 } else if (pathlen > 6 &&
3377 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3378 (void) strcpy(&tmp[pathlen - 6],
3379 "/old");
3382 return (tmp);
3384 } else {
3385 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3388 * If it's a raidz device, we need to stick in the parity level.
3390 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3391 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3392 &value) == 0);
3393 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3394 (u_longlong_t)value);
3395 path = buf;
3399 * We identify each top-level vdev by using a <type-id>
3400 * naming convention.
3402 if (verbose) {
3403 uint64_t id;
3405 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3406 &id) == 0);
3407 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3408 (u_longlong_t)id);
3409 path = buf;
3413 return (zfs_strdup(hdl, path));
3416 static int
3417 zbookmark_mem_compare(const void *a, const void *b)
3419 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3423 * Retrieve the persistent error log, uniquify the members, and return to the
3424 * caller.
3427 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3429 zfs_cmd_t zc = { 0 };
3430 uint64_t count;
3431 zbookmark_phys_t *zb = NULL;
3432 int i;
3435 * Retrieve the raw error list from the kernel. If the number of errors
3436 * has increased, allocate more space and continue until we get the
3437 * entire list.
3439 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3440 &count) == 0);
3441 if (count == 0)
3442 return (0);
3443 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3444 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3445 return (-1);
3446 zc.zc_nvlist_dst_size = count;
3447 (void) strcpy(zc.zc_name, zhp->zpool_name);
3448 for (;;) {
3449 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3450 &zc) != 0) {
3451 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3452 if (errno == ENOMEM) {
3453 void *dst;
3455 count = zc.zc_nvlist_dst_size;
3456 dst = zfs_alloc(zhp->zpool_hdl, count *
3457 sizeof (zbookmark_phys_t));
3458 if (dst == NULL)
3459 return (-1);
3460 zc.zc_nvlist_dst = (uintptr_t)dst;
3461 } else {
3462 return (-1);
3464 } else {
3465 break;
3470 * Sort the resulting bookmarks. This is a little confusing due to the
3471 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3472 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3473 * _not_ copied as part of the process. So we point the start of our
3474 * array appropriately and decrement the total number of elements.
3476 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3477 zc.zc_nvlist_dst_size;
3478 count -= zc.zc_nvlist_dst_size;
3480 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3482 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3485 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3487 for (i = 0; i < count; i++) {
3488 nvlist_t *nv;
3490 /* ignoring zb_blkid and zb_level for now */
3491 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3492 zb[i-1].zb_object == zb[i].zb_object)
3493 continue;
3495 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3496 goto nomem;
3497 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3498 zb[i].zb_objset) != 0) {
3499 nvlist_free(nv);
3500 goto nomem;
3502 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3503 zb[i].zb_object) != 0) {
3504 nvlist_free(nv);
3505 goto nomem;
3507 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3508 nvlist_free(nv);
3509 goto nomem;
3511 nvlist_free(nv);
3514 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3515 return (0);
3517 nomem:
3518 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3519 return (no_memory(zhp->zpool_hdl));
3523 * Upgrade a ZFS pool to the latest on-disk version.
3526 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3528 zfs_cmd_t zc = { 0 };
3529 libzfs_handle_t *hdl = zhp->zpool_hdl;
3531 (void) strcpy(zc.zc_name, zhp->zpool_name);
3532 zc.zc_cookie = new_version;
3534 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3535 return (zpool_standard_error_fmt(hdl, errno,
3536 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3537 zhp->zpool_name));
3538 return (0);
3541 void
3542 zfs_save_arguments(int argc, char **argv, char *string, int len)
3544 (void) strlcpy(string, basename(argv[0]), len);
3545 for (int i = 1; i < argc; i++) {
3546 (void) strlcat(string, " ", len);
3547 (void) strlcat(string, argv[i], len);
3552 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3554 zfs_cmd_t zc = { 0 };
3555 nvlist_t *args;
3556 int err;
3558 args = fnvlist_alloc();
3559 fnvlist_add_string(args, "message", message);
3560 err = zcmd_write_src_nvlist(hdl, &zc, args);
3561 if (err == 0)
3562 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3563 nvlist_free(args);
3564 zcmd_free_nvlists(&zc);
3565 return (err);
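/*
 * Illustrative sketch, not part of the original source: pairing
 * zfs_save_arguments() with zpool_log_history() to record a command
 * invocation, which is how entries end up in 'zpool history'. The
 * helper is hypothetical.
 */
static void
example_log_invocation(libzfs_handle_t *hdl, int argc, char **argv)
{
	char history[HIS_MAX_RECORD_LEN];

	zfs_save_arguments(argc, argv, history, sizeof (history));
	(void) zpool_log_history(hdl, history);
}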
3569 * Perform ioctl to get some command history of a pool.
3571 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3572 * logical offset of the history buffer to start reading from.
3574 * Upon return, 'off' is the next logical offset to read from and
3575 * 'len' is the actual number of bytes read into 'buf'.
3577 static int
3578 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3580 zfs_cmd_t zc = { 0 };
3581 libzfs_handle_t *hdl = zhp->zpool_hdl;
3583 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3585 zc.zc_history = (uint64_t)(uintptr_t)buf;
3586 zc.zc_history_len = *len;
3587 zc.zc_history_offset = *off;
3589 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3590 switch (errno) {
3591 case EPERM:
3592 return (zfs_error_fmt(hdl, EZFS_PERM,
3593 dgettext(TEXT_DOMAIN,
3594 "cannot show history for pool '%s'"),
3595 zhp->zpool_name));
3596 case ENOENT:
3597 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3598 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3599 "'%s'"), zhp->zpool_name));
3600 case ENOTSUP:
3601 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3602 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3603 "'%s', pool must be upgraded"), zhp->zpool_name));
3604 default:
3605 return (zpool_standard_error_fmt(hdl, errno,
3606 dgettext(TEXT_DOMAIN,
3607 "cannot get history for '%s'"), zhp->zpool_name));
3611 *len = zc.zc_history_len;
3612 *off = zc.zc_history_offset;
3614 return (0);
3618 * Process the buffer of nvlists, unpacking and storing each nvlist record
3619 * into 'records'. 'leftover' is set to the number of bytes that weren't
3620 * processed as there wasn't a complete record.
3623 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3624 nvlist_t ***records, uint_t *numrecords)
3626 uint64_t reclen;
3627 nvlist_t *nv;
3628 int i;
3630 while (bytes_read > sizeof (reclen)) {
3632 /* get length of packed record (stored as little endian) */
3633 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3634 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3636 if (bytes_read < sizeof (reclen) + reclen)
3637 break;
3639 /* unpack record */
3640 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3641 return (ENOMEM);
3642 bytes_read -= sizeof (reclen) + reclen;
3643 buf += sizeof (reclen) + reclen;
3645 /* add record to nvlist array */
3646 (*numrecords)++;
3647 if (ISP2(*numrecords + 1)) {
3648 nvlist_t **tmp = realloc(*records,
3649 *numrecords * 2 * sizeof (nvlist_t *));
	if (tmp == NULL) {
		nvlist_free(nv);
		return (ENOMEM);
	}
	*records = tmp;
3651 (*records)[*numrecords - 1] = nv;
3654 *leftover = bytes_read;
3655 return (0);
3659 * Retrieve the command history of a pool.
3662 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3664 char *buf;
3665 int buflen = 128 * 1024;
3666 uint64_t off = 0;
3667 nvlist_t **records = NULL;
3668 uint_t numrecords = 0;
3669 int err, i;
3671 buf = malloc(buflen);
3672 if (buf == NULL)
3673 return (ENOMEM);
3674 do {
3675 uint64_t bytes_read = buflen;
3676 uint64_t leftover;
3678 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3679 break;
3681 /* if nothing else was read in, we're at EOF, just return */
3682 if (!bytes_read)
3683 break;
3685 if ((err = zpool_history_unpack(buf, bytes_read,
3686 &leftover, &records, &numrecords)) != 0)
3687 break;
3688 off -= leftover;
3689 if (leftover == bytes_read) {
3691 * no progress made, because buffer is not big enough
3692 * to hold this record; resize and retry.
3694 buflen *= 2;
3695 free(buf);
3696 buf = malloc(buflen);
3697 if (buf == NULL)
3698 return (ENOMEM);
3701 /* CONSTCOND */
3702 } while (1);
3704 free(buf);
3706 if (!err) {
3707 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3708 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3709 records, numrecords) == 0);
3711 for (i = 0; i < numrecords; i++)
3712 nvlist_free(records[i]);
3713 free(records);
3715 return (err);
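/*
 * Illustrative sketch, not part of the original source: walking the
 * unpacked records and printing the human-readable command strings,
 * roughly what 'zpool history' does. The helper is hypothetical.
 */
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t nrec, i;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;
	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &nrec) == 0);
	for (i = 0; i < nrec; i++) {
		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%s\n", cmd);
	}
	nvlist_free(nvhis);
}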
3718 void
3719 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3720 char *pathname, size_t len)
3722 zfs_cmd_t zc = { 0 };
3723 boolean_t mounted = B_FALSE;
3724 char *mntpnt = NULL;
3725 char dsname[MAXNAMELEN];
3727 if (dsobj == 0) {
3728 /* special case for the MOS */
3729 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3730 return;
3733 /* get the dataset's name */
3734 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3735 zc.zc_obj = dsobj;
3736 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3737 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3738 /* just write out a path of two object numbers */
3739 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3740 dsobj, obj);
3741 return;
3743 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3745 /* find out if the dataset is mounted */
3746 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3748 /* get the corrupted object's path */
3749 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3750 zc.zc_obj = obj;
3751 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3752 &zc) == 0) {
3753 if (mounted) {
3754 (void) snprintf(pathname, len, "%s%s", mntpnt,
3755 zc.zc_value);
3756 } else {
3757 (void) snprintf(pathname, len, "%s:%s",
3758 dsname, zc.zc_value);
3760 } else {
3761 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3763 free(mntpnt);
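/*
 * Illustrative sketch, not part of the original source: combining
 * zpool_get_errlog() with zpool_obj_to_path() to print a file name for
 * each persistent error, as 'zpool status -v' does. The helper is
 * hypothetical.
 */
static void
example_print_errors(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;
	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}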
3767 * Read the EFI label from the config; if a label does not exist, then
3768 * pass back the error to the caller. If the caller has passed a non-NULL
3769 * diskaddr argument then we set it to the starting address of the EFI
3770 * partition.
3772 static int
3773 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3775 char *path;
3776 int fd;
3777 char diskname[MAXPATHLEN];
3778 int err = -1;
3780 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3781 return (err);
3783 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3784 strrchr(path, '/'));
3785 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3786 struct dk_gpt *vtoc;
3788 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3789 if (sb != NULL)
3790 *sb = vtoc->efi_parts[0].p_start;
3791 efi_free(vtoc);
3793 (void) close(fd);
3795 return (err);
3799 * Determine where a partition starts on a disk in the current
3800 * configuration.
3802 static diskaddr_t
3803 find_start_block(nvlist_t *config)
3805 nvlist_t **child;
3806 uint_t c, children;
3807 diskaddr_t sb = MAXOFFSET_T;
3808 uint64_t wholedisk;
3810 if (nvlist_lookup_nvlist_array(config,
3811 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3812 if (nvlist_lookup_uint64(config,
3813 ZPOOL_CONFIG_WHOLE_DISK,
3814 &wholedisk) != 0 || !wholedisk) {
3815 return (MAXOFFSET_T);
3817 if (read_efi_label(config, &sb) < 0)
3818 sb = MAXOFFSET_T;
3819 return (sb);
3822 for (c = 0; c < children; c++) {
3823 sb = find_start_block(child[c]);
3824 if (sb != MAXOFFSET_T) {
3825 return (sb);
3828 return (MAXOFFSET_T);
3832 * Label an individual disk. The name provided is the short name,
3833 * stripped of any leading /dev path.
3836 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3838 char path[MAXPATHLEN];
3839 struct dk_gpt *vtoc;
3840 int fd;
3841 size_t resv = EFI_MIN_RESV_SIZE;
3842 uint64_t slice_size;
3843 diskaddr_t start_block;
3844 char errbuf[1024];
3846 /* prepare an error message just in case */
3847 (void) snprintf(errbuf, sizeof (errbuf),
3848 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3850 if (zhp) {
3851 nvlist_t *nvroot;
3853 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3854 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3856 if (zhp->zpool_start_block == 0)
3857 start_block = find_start_block(nvroot);
3858 else
3859 start_block = zhp->zpool_start_block;
3860 zhp->zpool_start_block = start_block;
3861 } else {
3862 /* new pool */
3863 start_block = NEW_START_BLOCK;
3866 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3867 BACKUP_SLICE);
3869 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3871 * This shouldn't happen. We've long since verified that this
3872 * is a valid device.
3874 zfs_error_aux(hdl,
3875 dgettext(TEXT_DOMAIN, "unable to open device"));
3876 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3879 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3881 * The only way this can fail is if we run out of memory, or we
3882 * were unable to read the disk's capacity.
3884 if (errno == ENOMEM)
3885 (void) no_memory(hdl);
3887 (void) close(fd);
3888 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3889 "unable to read disk capacity"), name);
3891 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3894 slice_size = vtoc->efi_last_u_lba + 1;
3895 slice_size -= EFI_MIN_RESV_SIZE;
3896 if (start_block == MAXOFFSET_T)
3897 start_block = NEW_START_BLOCK;
3898 slice_size -= start_block;
3900 vtoc->efi_parts[0].p_start = start_block;
3901 vtoc->efi_parts[0].p_size = slice_size;
3904 * Why we use V_USR: V_BACKUP confuses users, and is considered
3905 * disposable by some EFI utilities (since EFI doesn't have a backup
3906 * slice). V_UNASSIGNED is supposed to be used only for zero size
3907 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3908 * etc. were all pretty specific. V_USR is as close to reality as we
3909 * can get, in the absence of V_OTHER.
3911 vtoc->efi_parts[0].p_tag = V_USR;
3912 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3914 vtoc->efi_parts[8].p_start = slice_size + start_block;
3915 vtoc->efi_parts[8].p_size = resv;
3916 vtoc->efi_parts[8].p_tag = V_RESERVED;
3918 if (efi_write(fd, vtoc) != 0) {
3920 * Some block drivers (like pcata) may not support EFI
3921 * GPT labels. Print out a helpful error message
3922 * directing the user to manually label the disk and
3923 * give a specific slice.
3925 (void) close(fd);
3926 efi_free(vtoc);
3928 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3929 "try using fdisk(1M) and then provide a specific slice"));
3930 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3933 (void) close(fd);
3934 efi_free(vtoc);
3935 return (0);
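/*
 * Illustrative sketch, not part of the original source: the EFI layout
 * produced above -- one large slice 0 for ZFS plus the small reserved
 * slice 8 at the end of the disk:
 *
 *	slice 0: p_start = start_block
 *	         p_size  = (efi_last_u_lba + 1) - resv - start_block
 *	slice 8: p_start = start_block + slice 0's p_size
 *	         p_size  = EFI_MIN_RESV_SIZE
 */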
3938 static boolean_t
3939 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3941 char *type;
3942 nvlist_t **child;
3943 uint_t children, c;
3945 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3946 if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
3947 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3948 strcmp(type, VDEV_TYPE_MISSING) == 0) {
3949 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3950 "vdev type '%s' is not supported"), type);
3951 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3952 return (B_FALSE);
3954 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3955 &child, &children) == 0) {
3956 for (c = 0; c < children; c++) {
3957 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3958 return (B_FALSE);
3961 return (B_TRUE);
3965 * Check if this zvol is allowable for use as a dump device; zero if
3966 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
3968 * Allowable storage configurations include mirrors, all raidz variants, and
3969 * pools with log, cache, and spare devices. Pools which are backed by files or
3970 * have missing/hole vdevs are not suitable.
3973 zvol_check_dump_config(char *arg)
3975 zpool_handle_t *zhp = NULL;
3976 nvlist_t *config, *nvroot;
3977 char *p, *volname;
3978 nvlist_t **top;
3979 uint_t toplevels;
3980 libzfs_handle_t *hdl;
3981 char errbuf[1024];
3982 char poolname[ZPOOL_MAXNAMELEN];
3983 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3984 int ret = 1;
3986 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3987 return (-1);
3990 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3991 "dump is not supported on device '%s'"), arg);
3993 if ((hdl = libzfs_init()) == NULL)
3994 return (1);
3995 libzfs_print_on_error(hdl, B_TRUE);
3997 volname = arg + pathlen;
3999 /* check the configuration of the pool */
4000 if ((p = strchr(volname, '/')) == NULL) {
4001 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4002 "malformed dataset name"));
4003 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4004 return (1);
4005 } else if (p - volname >= ZFS_MAXNAMELEN) {
4006 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4007 "dataset name is too long"));
4008 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4009 return (1);
4010 } else {
4011 (void) strncpy(poolname, volname, p - volname);
4012 poolname[p - volname] = '\0';
4015 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4016 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4017 "could not open pool '%s'"), poolname);
4018 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4019 goto out;
4021 config = zpool_get_config(zhp, NULL);
4022 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4023 &nvroot) != 0) {
4024 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4025 "could not obtain vdev configuration for '%s'"), poolname);
4026 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4027 goto out;
4030 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4031 &top, &toplevels) == 0);
4033 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4034 goto out;
4036 ret = 0;
4038 out:
4039 if (zhp)
4040 zpool_close(zhp);
4041 libzfs_fini(hdl);
4042 return (ret);