/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 RackTop Systems.
 */

/*
 * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
 * It has the following characteristics:
 *
 * - Thread Safe.  libzfs_core is accessible concurrently from multiple
 *   threads.  This is accomplished primarily by avoiding global data
 *   (e.g. caching).  Since it's thread-safe, there is no reason for a
 *   process to have multiple libzfs "instances".  Therefore, we store
 *   our few pieces of data (e.g. the file descriptor) in global
 *   variables.  The fd is reference-counted so that the libzfs_core
 *   library can be "initialized" multiple times (e.g. by different
 *   consumers within the same process).
 *
 * - Committed Interface.  The libzfs_core interface will be committed,
 *   therefore consumers can compile against it and be confident that
 *   their code will continue to work on future releases of this code.
 *   Currently, the interface is Evolving (not Committed), but we intend
 *   to commit to it once it is more complete and we determine that it
 *   meets the needs of all consumers.
 *
 * - Programmatic Error Handling.  libzfs_core communicates errors with
 *   defined error numbers, and doesn't print anything to stdout/stderr.
 *
 * - Thin Layer.  libzfs_core is a thin layer, marshaling arguments
 *   to/from the kernel ioctls.  There is generally a 1:1 correspondence
 *   between libzfs_core functions and ioctls to /dev/zfs.
 *
 * - Clear Atomicity.  Because libzfs_core functions are generally 1:1
 *   with kernel ioctls, and kernel ioctls are generally atomic, each
 *   libzfs_core function is atomic.  For example, creating multiple
 *   snapshots with a single call to lzc_snapshot() is atomic -- it
 *   can't fail with only some of the requested snapshots created, even
 *   in the event of power loss or system crash.
 *
 * - Continued libzfs Support.  Some higher-level operations (e.g.
 *   support for "zfs send -R") are too complicated to fit the scope of
 *   libzfs_core.  This functionality will continue to live in libzfs.
 *   Where appropriate, libzfs will use the underlying atomic operations
 *   of libzfs_core.  For example, libzfs may implement "zfs send -R |
 *   zfs receive" by using individual "send one snapshot", rename,
 *   destroy, and "receive one snapshot" operations in libzfs_core.
 *   /sbin/zfs and /sbin/zpool will link with both libzfs and
 *   libzfs_core.  Other consumers should aim to use only libzfs_core,
 *   since that will be the supported, stable interface going forwards.
 */

#include <libzfs_core.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/nvpair.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>

static int g_fd = -1;
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_refcount;

int
libzfs_core_init(void)
{
	(void) pthread_mutex_lock(&g_lock);
	if (g_refcount == 0) {
		g_fd = open("/dev/zfs", O_RDWR);
		if (g_fd < 0) {
			(void) pthread_mutex_unlock(&g_lock);
			return (errno);
		}
	}
	g_refcount++;
	(void) pthread_mutex_unlock(&g_lock);
	return (0);
}

void
libzfs_core_fini(void)
{
	(void) pthread_mutex_lock(&g_lock);
	ASSERT3S(g_refcount, >, 0);

	if (g_refcount > 0)
		g_refcount--;

	if (g_refcount == 0 && g_fd != -1) {
		(void) close(g_fd);
		g_fd = -1;
	}
	(void) pthread_mutex_unlock(&g_lock);
}

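/*
 * Illustrative usage sketch (not part of the original source): since the
 * /dev/zfs file descriptor is reference-counted, each consumer simply pairs
 * its own init/fini calls.
 *
 *	int err = libzfs_core_init();
 *	if (err != 0)
 *		return (err);		// errno from open("/dev/zfs")
 *	// ... call lzc_*() functions ...
 *	libzfs_core_fini();
 */
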
static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
    nvlist_t *source, nvlist_t **resultp)
{
	zfs_cmd_t zc = { 0 };
	int error = 0;
	char *packed;
	size_t size;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));

	packed = fnvlist_pack(source, &size);
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;

	if (resultp != NULL) {
		*resultp = NULL;
		zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);
		if (zc.zc_nvlist_dst == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	while (ioctl(g_fd, ioc, &zc) != 0) {
		if (errno == ENOMEM && resultp != NULL) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			zc.zc_nvlist_dst_size *= 2;
			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
			    malloc(zc.zc_nvlist_dst_size);
			if (zc.zc_nvlist_dst == NULL) {
				error = ENOMEM;
				goto out;
			}
		} else {
			error = errno;
			break;
		}
	}
	if (zc.zc_nvlist_dst_filled) {
		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
		    zc.zc_nvlist_dst_size);
	}

out:
	fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}

int
lzc_create(const char *fsname, enum lzc_dataset_type type, nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_int32(args, "type", (dmu_objset_type_t)type);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

int
lzc_clone(const char *fsname, const char *origin,
    nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_string(args, "origin", origin);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

int
lzc_promote(const char *fsname, char *snapnamebuf, int snapnamelen)
{
	/*
	 * The promote ioctl is still legacy, so we need to construct our
	 * own zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
	if (ioctl(g_fd, ZFS_IOC_PROMOTE, &zc) != 0) {
		int error = errno;
		if (error == EEXIST && snapnamebuf != NULL)
			(void) strlcpy(snapnamebuf, zc.zc_string, snapnamelen);
		return (error);
	}
	return (0);
}

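/*
 * Illustrative usage sketch (not part of the original source): promoting a
 * clone and reporting the conflicting snapshot name on EEXIST.  Dataset
 * names are hypothetical.
 *
 *	char conflsnap[ZFS_MAX_DATASET_NAME_LEN];
 *	int err = lzc_promote("tank/clone", conflsnap, sizeof (conflsnap));
 *	if (err == EEXIST)
 *		// a snapshot with this name exists in both the clone and
 *		// its origin; rename one of them and retry
 *		(void) fprintf(stderr, "conflicting snapshot: %s\n",
 *		    conflsnap);
 */
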
/*
 * Creates snapshots.
 *
 * The keys in the snaps nvlist are the snapshots to be created.
 * They must all be in the same pool.
 *
 * The props nvlist is properties to set.  Currently only user properties
 * are supported.  { user:prop_name -> string value }
 *
 * The returned results nvlist will have an entry for each snapshot that
 * failed.  The value will be the (int32) error code.
 *
 * The return value will be 0 if all snapshots were created, otherwise it will
 * be the errno of an (unspecified) snapshot that failed.
 */
int
lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
{
	nvpair_t *elem;
	nvlist_t *args;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	*errlist = NULL;

	/* determine the pool name */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "snaps", snaps);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);

	error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
	nvlist_free(args);

	return (error);
}

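/*
 * Illustrative usage sketch (not part of the original source): atomically
 * creating two snapshots in the same pool.  Dataset names are hypothetical.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(snaps, "tank/fs1@backup");
 *	fnvlist_add_boolean(snaps, "tank/fs2@backup");
 *	int err = lzc_snapshot(snaps, NULL, &errlist);
 *	nvlist_free(snaps);
 *	nvlist_free(errlist);
 */
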
/*
 * Destroys snapshots.
 *
 * The keys in the snaps nvlist are the snapshots to be destroyed.
 * They must all be in the same pool.
 *
 * Snapshots that do not exist will be silently ignored.
 *
 * If 'defer' is not set, and a snapshot has user holds or clones, the
 * destroy operation will fail and none of the snapshots will be
 * destroyed.
 *
 * If 'defer' is set, and a snapshot has user holds or clones, it will be
 * marked for deferred destruction, and will be destroyed when the last hold
 * or clone is removed/destroyed.
 *
 * The return value will be 0 if all snapshots were destroyed (or marked for
 * later destruction if 'defer' is set) or didn't exist to begin with.
 *
 * Otherwise the return value will be the errno of an (unspecified) snapshot
 * that failed, no snapshots will be destroyed, and the errlist will have an
 * entry for each snapshot that failed.  The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
{
	nvpair_t *elem;
	nvlist_t *args;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "snaps", snaps);
	if (defer)
		fnvlist_add_boolean(args, "defer");

	error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
	nvlist_free(args);

	return (error);
}

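/*
 * Illustrative usage sketch (not part of the original source): deferred
 * destruction of a snapshot that may still have holds or clones.  Names
 * are hypothetical.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(snaps, "tank/fs1@backup");
 *	int err = lzc_destroy_snaps(snaps, B_TRUE, &errlist);
 *	nvlist_free(snaps);
 *	nvlist_free(errlist);
 */
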
int
lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
    uint64_t *usedp)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;
	char fs[ZFS_MAX_DATASET_NAME_LEN];
	char *atp;

	/* determine the fs name */
	(void) strlcpy(fs, firstsnap, sizeof (fs));
	atp = strchr(fs, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	args = fnvlist_alloc();
	fnvlist_add_string(args, "firstsnap", firstsnap);

	err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
	nvlist_free(args);
	if (err == 0)
		*usedp = fnvlist_lookup_uint64(result, "used");
	fnvlist_free(result);

	return (err);
}

boolean_t
lzc_exists(const char *dataset)
{
	/*
	 * The objset_stats ioctl is still legacy, so we need to construct our
	 * own zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
	return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
}

/*
 * Create "user holds" on snapshots.  If there is a hold on a snapshot,
 * the snapshot can not be destroyed.  (However, it can be marked for deletion
 * by lzc_destroy_snaps(defer=B_TRUE).)
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is the name of the hold (string type).
 *
 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
 * In this case, when the cleanup_fd is closed (including on process
 * termination), the holds will be released.  If the system is shut down
 * uncleanly, the holds will be released when the pool is next opened
 * or imported.
 *
 * Holds for snapshots which don't exist will be skipped and have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if all holds, for snapshots that existed,
 * were successfully created.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed and no holds will be created.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * (name = snapshot), with its value being the error code (int32).
 */
int
lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *args;
	nvpair_t *elem;
	int error;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "holds", holds);
	if (cleanup_fd != -1)
		fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);

	error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
	nvlist_free(args);
	return (error);
}

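/*
 * Illustrative usage sketch (not part of the original source): placing a
 * hold that is released automatically when the cleanup fd is closed.  The
 * dataset name and hold tag are hypothetical; the exclusive open follows
 * the requirement described in the comment above.
 *
 *	int cleanup_fd = open("/dev/zfs", O_RDWR | O_EXCL);
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_string(holds, "tank/fs1@backup", "my-hold-tag");
 *	int err = lzc_hold(holds, cleanup_fd, &errlist);
 *	nvlist_free(holds);
 *	nvlist_free(errlist);
 *	// closing cleanup_fd (or process exit) releases the hold
 */
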
/*
 * Release "user holds" on snapshots.  If the snapshot has been marked for
 * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
 * any clones, and all the user holds are removed, then the snapshot will be
 * destroyed.
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is an nvlist whose keys are the holds to remove.
 *
 * Holds which failed to release because they didn't exist will have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if the nvl of holds was empty or all holds that
 * existed were successfully removed.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed to release and no holds will be released.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * to release.
 */
int
lzc_release(nvlist_t *holds, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvpair_t *elem;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
}

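/*
 * Illustrative usage sketch (not part of the original source): releasing a
 * named hold.  The value for each snapshot is an nvlist keyed by hold name.
 * Names and the hold tag are hypothetical.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *tags = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(tags, "my-hold-tag");
 *	fnvlist_add_nvlist(holds, "tank/fs1@backup", tags);
 *	int err = lzc_release(holds, &errlist);
 *	nvlist_free(tags);
 *	nvlist_free(holds);
 *	nvlist_free(errlist);
 */
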
/*
 * Retrieve list of user holds on the specified snapshot.
 *
 * On success, *holdsp will be set to an nvlist which the caller must free.
 * The keys are the names of the holds, and the value is the creation time
 * of the hold (uint64) in seconds since the epoch.
 */
int
lzc_get_holds(const char *snapname, nvlist_t **holdsp)
{
	int error;
	nvlist_t *innvl = fnvlist_alloc();
	error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
	fnvlist_free(innvl);
	return (error);
}

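/*
 * Illustrative usage sketch (not part of the original source): listing the
 * holds on a snapshot.  The snapshot name is hypothetical.
 *
 *	nvlist_t *holds = NULL;
 *	if (lzc_get_holds("tank/fs1@backup", &holds) == 0) {
 *		for (nvpair_t *p = nvlist_next_nvpair(holds, NULL);
 *		    p != NULL; p = nvlist_next_nvpair(holds, p)) {
 *			(void) printf("%s\t%llu\n", nvpair_name(p),
 *			    (u_longlong_t)fnvpair_value_uint64(p));
 *		}
 *		nvlist_free(holds);
 *	}
 */
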
/*
 * Generate a zfs send stream for the specified snapshot and write it to
 * the specified file descriptor.
 *
 * "snapname" is the full name of the snapshot to send (e.g. "pool/fs@snap")
 *
 * If "from" is NULL, a full (non-incremental) stream will be sent.
 * If "from" is non-NULL, it must be the full name of a snapshot or
 * bookmark to send an incremental from (e.g. "pool/fs@earlier_snap" or
 * "pool/fs#earlier_bmark").  If non-NULL, the specified snapshot or
 * bookmark must represent an earlier point in the history of "snapname".
 * It can be an earlier snapshot in the same filesystem or zvol as "snapname",
 * or it can be the origin of "snapname"'s filesystem, or an earlier
 * snapshot in the origin, etc.
 *
 * "fd" is the file descriptor to write the send stream to.
 *
 * If "flags" contains LZC_SEND_FLAG_LARGE_BLOCK, the stream is permitted
 * to contain DRR_WRITE records with drr_length > 128K, and DRR_OBJECT
 * records with drr_blksz > 128K.
 *
 * If "flags" contains LZC_SEND_FLAG_EMBED_DATA, the stream is permitted
 * to contain DRR_WRITE_EMBEDDED records with drr_etype==BP_EMBEDDED_TYPE_DATA,
 * which the receiving system must support (as indicated by support
 * for the "embedded_data" feature).
 */
int
lzc_send(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags)
{
	return (lzc_send_resume(snapname, from, fd, flags, 0, 0));
}

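/*
 * Illustrative usage sketch (not part of the original source): writing an
 * incremental send stream between two snapshots to a file, with no extra
 * flags.  Names are hypothetical.
 *
 *	int fd = open("/tmp/fs1.zstream", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int err = lzc_send("tank/fs1@today", "tank/fs1@yesterday", fd, 0);
 *	(void) close(fd);
 */
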
int
lzc_send_resume(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags, uint64_t resumeobj, uint64_t resumeoff)
{
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_int32(args, "fd", fd);
	if (from != NULL)
		fnvlist_add_string(args, "fromsnap", from);
	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
		fnvlist_add_boolean(args, "largeblockok");
	if (flags & LZC_SEND_FLAG_EMBED_DATA)
		fnvlist_add_boolean(args, "embedok");
	if (flags & LZC_SEND_FLAG_COMPRESS)
		fnvlist_add_boolean(args, "compressok");
	if (resumeobj != 0 || resumeoff != 0) {
		fnvlist_add_uint64(args, "resume_object", resumeobj);
		fnvlist_add_uint64(args, "resume_offset", resumeoff);
	}
	err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
	nvlist_free(args);
	return (err);
}

534 * "from" can be NULL, a snapshot, or a bookmark.
536 * If from is NULL, a full (non-incremental) stream will be estimated. This
537 * is calculated very efficiently.
539 * If from is a snapshot, lzc_send_space uses the deadlists attached to
540 * each snapshot to efficiently estimate the stream size.
542 * If from is a bookmark, the indirect blocks in the destination snapshot
543 * are traversed, looking for blocks with a birth time since the creation TXG of
544 * the snapshot this bookmark was created from. This will result in
545 * significantly more I/O and be less efficient than a send space estimation on
546 * an equivalent snapshot.
549 lzc_send_space(const char *snapname, const char *from,
550 enum lzc_send_flags flags, uint64_t *spacep)
552 nvlist_t *args;
553 nvlist_t *result;
554 int err;
556 args = fnvlist_alloc();
557 if (from != NULL)
558 fnvlist_add_string(args, "from", from);
559 if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
560 fnvlist_add_boolean(args, "largeblockok");
561 if (flags & LZC_SEND_FLAG_EMBED_DATA)
562 fnvlist_add_boolean(args, "embedok");
563 if (flags & LZC_SEND_FLAG_COMPRESS)
564 fnvlist_add_boolean(args, "compressok");
565 err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
566 nvlist_free(args);
567 if (err == 0)
568 *spacep = fnvlist_lookup_uint64(result, "space");
569 nvlist_free(result);
570 return (err);
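/*
 * Illustrative usage sketch (not part of the original source): estimating
 * the size of an incremental stream before sending it.  Names are
 * hypothetical.
 *
 *	uint64_t space = 0;
 *	int err = lzc_send_space("tank/fs1@today", "tank/fs1@yesterday",
 *	    0, &space);
 *	if (err == 0)
 *		(void) printf("estimated stream size: %llu bytes\n",
 *		    (u_longlong_t)space);
 */
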
static int
recv_read(int fd, void *buf, int ilen)
{
	char *cp = buf;
	int rv;
	int len = ilen;

	do {
		rv = read(fd, cp, len);
		cp += rv;
		len -= rv;
	} while (rv > 0);

	if (rv < 0 || len != 0)
		return (EIO);

	return (0);
}

static int
recv_impl(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, boolean_t resumable, int fd,
    const dmu_replay_record_t *begin_record)
{
	/*
	 * The receive ioctl is still legacy, so we need to construct our own
	 * zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = { 0 };
	char *atp;
	char *packed = NULL;
	size_t size;
	int error;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	/* zc_name is name of containing filesystem */
	(void) strlcpy(zc.zc_name, snapname, sizeof (zc.zc_name));
	atp = strchr(zc.zc_name, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	/* if the fs does not exist, try its parent. */
	if (!lzc_exists(zc.zc_name)) {
		char *slashp = strrchr(zc.zc_name, '/');
		if (slashp == NULL)
			return (ENOENT);
		*slashp = '\0';
	}

	/* zc_value is full name of the snapshot to create */
	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));

	if (props != NULL) {
		/* zc_nvlist_src is props to set */
		packed = fnvlist_pack(props, &size);
		zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
		zc.zc_nvlist_src_size = size;
	}

	/* zc_string is name of clone origin (if DRR_FLAG_CLONE) */
	if (origin != NULL)
		(void) strlcpy(zc.zc_string, origin, sizeof (zc.zc_string));

	/* zc_begin_record is non-byteswapped BEGIN record */
	if (begin_record == NULL) {
		error = recv_read(fd, &zc.zc_begin_record,
		    sizeof (zc.zc_begin_record));
		if (error != 0)
			goto out;
	} else {
		zc.zc_begin_record = *begin_record;
	}

	/* zc_cookie is fd to read from */
	zc.zc_cookie = fd;

	/* zc guid is force flag */
	zc.zc_guid = force;

	zc.zc_resumable = resumable;

	/* zc_cleanup_fd is unused */
	zc.zc_cleanup_fd = -1;

	error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
	if (error != 0)
		error = errno;

out:
	if (packed != NULL)
		fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}

/*
 * The simplest receive case: receive from the specified fd, creating the
 * specified snapshot.  Apply the specified properties as "received" properties
 * (which can be overridden by locally-set properties).  If the stream is a
 * clone, its origin snapshot must be specified by 'origin'.  The 'force'
 * flag will cause the target filesystem to be rolled back or destroyed if
 * necessary to receive.
 *
 * Return 0 on success or an errno on failure.
 *
 * Note: this interface does not work on dedup'd streams
 * (those with DMU_BACKUP_FEATURE_DEDUP).
 */
int
lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	return (recv_impl(snapname, props, origin, force, B_FALSE, fd, NULL));
}

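/*
 * Illustrative usage sketch (not part of the original source): receiving a
 * non-clone stream from a file into a new snapshot.  Names are hypothetical.
 *
 *	int fd = open("/tmp/fs1.zstream", O_RDONLY);
 *	int err = lzc_receive("tank/restored@today", NULL, NULL, B_FALSE, fd);
 *	(void) close(fd);
 */
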
/*
 * Like lzc_receive, but if the receive fails due to premature stream
 * termination, the intermediate state will be preserved on disk.  In this
 * case, ECKSUM will be returned.  The receive may subsequently be resumed
 * with a resuming send stream generated by lzc_send_resume().
 */
int
lzc_receive_resumable(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	return (recv_impl(snapname, props, origin, force, B_TRUE, fd, NULL));
}

/*
 * Like lzc_receive, but allows the caller to read the begin record and then to
 * pass it in.  That could be useful if the caller wants to derive, for example,
 * the snapname or the origin parameters based on the information contained in
 * the begin record.
 * The begin record must be in its original form as read from the stream,
 * in other words, it should not be byteswapped.
 *
 * The 'resumable' parameter allows the caller to obtain the same behavior as
 * with lzc_receive_resumable.
 */
int
lzc_receive_with_header(const char *snapname, nvlist_t *props,
    const char *origin, boolean_t force, boolean_t resumable, int fd,
    const dmu_replay_record_t *begin_record)
{
	if (begin_record == NULL)
		return (EINVAL);
	return (recv_impl(snapname, props, origin, force, resumable, fd,
	    begin_record));
}

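/*
 * Illustrative usage sketch (not part of the original source): reading the
 * (non-byteswapped) BEGIN record first, then handing it to the receive.
 * Names are hypothetical.
 *
 *	dmu_replay_record_t drr;
 *	int fd = open("/tmp/fs1.zstream", O_RDONLY);
 *	if (read(fd, &drr, sizeof (drr)) == sizeof (drr)) {
 *		// inspect drr (e.g. drr.drr_u.drr_begin) to pick a snapname,
 *		// then pass the record through unchanged
 *		(void) lzc_receive_with_header("tank/restored@today", NULL,
 *		    NULL, B_FALSE, B_FALSE, fd, &drr);
 *	}
 *	(void) close(fd);
 */
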
/*
 * Roll back this filesystem or volume to its most recent snapshot.
 * If snapnamebuf is not NULL, it will be filled in with the name
 * of the most recent snapshot.
 *
 * Return 0 on success or an errno on failure.
 */
int
lzc_rollback(const char *fsname, char *snapnamebuf, int snapnamelen)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
	nvlist_free(args);
	if (err == 0 && snapnamebuf != NULL) {
		const char *snapname = fnvlist_lookup_string(result, "target");
		(void) strlcpy(snapnamebuf, snapname, snapnamelen);
	}
	nvlist_free(result);

	return (err);
}

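/*
 * Illustrative usage sketch (not part of the original source): rolling a
 * filesystem back and reporting which snapshot it was rolled back to.
 * Names are hypothetical.
 *
 *	char snap[ZFS_MAX_DATASET_NAME_LEN];
 *	if (lzc_rollback("tank/fs1", snap, sizeof (snap)) == 0)
 *		(void) printf("rolled back to %s\n", snap);
 */
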
/*
 * Creates bookmarks.
 *
 * The bookmarks nvlist maps from name of the bookmark (e.g. "pool/fs#bmark")
 * to the name of the snapshot (e.g. "pool/fs@snap").  All the bookmarks and
 * snapshots must be in the same pool.
 *
 * The returned results nvlist will have an entry for each bookmark that
 * failed.  The value will be the (int32) error code.
 *
 * The return value will be 0 if all bookmarks were created, otherwise it will
 * be the errno of an (undetermined) bookmark that failed.
 */
int
lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bookmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist);

	return (error);
}

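/*
 * Illustrative usage sketch (not part of the original source): bookmarking a
 * snapshot so it can later serve as an incremental send source.  Names are
 * hypothetical.
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_string(bmarks, "tank/fs1#yesterday", "tank/fs1@yesterday");
 *	int err = lzc_bookmark(bmarks, &errlist);
 *	nvlist_free(bmarks);
 *	nvlist_free(errlist);
 */
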
/*
 * Retrieve bookmarks.
 *
 * Retrieve the list of bookmarks for the given file system.  The props
 * parameter is an nvlist of property names (with no values) that will be
 * returned for each bookmark.
 *
 * The following are valid properties on bookmarks, all of which are numbers
 * (represented as uint64 in the nvlist)
 *
 * "guid" - globally unique identifier of the snapshot it refers to
 * "createtxg" - txg when the snapshot it refers to was created
 * "creation" - timestamp when the snapshot it refers to was created
 *
 * The format of the returned nvlist is as follows:
 * <short name of bookmark> -> {
 *     <name of property> -> {
 *         "value" -> uint64
 *     }
 * }
 */
int
lzc_get_bookmarks(const char *fsname, nvlist_t *props, nvlist_t **bmarks)
{
	return (lzc_ioctl(ZFS_IOC_GET_BOOKMARKS, fsname, props, bmarks));
}

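/*
 * Illustrative usage sketch (not part of the original source): listing the
 * bookmarks on a filesystem, requesting their creation txg.  Names are
 * hypothetical.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	nvlist_t *bmarks = NULL;
 *	fnvlist_add_boolean(props, "createtxg");
 *	if (lzc_get_bookmarks("tank/fs1", props, &bmarks) == 0) {
 *		for (nvpair_t *p = nvlist_next_nvpair(bmarks, NULL);
 *		    p != NULL; p = nvlist_next_nvpair(bmarks, p))
 *			(void) printf("#%s\n", nvpair_name(p));
 *		nvlist_free(bmarks);
 *	}
 *	nvlist_free(props);
 */
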
/*
 * Destroys bookmarks.
 *
 * The keys in the bmarks nvlist are the bookmarks to be destroyed.
 * They must all be in the same pool.  Bookmarks are specified as
 * <fs>#<bmark>.
 *
 * Bookmarks that do not exist will be silently ignored.
 *
 * The return value will be 0 if all bookmarks that existed were destroyed.
 *
 * Otherwise the return value will be the errno of an (undetermined) bookmark
 * that failed, no bookmarks will be destroyed, and the errlist will have an
 * entry for each bookmark that failed.  The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist);

	return (error);
}
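/*
 * Illustrative usage sketch (not part of the original source): destroying a
 * bookmark once it is no longer needed as an incremental source.  Names are
 * hypothetical.
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(bmarks, "tank/fs1#yesterday");
 *	int err = lzc_destroy_bookmarks(bmarks, &errlist);
 *	nvlist_free(bmarks);
 *	nvlist_free(errlist);
 */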