/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
    dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
    ssize_t resid; /* have to get resid to get detailed errno */

    ASSERT3U(len % 8, ==, 0);

    fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
    dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
        (caddr_t)buf, len,
        0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

    mutex_enter(&ds->ds_sendstream_lock);
    *dsp->dsa_off += len;
    mutex_exit(&ds->ds_sendstream_lock);

    return (dsp->dsa_err);
}
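
/*
 * For illustration (not from the original file): the stream checksum in
 * dsp->dsa_zc accumulates over every byte written by dump_bytes(), and is
 * later emitted in the DRR_END record.  A minimal sketch of the same
 * accumulation pattern, with hypothetical buffers buf1/buf2 and lengths
 * len1/len2 (both multiples of 8):
 *
 *	zio_cksum_t zc;
 *	ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
 *	fletcher_4_incremental_native(buf1, len1, &zc);
 *	fletcher_4_incremental_native(buf2, len2, &zc);
 *
 * zc now holds the same value as a single fletcher-4 pass over buf1
 * followed by buf2, which is what lets the receiver verify the stream
 * incrementally.
 */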

static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
    struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

    /* range sanity check */
    if (length != -1ULL && offset + length < offset)
        return (EINVAL);

    /*
     * If there is a pending op, but it's not PENDING_FREE, push it out,
     * since free block aggregation can only be done for blocks of the
     * same type (i.e., DRR_FREE records can only be aggregated with
     * other DRR_FREE records.  DRR_FREEOBJECTS records can only be
     * aggregated with other DRR_FREEOBJECTS records).
     */
    if (dsp->dsa_pending_op != PENDING_NONE &&
        dsp->dsa_pending_op != PENDING_FREE) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (EINTR);
        dsp->dsa_pending_op = PENDING_NONE;
    }

    if (dsp->dsa_pending_op == PENDING_FREE) {
        /*
         * There should never be a PENDING_FREE if length is -1
         * (because dump_dnode is the only place where this
         * function is called with a -1, and only after flushing
         * any pending record).
         */
        ASSERT(length != -1ULL);
        /*
         * Check to see whether this free block can be aggregated
         * with the pending one.
         */
        if (drrf->drr_object == object && drrf->drr_offset +
            drrf->drr_length == offset) {
            drrf->drr_length += length;
            return (0);
        } else {
            /* not a continuation.  Push out pending record */
            if (dump_bytes(dsp, dsp->dsa_drr,
                sizeof (dmu_replay_record_t)) != 0)
                return (EINTR);
            dsp->dsa_pending_op = PENDING_NONE;
        }
    }
    /* create a FREE record and make it pending */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_FREE;
    drrf->drr_object = object;
    drrf->drr_offset = offset;
    drrf->drr_length = length;
    drrf->drr_toguid = dsp->dsa_toguid;
    if (length == -1ULL) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (EINTR);
    } else {
        dsp->dsa_pending_op = PENDING_FREE;
    }

    return (0);
}
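
/*
 * For illustration (not from the original file): because the most recent
 * DRR_FREE record is held pending, adjacent frees of the same object
 * coalesce into a single record.  E.g., for a hypothetical object 5 with
 * two contiguous 4K holes:
 *
 *	(void) dump_free(dsp, 5, 0, 4096);	pending: offset 0, len 4096
 *	(void) dump_free(dsp, 5, 4096, 4096);	pending: offset 0, len 8192
 *
 * Only one dmu_replay_record_t reaches the stream for both calls, when
 * the pending record is eventually pushed out.
 */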

static int
dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
    struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

    /*
     * If there is any kind of pending aggregation (currently either
     * a grouping of free objects or free blocks), push it out to
     * the stream, since aggregation can't be done across operations
     * of different types.
     */
    if (dsp->dsa_pending_op != PENDING_NONE) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (EINTR);
        dsp->dsa_pending_op = PENDING_NONE;
    }
    /* write a DATA record */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_WRITE;
    drrw->drr_object = object;
    drrw->drr_type = type;
    drrw->drr_offset = offset;
    drrw->drr_length = blksz;
    drrw->drr_toguid = dsp->dsa_toguid;
    drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
    if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
        drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
    DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
    DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
    DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
    drrw->drr_key.ddk_cksum = bp->blk_cksum;

    if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
        return (EINTR);
    if (dump_bytes(dsp, data, blksz) != 0)
        return (EINTR);
    return (0);
}
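
/*
 * For illustration (not from the original file): on the wire a DRR_WRITE
 * is the fixed-size record header followed immediately by the raw block
 * payload:
 *
 *	+----------------------------------+---------------------+
 *	| dmu_replay_record_t (DRR_WRITE)  | blksz bytes of data |
 *	+----------------------------------+---------------------+
 *
 * The receiver uses drr_length (== blksz here) to know how much payload
 * to consume before the next record header.
 */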

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
    struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

    if (dsp->dsa_pending_op != PENDING_NONE) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (EINTR);
        dsp->dsa_pending_op = PENDING_NONE;
    }

    /* write a SPILL record */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_SPILL;
    drrs->drr_object = object;
    drrs->drr_length = blksz;
    drrs->drr_toguid = dsp->dsa_toguid;

    if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
        return (EINTR);
    if (dump_bytes(dsp, data, blksz))
        return (EINTR);
    return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
    struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

    /*
     * If there is a pending op, but it's not PENDING_FREEOBJECTS,
     * push it out, since free block aggregation can only be done for
     * blocks of the same type (i.e., DRR_FREE records can only be
     * aggregated with other DRR_FREE records.  DRR_FREEOBJECTS records
     * can only be aggregated with other DRR_FREEOBJECTS records).
     */
    if (dsp->dsa_pending_op != PENDING_NONE &&
        dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (EINTR);
        dsp->dsa_pending_op = PENDING_NONE;
    }
    if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
        /*
         * See whether this free object array can be aggregated
         * with the pending one.
         */
        if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
            drrfo->drr_numobjs += numobjs;
            return (0);
        } else {
            /* can't be aggregated.  Push out pending record */
            if (dump_bytes(dsp, dsp->dsa_drr,
                sizeof (dmu_replay_record_t)) != 0)
                return (EINTR);
            dsp->dsa_pending_op = PENDING_NONE;
        }
    }

    /* write a FREEOBJECTS record */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
    drrfo->drr_firstobj = firstobj;
    drrfo->drr_numobjs = numobjs;
    drrfo->drr_toguid = dsp->dsa_toguid;

    dsp->dsa_pending_op = PENDING_FREEOBJECTS;

    return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
    struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

    if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
        return (dump_freeobjects(dsp, object, 1));

    if (dsp->dsa_pending_op != PENDING_NONE) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (EINTR);
        dsp->dsa_pending_op = PENDING_NONE;
    }

    /* write an OBJECT record */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_OBJECT;
    drro->drr_object = object;
    drro->drr_type = dnp->dn_type;
    drro->drr_bonustype = dnp->dn_bonustype;
    drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
    drro->drr_bonuslen = dnp->dn_bonuslen;
    drro->drr_checksumtype = dnp->dn_checksum;
    drro->drr_compress = dnp->dn_compress;
    drro->drr_toguid = dsp->dsa_toguid;

    if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
        return (EINTR);

    if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
        return (EINTR);

    /* free anything past the end of the file */
    if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
        (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
        return (EINTR);
    if (dsp->dsa_err)
        return (EINTR);
    return (0);
}

#define	BP_SPAN(dnp, level) \
    (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
    (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
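
/*
 * For illustration (not from the original file): BP_SPAN() is the number
 * of bytes of file data covered by one block pointer at a given
 * indirection level.  With 128K data blocks (dn_datablkszsec = 256) and
 * 16K indirect blocks (dn_indblkshift = 14, so each indirect block holds
 * 16K / sizeof (blkptr_t) = 128 pointers, i.e. a shift of 7 per level):
 *
 *	BP_SPAN(dnp, 0) = 128K		(one data block)
 *	BP_SPAN(dnp, 1) = 128K << 7  = 16M
 *	BP_SPAN(dnp, 2) = 128K << 14 = 2G
 */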

/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
    dmu_sendarg_t *dsp = arg;
    dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
    int err = 0;

    if (issig(JUSTLOOKING) && issig(FORREAL))
        return (EINTR);

    if (zb->zb_object != DMU_META_DNODE_OBJECT &&
        DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
        return (0);
    } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
        uint64_t span = BP_SPAN(dnp, zb->zb_level);
        uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
        err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
    } else if (bp == NULL) {
        uint64_t span = BP_SPAN(dnp, zb->zb_level);
        err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
    } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
        return (0);
    } else if (type == DMU_OT_DNODE) {
        dnode_phys_t *blk;
        int i;
        int blksz = BP_GET_LSIZE(bp);
        uint32_t aflags = ARC_WAIT;
        arc_buf_t *abuf;

        if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
            &aflags, zb) != 0)
            return (EIO);

        blk = abuf->b_data;
        for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
            uint64_t dnobj = (zb->zb_blkid <<
                (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
            err = dump_dnode(dsp, dnobj, blk+i);
            if (err)
                break;
        }
        (void) arc_buf_remove_ref(abuf, &abuf);
    } else if (type == DMU_OT_SA) {
        uint32_t aflags = ARC_WAIT;
        arc_buf_t *abuf;
        int blksz = BP_GET_LSIZE(bp);

        if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
            &aflags, zb) != 0)
            return (EIO);

        err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
        (void) arc_buf_remove_ref(abuf, &abuf);
    } else { /* it's a level-0 block of a regular object */
        uint32_t aflags = ARC_WAIT;
        arc_buf_t *abuf;
        int blksz = BP_GET_LSIZE(bp);

        if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
            &aflags, zb) != 0) {
            if (zfs_send_corrupt_data) {
                uint64_t *ptr;
                /* Send a block filled with 0x"zfs badd bloc" */
                abuf = arc_buf_alloc(spa, blksz, &abuf,
                    ARC_BUFC_DATA);
                for (ptr = abuf->b_data;
                    (char *)ptr < (char *)abuf->b_data + blksz;
                    ptr++)
                    *ptr = 0x2f5baddb10c;
            } else {
                return (EIO);
            }
        }

        err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
            blksz, bp, abuf->b_data);
        (void) arc_buf_remove_ref(abuf, &abuf);
    }

    ASSERT(err == 0 || err == EINTR);
    return (err);
}

/*
 * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
 * For example, they could both be snapshots of the same filesystem, and
 * 'earlier' is before 'later'.  Or 'earlier' could be the origin of
 * 'later's filesystem.  Or 'earlier' could be an older snapshot in the
 * origin's filesystem.  Or 'earlier' could be the origin's origin.
 */
static boolean_t
is_before(dsl_dataset_t *later, dsl_dataset_t *earlier)
{
    dsl_pool_t *dp = later->ds_dir->dd_pool;
    boolean_t ret;
    int error;
    dsl_dataset_t *origin;

    if (earlier->ds_phys->ds_creation_txg >=
        later->ds_phys->ds_creation_txg)
        return (B_FALSE);

    if (later->ds_dir == earlier->ds_dir)
        return (B_TRUE);
    if (!dsl_dir_is_clone(later->ds_dir))
        return (B_FALSE);

    rw_enter(&dp->dp_config_rwlock, RW_READER);
    if (later->ds_dir->dd_phys->dd_origin_obj == earlier->ds_object) {
        rw_exit(&dp->dp_config_rwlock);
        return (B_TRUE);
    }
    error = dsl_dataset_hold_obj(dp,
        later->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin);
    rw_exit(&dp->dp_config_rwlock);
    if (error != 0)
        return (B_FALSE);
    ret = is_before(origin, earlier);
    dsl_dataset_rele(origin, FTAG);
    return (ret);
}
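
/*
 * For illustration (not from the original file), given a hypothetical
 * clone chain
 *
 *	pool/fs@a -> pool/fs@b -> pool/clone (origin pool/fs@b) -> pool/clone@c
 *
 * is_before(pool/clone@c, pool/fs@a) fails the same-ds_dir test, follows
 * pool/clone's origin to pool/fs@b, recurses, and returns TRUE because
 * @a was created before @b in the origin filesystem's timeline.
 */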

int
dmu_send(objset_t *tosnap, objset_t *fromsnap, int outfd, vnode_t *vp,
    offset_t *off)
{
    dsl_dataset_t *ds = tosnap->os_dsl_dataset;
    dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
    dmu_replay_record_t *drr;
    dmu_sendarg_t *dsp;
    int err;
    uint64_t fromtxg = 0;

    /* tosnap must be a snapshot */
    if (ds->ds_phys->ds_next_snap_obj == 0)
        return (EINVAL);

    /*
     * fromsnap must be an earlier snapshot from the same fs as tosnap,
     * or the origin's fs.
     */
    if (fromds != NULL && !is_before(ds, fromds))
        return (EXDEV);

    drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
    drr->drr_type = DRR_BEGIN;
    drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
    DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
        DMU_SUBSTREAM);

#ifdef _KERNEL
    if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
        uint64_t version;
        if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) {
            kmem_free(drr, sizeof (dmu_replay_record_t));
            return (EINVAL);
        }
        if (version == ZPL_VERSION_SA) {
            DMU_SET_FEATUREFLAGS(
                drr->drr_u.drr_begin.drr_versioninfo,
                DMU_BACKUP_FEATURE_SA_SPILL);
        }
    }
#endif

    drr->drr_u.drr_begin.drr_creation_time =
        ds->ds_phys->ds_creation_time;
    drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
    if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
        drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
    drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
    if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
        drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

    if (fromds != NULL)
        drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
    dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

    if (fromds != NULL)
        fromtxg = fromds->ds_phys->ds_creation_txg;

    dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

    dsp->dsa_drr = drr;
    dsp->dsa_vp = vp;
    dsp->dsa_outfd = outfd;
    dsp->dsa_proc = curproc;
    dsp->dsa_os = tosnap;
    dsp->dsa_off = off;
    dsp->dsa_toguid = ds->ds_phys->ds_guid;
    ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
    dsp->dsa_pending_op = PENDING_NONE;

    mutex_enter(&ds->ds_sendstream_lock);
    list_insert_head(&ds->ds_sendstreams, dsp);
    mutex_exit(&ds->ds_sendstream_lock);

    if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
        err = dsp->dsa_err;
        goto out;
    }

    err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
        backup_cb, dsp);

    if (dsp->dsa_pending_op != PENDING_NONE)
        if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
            err = EINTR;

    if (err) {
        if (err == EINTR && dsp->dsa_err)
            err = dsp->dsa_err;
        goto out;
    }

    bzero(drr, sizeof (dmu_replay_record_t));
    drr->drr_type = DRR_END;
    drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
    drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

    if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
        err = dsp->dsa_err;
        goto out;
    }

out:
    mutex_enter(&ds->ds_sendstream_lock);
    list_remove(&ds->ds_sendstreams, dsp);
    mutex_exit(&ds->ds_sendstream_lock);

    kmem_free(drr, sizeof (dmu_replay_record_t));
    kmem_free(dsp, sizeof (dmu_sendarg_t));

    return (err);
}

int
dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, uint64_t *sizep)
{
    dsl_dataset_t *ds = tosnap->os_dsl_dataset;
    dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
    dsl_pool_t *dp = ds->ds_dir->dd_pool;
    int err;
    uint64_t size, recordsize;

    /* tosnap must be a snapshot */
    if (ds->ds_phys->ds_next_snap_obj == 0)
        return (EINVAL);

    /*
     * fromsnap must be an earlier snapshot from the same fs as tosnap,
     * or the origin's fs.
     */
    if (fromds != NULL && !is_before(ds, fromds))
        return (EXDEV);

    /* Get uncompressed size estimate of changed data. */
    if (fromds == NULL) {
        size = ds->ds_phys->ds_uncompressed_bytes;
    } else {
        uint64_t used, comp;
        err = dsl_dataset_space_written(fromds, ds,
            &used, &comp, &size);
        if (err)
            return (err);
    }

    /*
     * Assume that space (both on-disk and in-stream) is dominated by
     * data.  We will adjust for indirect blocks and the copies property,
     * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
     */

    /*
     * Subtract out approximate space used by indirect blocks.
     * Assume most space is used by data blocks (non-indirect, non-dnode).
     * Assume all blocks are recordsize.  Assume ditto blocks and
     * internal fragmentation counter out compression.
     *
     * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
     * block, which we observe in practice.
     */
    rw_enter(&dp->dp_config_rwlock, RW_READER);
    err = dsl_prop_get_ds(ds, "recordsize",
        sizeof (recordsize), 1, &recordsize, NULL);
    rw_exit(&dp->dp_config_rwlock);
    if (err)
        return (err);
    size -= size / recordsize * sizeof (blkptr_t);

    /* Add in the space for the record associated with each block. */
    size += size / recordsize * sizeof (dmu_replay_record_t);

    *sizep = size;

    return (0);
}
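
/*
 * For illustration (not from the original file), assuming
 * sizeof (blkptr_t) == 128 and sizeof (dmu_replay_record_t) == 312 on
 * this codebase: with recordsize = 128K and 1 GiB of changed data
 * (8192 blocks), the adjustment above works out to
 *
 *	size -= 8192 * 128;	1 MiB of indirect-block overhead removed
 *	size += 8192 * 312;	~2.4 MiB of per-block record headers added
 *
 * so the estimate lands slightly above the raw data size, as expected
 * for a stream dominated by DRR_WRITE records.
 */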

struct recvbeginsyncarg {
    const char *tofs;
    const char *tosnap;
    dsl_dataset_t *origin;
    uint64_t fromguid;
    dmu_objset_type_t type;
    void *tag;
    boolean_t force;
    uint64_t dsflags;
    char clonelastname[MAXNAMELEN];
    dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
    cred_t *cr;
};

/* ARGSUSED */
static int
recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
    dsl_dir_t *dd = arg1;
    struct recvbeginsyncarg *rbsa = arg2;
    objset_t *mos = dd->dd_pool->dp_meta_objset;
    uint64_t val;
    int err;

    err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
        strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
    if (err != ENOENT)
        return (err ? err : EEXIST);

    if (rbsa->origin) {
        /* make sure it's a snap in the same pool */
        if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
            return (EXDEV);
        if (!dsl_dataset_is_snapshot(rbsa->origin))
            return (EINVAL);
        if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
            return (ENODEV);
    }

    return (0);
}

static void
recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
    dsl_dir_t *dd = arg1;
    struct recvbeginsyncarg *rbsa = arg2;
    uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
    uint64_t dsobj;

    /* Create and open new dataset. */
    dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
        rbsa->origin, flags, rbsa->cr, tx);
    VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
        B_TRUE, dmu_recv_tag, &rbsa->ds));

    if (rbsa->origin == NULL) {
        (void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
            rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
    }

    spa_history_log_internal_ds(rbsa->ds, "receive new", tx, "");
}

/* ARGSUSED */
static int
recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
    dsl_dataset_t *ds = arg1;
    struct recvbeginsyncarg *rbsa = arg2;
    int err;
    uint64_t val;

    /* must not have any changes since most recent snapshot */
    if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
        return (ETXTBSY);

    /* new snapshot name must not exist */
    err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
        ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
    if (err == 0)
        return (EEXIST);
    if (err != ENOENT)
        return (err);

    if (rbsa->fromguid) {
        /* if incremental, most recent snapshot must match fromguid */
        if (ds->ds_prev == NULL)
            return (ENODEV);

        /*
         * most recent snapshot must match fromguid, or there are no
         * changes since the fromguid one
         */
        if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) {
            uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
            uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
            while (obj != 0) {
                dsl_dataset_t *snap;
                err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
                    obj, FTAG, &snap);
                if (err)
                    return (ENODEV);
                if (snap->ds_phys->ds_creation_txg < birth) {
                    dsl_dataset_rele(snap, FTAG);
                    return (ENODEV);
                }
                if (snap->ds_phys->ds_guid == rbsa->fromguid) {
                    dsl_dataset_rele(snap, FTAG);
                    break; /* it's ok */
                }
                obj = snap->ds_phys->ds_prev_snap_obj;
                dsl_dataset_rele(snap, FTAG);
            }
            if (obj == 0)
                return (ENODEV);
        }
    } else {
        /* if full, most recent snapshot must be $ORIGIN */
        if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
            return (ENODEV);
    }

    /* temporary clone name must not exist */
    err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
        ds->ds_dir->dd_phys->dd_child_dir_zapobj,
        rbsa->clonelastname, 8, 1, &val);
    if (err == 0)
        return (EEXIST);
    if (err != ENOENT)
        return (err);

    return (0);
}

/* ARGSUSED */
static void
recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
    dsl_dataset_t *ohds = arg1;
    struct recvbeginsyncarg *rbsa = arg2;
    dsl_pool_t *dp = ohds->ds_dir->dd_pool;
    dsl_dataset_t *cds;
    uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
    uint64_t dsobj;

    /* create and open the temporary clone */
    dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
        ohds->ds_prev, flags, rbsa->cr, tx);
    VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));

    /*
     * If we actually created a non-clone, we need to create the
     * objset in our new dataset.
     */
    if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
        (void) dmu_objset_create_impl(dp->dp_spa,
            cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
    }

    rbsa->ds = cds;

    spa_history_log_internal_ds(cds, "receive over existing", tx, "");
}

static boolean_t
dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
{
    int featureflags;

    featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);

    /* Verify pool version supports SA if SA_SPILL feature set */
    return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
        (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
    boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc)
{
    int err = 0;
    boolean_t byteswap;
    struct recvbeginsyncarg rbsa = { 0 };
    uint64_t versioninfo;
    int flags;
    dsl_dataset_t *ds;

    if (drrb->drr_magic == DMU_BACKUP_MAGIC)
        byteswap = FALSE;
    else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
        byteswap = TRUE;
    else
        return (EINVAL);

    rbsa.tofs = tofs;
    rbsa.tosnap = tosnap;
    rbsa.origin = origin ? origin->os_dsl_dataset : NULL;
    rbsa.fromguid = drrb->drr_fromguid;
    rbsa.type = drrb->drr_type;
    rbsa.tag = FTAG;
    rbsa.dsflags = 0;
    rbsa.cr = CRED();
    versioninfo = drrb->drr_versioninfo;
    flags = drrb->drr_flags;

    if (byteswap) {
        rbsa.type = BSWAP_32(rbsa.type);
        rbsa.fromguid = BSWAP_64(rbsa.fromguid);
        versioninfo = BSWAP_64(versioninfo);
        flags = BSWAP_32(flags);
    }

    if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM ||
        rbsa.type >= DMU_OST_NUMTYPES ||
        ((flags & DRR_FLAG_CLONE) && origin == NULL))
        return (EINVAL);

    if (flags & DRR_FLAG_CI_DATA)
        rbsa.dsflags = DS_FLAG_CI_DATASET;

    bzero(drc, sizeof (dmu_recv_cookie_t));
    drc->drc_drrb = drrb;
    drc->drc_tosnap = tosnap;
    drc->drc_top_ds = top_ds;
    drc->drc_force = force;

    /*
     * Process the begin in syncing context.
     */

    /* open the dataset we are logically receiving into */
    err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
    if (err == 0) {
        if (dmu_recv_verify_features(ds, drrb)) {
            dsl_dataset_rele(ds, dmu_recv_tag);
            return (ENOTSUP);
        }
        /* target fs already exists; recv into temp clone */

        /* Can't recv a clone into an existing fs */
        if (flags & DRR_FLAG_CLONE) {
            dsl_dataset_rele(ds, dmu_recv_tag);
            return (EINVAL);
        }

        /* must not have an incremental recv already in progress */
        if (!mutex_tryenter(&ds->ds_recvlock)) {
            dsl_dataset_rele(ds, dmu_recv_tag);
            return (EBUSY);
        }

        /* tmp clone name is: tofs/%tosnap" */
        (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
            "%%%s", tosnap);
        rbsa.force = force;
        err = dsl_sync_task_do(ds->ds_dir->dd_pool,
            recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
        if (err) {
            mutex_exit(&ds->ds_recvlock);
            dsl_dataset_rele(ds, dmu_recv_tag);
            return (err);
        }
        drc->drc_logical_ds = ds;
        drc->drc_real_ds = rbsa.ds;
    } else if (err == ENOENT) {
        /* target fs does not exist; must be a full backup or clone */
        char *cp;

        /*
         * If it's a non-clone incremental, we are missing the
         * target fs, so fail the recv.
         */
        if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
            return (ENOENT);

        /* Open the parent of tofs */
        cp = strrchr(tofs, '/');
        *cp = '\0';
        err = dsl_dataset_hold(tofs, FTAG, &ds);
        *cp = '/';
        if (err)
            return (err);

        if (dmu_recv_verify_features(ds, drrb)) {
            dsl_dataset_rele(ds, FTAG);
            return (ENOTSUP);
        }

        err = dsl_sync_task_do(ds->ds_dir->dd_pool,
            recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
        dsl_dataset_rele(ds, FTAG);
        if (err)
            return (err);
        drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
        drc->drc_newfs = B_TRUE;
    }

    return (err);
}

struct restorearg {
    int err;
    int byteswap;
    vnode_t *vp;
    char *buf;
    uint64_t voff;
    int bufsize; /* amount of memory allocated for buf */
    zio_cksum_t cksum;
    avl_tree_t *guid_to_ds_map;
};

typedef struct guid_map_entry {
    uint64_t	guid;
    dsl_dataset_t	*gme_ds;
    avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
    const guid_map_entry_t *gmep1 = arg1;
    const guid_map_entry_t *gmep2 = arg2;

    if (gmep1->guid < gmep2->guid)
        return (-1);
    else if (gmep1->guid > gmep2->guid)
        return (1);
    return (0);
}

static void
free_guid_map_onexit(void *arg)
{
    avl_tree_t *ca = arg;
    void *cookie = NULL;
    guid_map_entry_t *gmep;

    while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
        dsl_dataset_rele(gmep->gme_ds, ca);
        kmem_free(gmep, sizeof (guid_map_entry_t));
    }
    avl_destroy(ca);
    kmem_free(ca, sizeof (avl_tree_t));
}

static void *
restore_read(struct restorearg *ra, int len)
{
    void *rv;
    int done = 0;

    /* some things will require 8-byte alignment, so everything must */
    ASSERT3U(len % 8, ==, 0);

    while (done < len) {
        ssize_t resid;

        ra->err = vn_rdwr(UIO_READ, ra->vp,
            (caddr_t)ra->buf + done, len - done,
            ra->voff, UIO_SYSSPACE, FAPPEND,
            RLIM64_INFINITY, CRED(), &resid);

        if (resid == len - done)
            ra->err = EINVAL;
        ra->voff += len - done - resid;
        done = len - resid;
        if (ra->err)
            return (NULL);
    }

    ASSERT3U(done, ==, len);
    rv = ra->buf;
    if (ra->byteswap)
        fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
    else
        fletcher_4_incremental_native(rv, len, &ra->cksum);
    return (rv);
}
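
/*
 * For illustration (not from the original file): the 8-byte alignment
 * restore_read() asserts is what lets callers treat the returned buffer
 * as an array of uint64_t, e.g. when byteswapping a record in place:
 *
 *	uint64_t *p = rv;
 *	int i;
 *	for (i = 0; i < len / 8; i++)
 *		p[i] = BSWAP_64(p[i]);
 *
 * A length that is not a multiple of 8 would fault or silently corrupt
 * on strict-alignment platforms.
 */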

static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
    drr->drr_type = BSWAP_32(drr->drr_type);
    drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
    switch (drr->drr_type) {
    case DRR_BEGIN:
        DO64(drr_begin.drr_magic);
        DO64(drr_begin.drr_versioninfo);
        DO64(drr_begin.drr_creation_time);
        DO32(drr_begin.drr_type);
        DO32(drr_begin.drr_flags);
        DO64(drr_begin.drr_toguid);
        DO64(drr_begin.drr_fromguid);
        break;
    case DRR_OBJECT:
        DO64(drr_object.drr_object);
        /* DO64(drr_object.drr_allocation_txg); */
        DO32(drr_object.drr_type);
        DO32(drr_object.drr_bonustype);
        DO32(drr_object.drr_blksz);
        DO32(drr_object.drr_bonuslen);
        DO64(drr_object.drr_toguid);
        break;
    case DRR_FREEOBJECTS:
        DO64(drr_freeobjects.drr_firstobj);
        DO64(drr_freeobjects.drr_numobjs);
        DO64(drr_freeobjects.drr_toguid);
        break;
    case DRR_WRITE:
        DO64(drr_write.drr_object);
        DO32(drr_write.drr_type);
        DO64(drr_write.drr_offset);
        DO64(drr_write.drr_length);
        DO64(drr_write.drr_toguid);
        DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
        DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
        DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
        DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
        DO64(drr_write.drr_key.ddk_prop);
        break;
    case DRR_WRITE_BYREF:
        DO64(drr_write_byref.drr_object);
        DO64(drr_write_byref.drr_offset);
        DO64(drr_write_byref.drr_length);
        DO64(drr_write_byref.drr_toguid);
        DO64(drr_write_byref.drr_refguid);
        DO64(drr_write_byref.drr_refobject);
        DO64(drr_write_byref.drr_refoffset);
        DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
        DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
        DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
        DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
        DO64(drr_write_byref.drr_key.ddk_prop);
        break;
    case DRR_FREE:
        DO64(drr_free.drr_object);
        DO64(drr_free.drr_offset);
        DO64(drr_free.drr_length);
        DO64(drr_free.drr_toguid);
        break;
    case DRR_SPILL:
        DO64(drr_spill.drr_object);
        DO64(drr_spill.drr_length);
        DO64(drr_spill.drr_toguid);
        break;
    case DRR_END:
        DO64(drr_end.drr_checksum.zc_word[0]);
        DO64(drr_end.drr_checksum.zc_word[1]);
        DO64(drr_end.drr_checksum.zc_word[2]);
        DO64(drr_end.drr_checksum.zc_word[3]);
        DO64(drr_end.drr_toguid);
        break;
    }
#undef DO64
#undef DO32
}

static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
    int err;
    dmu_tx_t *tx;
    void *data = NULL;

    if (drro->drr_type == DMU_OT_NONE ||
        !DMU_OT_IS_VALID(drro->drr_type) ||
        !DMU_OT_IS_VALID(drro->drr_bonustype) ||
        drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
        drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
        P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
        drro->drr_blksz < SPA_MINBLOCKSIZE ||
        drro->drr_blksz > SPA_MAXBLOCKSIZE ||
        drro->drr_bonuslen > DN_MAX_BONUSLEN) {
        return (EINVAL);
    }

    err = dmu_object_info(os, drro->drr_object, NULL);

    if (err != 0 && err != ENOENT)
        return (EINVAL);

    if (drro->drr_bonuslen) {
        data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
        if (ra->err)
            return (ra->err);
    }

    if (err == ENOENT) {
        /* currently free, want to be allocated */
        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err) {
            dmu_tx_abort(tx);
            return (err);
        }
        err = dmu_object_claim(os, drro->drr_object,
            drro->drr_type, drro->drr_blksz,
            drro->drr_bonustype, drro->drr_bonuslen, tx);
        dmu_tx_commit(tx);
    } else {
        /* currently allocated, want to be allocated */
        err = dmu_object_reclaim(os, drro->drr_object,
            drro->drr_type, drro->drr_blksz,
            drro->drr_bonustype, drro->drr_bonuslen);
    }
    if (err) {
        return (EINVAL);
    }

    tx = dmu_tx_create(os);
    dmu_tx_hold_bonus(tx, drro->drr_object);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err) {
        dmu_tx_abort(tx);
        return (err);
    }

    dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
        tx);
    dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

    if (data != NULL) {
        dmu_buf_t *db;

        VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
        dmu_buf_will_dirty(db, tx);

        ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
        bcopy(data, db->db_data, drro->drr_bonuslen);
        if (ra->byteswap) {
            dmu_object_byteswap_t byteswap =
                DMU_OT_BYTESWAP(drro->drr_bonustype);
            dmu_ot_byteswap[byteswap].ob_func(db->db_data,
                drro->drr_bonuslen);
        }
        dmu_buf_rele(db, FTAG);
    }
    dmu_tx_commit(tx);
    return (0);
}

/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
    uint64_t obj;

    if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
        return (EINVAL);

    for (obj = drrfo->drr_firstobj;
        obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
        (void) dmu_object_next(os, &obj, FALSE, 0)) {
        int err;

        if (dmu_object_info(os, obj, NULL) != 0)
            continue;

        err = dmu_free_object(os, obj);
        if (err)
            return (err);
    }
    return (0);
}

static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
    dmu_tx_t *tx;
    void *data;
    int err;

    if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
        !DMU_OT_IS_VALID(drrw->drr_type))
        return (EINVAL);

    data = restore_read(ra, drrw->drr_length);
    if (data == NULL)
        return (ra->err);

    if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
        return (EINVAL);

    tx = dmu_tx_create(os);

    dmu_tx_hold_write(tx, drrw->drr_object,
        drrw->drr_offset, drrw->drr_length);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err) {
        dmu_tx_abort(tx);
        return (err);
    }
    if (ra->byteswap) {
        dmu_object_byteswap_t byteswap =
            DMU_OT_BYTESWAP(drrw->drr_type);
        dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
    }
    dmu_write(os, drrw->drr_object,
        drrw->drr_offset, drrw->drr_length, data, tx);
    dmu_tx_commit(tx);
    return (0);
}

/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
    dmu_tx_t *tx;
    int err;
    guid_map_entry_t gmesrch;
    guid_map_entry_t *gmep;
    avl_index_t where;
    objset_t *ref_os = NULL;
    dmu_buf_t *dbp;

    if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
        return (EINVAL);

    /*
     * If the GUID of the referenced dataset is different from the
     * GUID of the target dataset, find the referenced dataset.
     */
    if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
        gmesrch.guid = drrwbr->drr_refguid;
        if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
            &where)) == NULL) {
            return (EINVAL);
        }
        if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
            return (EINVAL);
    } else {
        ref_os = os;
    }

    if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
        drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
        return (err);

    tx = dmu_tx_create(os);

    dmu_tx_hold_write(tx, drrwbr->drr_object,
        drrwbr->drr_offset, drrwbr->drr_length);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err) {
        dmu_tx_abort(tx);
        return (err);
    }
    dmu_write(os, drrwbr->drr_object,
        drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
    dmu_buf_rele(dbp, FTAG);
    dmu_tx_commit(tx);
    return (0);
}

static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
    dmu_tx_t *tx;
    void *data;
    dmu_buf_t *db, *db_spill;
    int err;

    if (drrs->drr_length < SPA_MINBLOCKSIZE ||
        drrs->drr_length > SPA_MAXBLOCKSIZE)
        return (EINVAL);

    data = restore_read(ra, drrs->drr_length);
    if (data == NULL)
        return (ra->err);

    if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
        return (EINVAL);

    VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
    if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
        dmu_buf_rele(db, FTAG);
        return (err);
    }

    tx = dmu_tx_create(os);

    dmu_tx_hold_spill(tx, db->db_object);

    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err != 0) {
        dmu_buf_rele(db, FTAG);
        dmu_buf_rele(db_spill, FTAG);
        dmu_tx_abort(tx);
        return (err);
    }
    dmu_buf_will_dirty(db_spill, tx);

    if (db_spill->db_size < drrs->drr_length)
        VERIFY(0 == dbuf_spill_set_blksz(db_spill,
            drrs->drr_length, tx));
    bcopy(data, db_spill->db_data, drrs->drr_length);

    dmu_buf_rele(db, FTAG);
    dmu_buf_rele(db_spill, FTAG);

    dmu_tx_commit(tx);
    return (0);
}

/* ARGSUSED */
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
    int err;

    if (drrf->drr_length != -1ULL &&
        drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
        return (EINVAL);

    if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
        return (EINVAL);

    err = dmu_free_long_range(os, drrf->drr_object,
        drrf->drr_offset, drrf->drr_length);
    return (err);
}

/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
    struct restorearg ra = { 0 };
    dmu_replay_record_t *drr;
    objset_t *os;
    zio_cksum_t pcksum;
    int featureflags;

    if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
        ra.byteswap = TRUE;

    {
        /* compute checksum of drr_begin record */
        dmu_replay_record_t *drr;
        drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);

        drr->drr_type = DRR_BEGIN;
        drr->drr_u.drr_begin = *drc->drc_drrb;
        if (ra.byteswap) {
            fletcher_4_incremental_byteswap(drr,
                sizeof (dmu_replay_record_t), &ra.cksum);
        } else {
            fletcher_4_incremental_native(drr,
                sizeof (dmu_replay_record_t), &ra.cksum);
        }
        kmem_free(drr, sizeof (dmu_replay_record_t));
    }

    if (ra.byteswap) {
        struct drr_begin *drrb = drc->drc_drrb;
        drrb->drr_magic = BSWAP_64(drrb->drr_magic);
        drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
        drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
        drrb->drr_type = BSWAP_32(drrb->drr_type);
        drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
        drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
    }

    ra.vp = vp;
    ra.voff = *voffp;
    ra.bufsize = 1<<20;
    ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

    /* these were verified in dmu_recv_begin */
    ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
        DMU_SUBSTREAM);
    ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);

    /*
     * Open the objset we are modifying.
     */
    VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0);

    ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

    featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

    /* if this stream is dedup'ed, set up the avl tree for guid mapping */
    if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
        minor_t minor;

        if (cleanup_fd == -1) {
            ra.err = EBADF;
            goto out;
        }
        ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
        if (ra.err) {
            cleanup_fd = -1;
            goto out;
        }

        if (*action_handlep == 0) {
            ra.guid_to_ds_map =
                kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
            avl_create(ra.guid_to_ds_map, guid_compare,
                sizeof (guid_map_entry_t),
                offsetof(guid_map_entry_t, avlnode));
            ra.err = zfs_onexit_add_cb(minor,
                free_guid_map_onexit, ra.guid_to_ds_map,
                action_handlep);
            if (ra.err)
                goto out;
        } else {
            ra.err = zfs_onexit_cb_data(minor, *action_handlep,
                (void **)&ra.guid_to_ds_map);
            if (ra.err)
                goto out;
        }

        drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
    }

    /*
     * Read records and process them.
     */
    pcksum = ra.cksum;
    while (ra.err == 0 &&
        NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
        if (issig(JUSTLOOKING) && issig(FORREAL)) {
            ra.err = EINTR;
            goto out;
        }

        if (ra.byteswap)
            backup_byteswap(drr);

        switch (drr->drr_type) {
        case DRR_OBJECT:
        {
            /*
             * We need to make a copy of the record header,
             * because restore_{object,write} may need to
             * restore_read(), which will invalidate drr.
             */
            struct drr_object drro = drr->drr_u.drr_object;
            ra.err = restore_object(&ra, os, &drro);
            break;
        }
        case DRR_FREEOBJECTS:
        {
            struct drr_freeobjects drrfo =
                drr->drr_u.drr_freeobjects;
            ra.err = restore_freeobjects(&ra, os, &drrfo);
            break;
        }
        case DRR_WRITE:
        {
            struct drr_write drrw = drr->drr_u.drr_write;
            ra.err = restore_write(&ra, os, &drrw);
            break;
        }
        case DRR_WRITE_BYREF:
        {
            struct drr_write_byref drrwbr =
                drr->drr_u.drr_write_byref;
            ra.err = restore_write_byref(&ra, os, &drrwbr);
            break;
        }
        case DRR_FREE:
        {
            struct drr_free drrf = drr->drr_u.drr_free;
            ra.err = restore_free(&ra, os, &drrf);
            break;
        }
        case DRR_END:
        {
            struct drr_end drre = drr->drr_u.drr_end;
            /*
             * We compare against the *previous* checksum
             * value, because the stored checksum is of
             * everything before the DRR_END record.
             */
            if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
                ra.err = ECKSUM;
            goto out;
        }
        case DRR_SPILL:
        {
            struct drr_spill drrs = drr->drr_u.drr_spill;
            ra.err = restore_spill(&ra, os, &drrs);
            break;
        }
        default:
            ra.err = EINVAL;
            goto out;
        }
        pcksum = ra.cksum;
    }
    ASSERT(ra.err != 0);

out:
    if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
        zfs_onexit_fd_rele(cleanup_fd);

    if (ra.err != 0) {
        /*
         * destroy what we created, so we don't leave it in the
         * inconsistent restoring state.
         */
        txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);

        (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
            B_FALSE);
        if (drc->drc_real_ds != drc->drc_logical_ds) {
            mutex_exit(&drc->drc_logical_ds->ds_recvlock);
            dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
        }
    }

    kmem_free(ra.buf, ra.bufsize);
    *voffp = ra.voff;
    return (ra.err);
}

struct recvendsyncarg {
    char *tosnap;
    uint64_t creation_time;
    uint64_t toguid;
};

static int
recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
    dsl_dataset_t *ds = arg1;
    struct recvendsyncarg *resa = arg2;

    return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
}

static void
recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
    dsl_dataset_t *ds = arg1;
    struct recvendsyncarg *resa = arg2;

    dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);

    /* set snapshot's creation time and guid */
    dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
    ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
    ds->ds_prev->ds_phys->ds_guid = resa->toguid;
    ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

    dmu_buf_will_dirty(ds->ds_dbuf, tx);
    ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

    spa_history_log_internal_ds(ds, "finished receiving", tx, "");
}

static int
add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
{
    dsl_pool_t *dp = ds->ds_dir->dd_pool;
    uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
    dsl_dataset_t *snapds;
    guid_map_entry_t *gmep;
    int err;

    ASSERT(guid_map != NULL);

    rw_enter(&dp->dp_config_rwlock, RW_READER);
    err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
    if (err == 0) {
        gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
        gmep->guid = snapds->ds_phys->ds_guid;
        gmep->gme_ds = snapds;
        avl_add(guid_map, gmep);
    }

    rw_exit(&dp->dp_config_rwlock);
    return (err);
}

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
    struct recvendsyncarg resa;
    dsl_dataset_t *ds = drc->drc_logical_ds;
    int err, myerr;

    if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
        err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
            drc->drc_force);
    } else {
        err = EBUSY;
    }

    if (err) {
        mutex_exit(&ds->ds_recvlock);
        dsl_dataset_rele(ds, dmu_recv_tag);
        (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
            B_FALSE);
        return (err);
    }

    resa.creation_time = drc->drc_drrb->drr_creation_time;
    resa.toguid = drc->drc_drrb->drr_toguid;
    resa.tosnap = drc->drc_tosnap;

    err = dsl_sync_task_do(ds->ds_dir->dd_pool,
        recv_end_check, recv_end_sync, ds, &resa, 3);
    if (err) {
        /* swap back */
        (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
    }

    mutex_exit(&ds->ds_recvlock);
    if (err == 0 && drc->drc_guid_to_ds_map != NULL)
        (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
    dsl_dataset_disown(ds, dmu_recv_tag);
    myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
    ASSERT3U(myerr, ==, 0);
    return (err);
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
    struct recvendsyncarg resa;
    dsl_dataset_t *ds = drc->drc_logical_ds;
    int err;

    /*
     * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
     * expects it to have a ds_user_ptr (and zil), but clone_swap()
     * cleared it.
     */
    txg_wait_synced(ds->ds_dir->dd_pool, 0);

    resa.creation_time = drc->drc_drrb->drr_creation_time;
    resa.toguid = drc->drc_drrb->drr_toguid;
    resa.tosnap = drc->drc_tosnap;

    err = dsl_sync_task_do(ds->ds_dir->dd_pool,
        recv_end_check, recv_end_sync, ds, &resa, 3);
    if (err) {
        /* clean up the fs we just recv'd into */
        (void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
        return (err);
    }

    if (drc->drc_guid_to_ds_map != NULL)
        (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
    /* release the hold from dmu_recv_begin */
    dsl_dataset_disown(ds, dmu_recv_tag);
    return (err);
}

int
dmu_recv_end(dmu_recv_cookie_t *drc)
{
    if (drc->drc_logical_ds != drc->drc_real_ds)
        return (dmu_recv_existing_end(drc));
    else
        return (dmu_recv_new_end(drc));
}