/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2014 Integros [integros.com]
 */
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";
#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
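/*
 * Worked example of BP_SPAN (added for illustration): a level-0 blkptr
 * spans exactly one data block, datablkszsec << SPA_MINBLOCKSHIFT bytes.
 * Each level up multiplies the span by the number of blkptrs that fit in
 * one indirect block, 1 << (indblkshift - SPA_BLKPTRSHIFT).  For 128K data
 * blocks (datablkszsec = 256) and 128K indirect blocks (indblkshift = 17),
 * a level-1 blkptr spans 256 << (9 + 1 * (17 - 7)) = 128M: 1024 blkptrs
 * of 128K each.
 */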
static void byteswap_record(dmu_replay_record_t *drr);
struct send_thread_arg {
	bqueue_t	q;
	dsl_dataset_t	*ds;		/* Dataset to traverse */
	uint64_t	fromtxg;	/* Traverse from this txg */
	int		flags;		/* flags to pass to traverse_dataset */
	int		error_code;
	boolean_t	cancel;
	zbookmark_phys_t resume;
};
struct send_block_record {
	boolean_t		eos_marker; /* Marks the end of the stream */
	blkptr_t		bp;
	zbookmark_phys_t	zb;
	uint8_t			indblkshift;
	uint16_t		datablkszsec;
	bqueue_node_t		ln;
};
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	ssize_t resid; /* have to get resid to get detailed errno */

	/*
	 * The code does not rely on this (len being a multiple of 8).  We keep
	 * this assertion because of the corresponding assertion in
	 * receive_read().  Keeping this assertion ensures that we do not
	 * inadvertently break backwards compatibility (causing the assertion
	 * in receive_read() to trigger on old software).
	 *
	 * Removing the assertions could be rolled into a new feature that uses
	 * data that isn't 8-byte aligned; if the assertions were removed, a
	 * feature flag would have to be added.
	 */
	ASSERT0(len % 8);

	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
	    (caddr_t)buf, len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}
/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
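/*
 * Note on the checksum scheme above: dsa_zc is a running fletcher-4 over
 * every byte emitted so far, so the checksum stored in each record covers
 * the entire stream up to that record's own checksum field, and the DRR_END
 * record (see dmu_send_impl()) carries the checksum of the whole stream.
 */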
/*
 * Fill in the drr_free struct, or perform aggregation if the previous record is
 * also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the free
 * and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records.  DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
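/*
 * Aggregation example: dump_free(dsp, 5, 0, 131072) followed by
 * dump_free(dsp, 5, 131072, 131072) leaves a single pending DRR_FREE record
 * for object 5, offset 0, length 262144; the record is only pushed to the
 * stream when a non-adjacent or differently-typed record arrives.
 */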
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + blksz - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
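/*
 * Note: copying the block's on-disk checksum and DDK fields into drr_key
 * above lets stream-dedup tooling recognize duplicate blocks by checksum
 * (DRR_CHECKSUM_DEDUP) instead of re-reading and re-hashing the data.
 */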
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records.  DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from.  In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from.  We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}
/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}
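/*
 * Note: the size passed to bqueue_enqueue() above is the amount of data the
 * record refers to, so the queue's zfs_send_queue_length limit bounds how
 * far the traversal thread can run ahead of the consumer in dmu_send_impl.
 */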
/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.  If there is no
 * dataset to traverse, the thread immediately pushes End of Stream marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
}
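/*
 * Together, send_cb() and send_traverse_thread() form the producer half of
 * a two-thread pipeline: the traversal prefetches and enqueues block
 * records while the main thread (dmu_send_impl) dequeues them and formats
 * the stream via do_dump().
 */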
/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int blksz = BP_GET_LSIZE(bp);
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
		for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (!(dsa->dsa_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
		    blksz > SPA_OLD_MAXBLOCKSIZE) {
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object,
			    offset, blksz, bp, abuf->b_data);
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}
/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb,
    boolean_t is_clone, boolean_t embedok, boolean_t large_block_ok, int outfd,
    uint64_t resumeobj, uint64_t resumeoff,
    vnode_t *vp, offset_t *off)
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg = { 0 };

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
	if (zfs_send_set_freerecords_bit)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	void *payload = NULL;
	size_t payload_len = 0;
	if (resumeobj != 0 || resumeoff != 0) {
		dmu_object_info_t to_doi;
		err = dmu_object_info(os, resumeobj, &to_doi);
		if (err != 0)
			goto out;
		SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
		    resumeoff / to_doi.doi_data_block_size);

		nvlist_t *nvl = fnvlist_alloc();
		fnvlist_add_uint64(nvl, "resume_object", resumeobj);
		fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(nvl);
	}

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
	    TS_RUN, minclsyspri);

	struct send_block_record *to_data;
	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}
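/*
 * Stream layout produced above: one DRR_BEGIN record (with an nvlist payload
 * only for resumed sends), then the OBJECT/WRITE/FREE/etc. records generated
 * by do_dump() in object,offset order, and finally a DRR_END record carrying
 * the cumulative fletcher-4 checksum.
 */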
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
    int outfd, vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, 0, 0, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, 0, 0, vp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
    vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok,
		    outfd, resumeobj, resumeoff, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok,
		    outfd, resumeobj, resumeoff, vp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
    uint64_t *sizep)
{
	int err;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation counter out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
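/*
 * Worked example (illustrative): with recordsize = 128K and
 * sizeof (blkptr_t) = 128, the indirect-block adjustment removes
 * 128/131072, i.e. about 0.1%, of the data size, and the per-block record
 * headers add sizeof (dmu_replay_record_t) for each of the size/recordsize
 * blocks.
 */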
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}
/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed size
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *spaceptr = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		*spaceptr += BP_GET_UCSIZE(bp);
	}
	return (0);
}
/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG.  from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size = 0;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}

	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed size
	 */
	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
	    dmu_calculate_send_traversal, &size);
	if (err != 0)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}
typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving.  We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[MAXNAMELEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving.  We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags = 0;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t val;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	char recvname[ZFS_MAXNAMELEN];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds;
	uint64_t dsobj;
	char recvname[ZFS_MAXNAMELEN];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele(ds, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		return (dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}
}
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *write_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	uint64_t last_object, last_offset;
	uint64_t bytes_read; /* bytes read when current record created */
};

struct objlist {
	list_t list; /* List of struct receive_objnode. */
	/*
	 * Last object looked up.  Used to assert that objects are being looked
	 * up in ascending order.
	 */
	uint64_t last_lookup;
};

struct receive_objnode {
	list_node_t node;
	uint64_t object;
};

struct receive_arg {
	objset_t *os;
	vnode_t *vp; /* The vnode to read the stream from */
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
};
typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}
static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8).  See
	 * comment in dump_bytes.
	 */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (char *)buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
			 */
			ra->err = SET_ERROR(ECKSUM);
		}
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ra->bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
	}
}
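/*
 * Worked example (assuming the classic 512-byte dnode, where
 * DN_MAX_BONUSLEN is 320 and a blkptr_t is 1 << SPA_BLKPTRSHIFT = 128
 * bytes): a 64-byte bonus buffer leaves 1 + (256 >> 7) = 3 block pointers,
 * while a maximal 320-byte bonus leaves just the one embedded blkptr.
 */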
static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
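/*
 * Note: ds_resume_*[] is indexed by txg & TXG_MASK so that the resume state
 * written out with a given sync pass describes exactly the records whose
 * transaction group actually committed.
 */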
static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object;
	int err;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);

	if (err != 0 && err != ENOENT)
		return (SET_ERROR(EINVAL));
	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 */
	if (err == 0) {
		int nblkptr;

		nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);

		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object == DMU_NEW_OBJECT) {
		/* currently free, want to be allocated */
		err = dmu_object_claim(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    drro->drr_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* currently allocated, but with different properties */
		err = dmu_object_reclaim(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	}
	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (rwa->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}
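/*
 * Illustrative summary (not part of the original source) of the three
 * cases receive_object() distinguishes for an incoming DRR_OBJECT record:
 *
 *	object == DMU_NEW_OBJECT         -> dmu_object_claim():
 *	    allocate the object at the stream-specified object number;
 *	type/blksz/bonus shape differ    -> dmu_object_reclaim():
 *	    keep the object number but reset the dnode to the new shape;
 *	otherwise                        -> leave the dnode alone and only
 *	    update checksum/compress and the bonus buffer contents.
 */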
/* ARGSUSED */
static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		int err;

		if (dmu_object_info(rwa->os, obj, NULL) != 0)
			continue;

		err = dmu_free_long_object(rwa->os, obj);
		if (err != 0)
			return (err);
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}
static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
{
	dmu_tx_t *tx;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}
	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (rwa->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    drrw->drr_length);
	}

	dmu_buf_t *bonus;
	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
		return (SET_ERROR(EINVAL));
	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);

	/*
	 * Note: If the receive fails, we want the resume stream to start
	 * with the same record that we last successfully received (as opposed
	 * to the next record), so that we can verify that we are
	 * resuming from the correct location.
	 */
	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
	dmu_tx_commit(tx);
	dmu_buf_rele(bonus, FTAG);

	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}

	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(rwa->os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
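/*
 * Illustrative example (not part of the original source): a dedup'ed
 * stream ("zfs send -D") emits the first copy of a block as a normal
 * DRR_WRITE and each later identical block as a DRR_WRITE_BYREF naming
 * (refguid, refobject, refoffset).  On receive, the earlier copy is
 * re-read with dmu_buf_hold() and dmu_write()n at the new
 * (object, offset), so the duplicate payload never crosses the wire.
 */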
static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwe, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (EINVAL);

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (EINVAL);

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (EINVAL);
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (EINVAL);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwe->drr_object,
	    drrwe->drr_offset, data, drrwe->drr_etype,
	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    void *data)
{
	dmu_tx_t *tx;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}
/* ARGSUSED */
static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	if (drc->drc_resumable) {
		/* wait for our resume state to be written to disk */
		txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	} else {
		char name[MAXNAMELEN];
		dsl_dataset_name(drc->drc_ds, name);
		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}
static void
receive_cksum(struct receive_arg *ra, int len, void *buf)
{
	if (ra->byteswap) {
		fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	} else {
		fletcher_4_incremental_native(buf, len, &ra->cksum);
	}
}
/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate ra->next_rrd and read the next record's header into
 * ra->next_rrd->header.
 * Verify checksum of payload and next record.
 */
static int
receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
{
	int err;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		err = receive_read(ra, len, buf);
		if (err != 0)
			return (err);
		receive_cksum(ra, len, buf);

		/* note: rrd is NULL when reading the begin record's payload */
		if (ra->rrd != NULL) {
			ra->rrd->payload = buf;
			ra->rrd->payload_size = len;
			ra->rrd->bytes_read = ra->bytes_read;
		}
	}

	ra->prev_cksum = ra->cksum;

	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	err = receive_read(ra, sizeof (ra->next_rrd->header),
	    &ra->next_rrd->header);
	ra->next_rrd->bytes_read = ra->bytes_read;
	if (err != 0) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (err);
	}
	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(ra,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &ra->next_rrd->header);

	zio_cksum_t cksum_orig =
	    ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
	zio_cksum_t *cksump =
	    &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (ra->byteswap)
		byteswap_record(&ra->next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);

	return (0);
}
static void
objlist_create(struct objlist *list)
{
	list_create(&list->list, sizeof (struct receive_objnode),
	    offsetof(struct receive_objnode, node));
	list->last_lookup = 0;
}

static void
objlist_destroy(struct objlist *list)
{
	for (struct receive_objnode *n = list_remove_head(&list->list);
	    n != NULL; n = list_remove_head(&list->list)) {
		kmem_free(n, sizeof (*n));
	}
	list_destroy(&list->list);
}
/*
 * This function looks through the objlist to see if the specified object number
 * is contained in the objlist.  In the process, it will remove all object
 * numbers in the list that are smaller than the specified object number.  Thus,
 * any lookup of an object number smaller than a previously looked up object
 * number will always return false; therefore, all lookups should be done in
 * ascending order.
 */
static boolean_t
objlist_exists(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = list_head(&list->list);
	ASSERT3U(object, >=, list->last_lookup);
	list->last_lookup = object;
	while (node != NULL && node->object < object) {
		VERIFY3P(node, ==, list_remove_head(&list->list));
		kmem_free(node, sizeof (*node));
		node = list_head(&list->list);
	}
	return (node != NULL && node->object == object);
}
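/*
 * Illustrative example (not part of the original source) of the in-order
 * lookup contract: after inserting 3, 5 and 9,
 *
 *	objlist_exists(list, 5) -> B_TRUE (node 3 is trimmed in passing)
 *	objlist_exists(list, 9) -> B_TRUE (node 5 is trimmed in passing)
 *	objlist_exists(list, 3) -> would trip the ASSERT above; lookups
 *	                           must never go backwards
 */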
/*
 * The objlist is a list of object numbers stored in ascending order.  However,
 * the insertion of new object numbers does not seek out the correct location to
 * store a new object number; instead, it appends it to the list for simplicity.
 * Thus, any users must take care to only insert new object numbers in ascending
 * order.
 */
static void
objlist_insert(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	node->object = object;
#ifdef ZFS_DEBUG
	struct receive_objnode *last_object = list_tail(&list->list);
	uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
	ASSERT3U(node->object, >, last_objnum);
#endif
	list_insert_tail(&list->list, node);
}
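/*
 * Illustrative note (not part of the original source): because inserts
 * append blindly, callers must feed object numbers in ascending order;
 * e.g. objlist_insert(list, 7) after objlist_insert(list, 9) would leave
 * the list unsorted and, under debug builds, trips the ASSERT3U above.
 */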
/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object.  We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist, don't
 * needlessly try to issue prefetches).  We also trim the list as we go through
 * the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records).  As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list.  In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
static void
receive_read_prefetch(struct receive_arg *ra,
    uint64_t object, uint64_t offset, uint64_t length)
{
	if (!objlist_exists(&ra->ignore_objlist, object)) {
		dmu_prefetch(ra->os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}
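/*
 * Illustrative note (not part of the original source): the level argument
 * of 1 passed to dmu_prefetch() means the level-1 indirect blocks covering
 * [offset, offset + length) are prefetched, not the data itself; the data
 * payload arrives in the stream, so the indirects are the only on-disk
 * reads the writer thread would otherwise have to wait for.
 */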
/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(struct receive_arg *ra)
{
	int err;

	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		dmu_object_info_t doi;
		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(&ra->ignore_objlist, drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
		arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
		    drrw->drr_length);

		err = receive_read_payload_and_next_header(ra,
		    drrw->drr_length, abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->write_buf = abuf;
		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_length);
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwb =
		    &ra->rrd->header.drr_u.drr_write_byref;
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
		    drrwb->drr_length);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &ra->rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
		    drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
		void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
		err = receive_read_payload_and_next_header(ra, drrs->drr_length,
		    buf);
		if (err != 0)
			kmem_free(buf, drrs->drr_length);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	/* Processing in order, therefore bytes_read should be increasing. */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		return (receive_freeobjects(rwa, drrfo));
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->write_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->write_buf);
		rrd->write_buf = NULL;
		rrd->payload = NULL;
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		return (receive_write_byref(rwa, drrwbr));
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		return (receive_free(rwa, drrf));
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record.  When we're done, signal the main thread and exit.
 */
static void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting things
		 * on the queue, but we need to clear everything in it before we
		 * can exit.
		 */
		if (rwa->err == 0) {
			rwa->err = receive_process_record(rwa, rrd);
		} else if (rrd->write_buf != NULL) {
			dmu_return_arcbuf(rrd->write_buf);
			rrd->write_buf = NULL;
			rrd->payload = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		kmem_free(rrd, sizeof (*rrd));
	}
	kmem_free(rrd, sizeof (*rrd));
	mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
}
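/*
 * Illustrative note (not part of the original source): the eos marker is
 * just a receive_record_arg with eos_marker set, enqueued by the main
 * thread as the final element:
 *
 *	ra.next_rrd->eos_marker = B_TRUE;
 *	bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
 *
 * The loop above exits when it dequeues that marker, frees it, and then
 * signals rwa->cv so dmu_recv_stream() can tear the queue down.
 */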
static int
resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
{
	uint64_t val;
	objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
	uint64_t dsobj = dmu_objset_id(ra->os);
	uint64_t resume_obj, resume_off;

	if (nvlist_lookup_uint64(begin_nvl,
	    "resume_object", &resume_obj) != 0 ||
	    nvlist_lookup_uint64(begin_nvl,
	    "resume_offset", &resume_off) != 0) {
		return (SET_ERROR(EINVAL));
	}
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
	if (resume_obj != val)
		return (SET_ERROR(EINVAL));
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
	if (resume_off != val)
		return (SET_ERROR(EINVAL));

	return (0);
}
/*
 * Read in the stream's records, one by one, and apply them to the pool.  There
 * are two threads involved; the thread that calls this function will spin up a
 * worker thread, read the records off the stream one by one, and issue
 * prefetches for any necessary indirect blocks.  It will then push the records
 * onto an internal blocking queue.  The worker thread will pull the records off
 * the queue, and actually write the data into the DMU.  This way, the worker
 * thread doesn't have to wait for reads to complete, since everything it needs
 * (the indirect blocks) will be prefetched.
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	int err = 0;
	struct receive_arg ra = { 0 };
	struct receive_writer_arg rwa = { 0 };
	int featureflags;
	nvlist_t *begin_nvl = NULL;

	ra.byteswap = drc->drc_byteswap;
	ra.cksum = drc->drc_cksum;
	ra.vp = vp;
	ra.voff = *voffp;

	if (dsl_dataset_is_zapified(drc->drc_ds)) {
		(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
		    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
		    sizeof (ra.bytes_read), 1, &ra.bytes_read);
	}

	objlist_create(&ra.ignore_objlist);

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));

	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			err = SET_ERROR(EBADF);
			goto out;
		}
		err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (err != 0) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			rwa.guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(rwa.guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, rwa.guid_to_ds_map,
			    action_handlep);
			if (err != 0)
				goto out;
		} else {
			err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&rwa.guid_to_ds_map);
			if (err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
	}

	uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
	void *payload = NULL;
	if (payloadlen != 0)
		payload = kmem_alloc(payloadlen, KM_SLEEP);

	err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
	if (err != 0) {
		if (payloadlen != 0)
			kmem_free(payload, payloadlen);
		goto out;
	}
	if (payloadlen != 0) {
		err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
		kmem_free(payload, payloadlen);
		if (err != 0)
			goto out;
	}

	if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = resume_check(&ra, begin_nvl);
		if (err != 0)
			goto out;
	}

	(void) bqueue_init(&rwa.q, zfs_recv_queue_length,
	    offsetof(struct receive_record_arg, node));
	cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
	rwa.os = ra.os;
	rwa.byteswap = drc->drc_byteswap;
	rwa.resumable = drc->drc_resumable;

	(void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, curproc,
	    TS_RUN, minclsyspri);
	/*
	 * We're reading rwa.err without locks, which is safe since we are the
	 * only reader, and the worker thread is the only writer.  It's ok if we
	 * miss a write for an iteration or two of the loop, since the writer
	 * thread will keep freeing records we send it until we send it an eos
	 * marker.
	 *
	 * We can leave this loop in 3 ways:  First, if rwa.err is
	 * non-zero.  In that case, the writer thread will free the rrd we just
	 * pushed.  Second, if we're interrupted; in that case, either it's the
	 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
	 * has been handed off to the writer thread who will free it.  Finally,
	 * if receive_read_record fails or we're at the end of the stream, then
	 * we free ra.rrd and exit.
	 */
	while (rwa.err == 0) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
			break;
		}

		ASSERT3P(ra.rrd, ==, NULL);
		ra.rrd = ra.next_rrd;
		ra.next_rrd = NULL;
		/* Allocates and loads header into ra.next_rrd */
		err = receive_read_record(&ra);

		if (ra.rrd->header.drr_type == DRR_END || err != 0) {
			kmem_free(ra.rrd, sizeof (*ra.rrd));
			ra.rrd = NULL;
			break;
		}

		bqueue_enqueue(&rwa.q, ra.rrd,
		    sizeof (struct receive_record_arg) + ra.rrd->payload_size);
		ra.rrd = NULL;
	}
	if (ra.next_rrd == NULL)
		ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
	ra.next_rrd->eos_marker = B_TRUE;
	bqueue_enqueue(&rwa.q, ra.next_rrd, 1);

	mutex_enter(&rwa.mutex);
	while (!rwa.done) {
		cv_wait(&rwa.cv, &rwa.mutex);
	}
	mutex_exit(&rwa.mutex);

	cv_destroy(&rwa.cv);
	mutex_destroy(&rwa.mutex);
	bqueue_destroy(&rwa.q);
	if (err == 0)
		err = rwa.err;

out:
	nvlist_free(begin_nvl);
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (err != 0) {
		/*
		 * Clean up references.  If receive is not resumable,
		 * destroy what we created, so we don't leave it in
		 * the inconsistent state.
		 */
		dmu_recv_cleanup_ds(drc);
	}

	*voffp = ra.voff;
	objlist_destroy(&ra.ignore_objlist);
	return (err);
}
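/*
 * Illustrative note (not part of the original source): a typical
 * successful receive therefore looks like
 *
 *	dmu_recv_begin(...);            (owns the ds, marks it inconsistent)
 *	dmu_recv_stream(drc, vp, ...);  (this function)
 *	dmu_recv_end(drc, owner);       (snapshots the result and clears
 *	                                 DS_FLAG_INCONSISTENT)
 *
 * An error return from this function has already called
 * dmu_recv_cleanup_ds(), so the caller must not call dmu_recv_end().
 */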
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
		if (dsl_dataset_has_resume_receive_state(ds)) {
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_FROMGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OBJECT, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OFFSET, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_BYTES, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TOGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TONAME, tx);
		}
	}
	drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode,
	 * we can evict its bonus buffer.
	 */
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	drc->drc_ds = NULL;
}
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
	if (err == 0) {
		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else {
		kmem_free(gmep, sizeof (*gmep));
	}

	dsl_pool_rele(dp, FTAG);
	return (err);
}
static int dmu_recv_end_modified_blocks = 3;
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	int error;
	char name[MAXNAMELEN];

#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);

	if (error != 0)
		dmu_recv_cleanup_ds(drc);
	return (error);
}
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	int error;

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs,
		    drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj);
	}
	return (error);
}
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	drc->drc_owner = owner;

	if (drc->drc_newfs)
		return (dmu_recv_new_end(drc));
	else
		return (dmu_recv_existing_end(drc));
}
/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}