/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/metaslab.h>
#include <sys/refcount.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
#include <sys/zthr.h>
/*
 * An indirect vdev corresponds to a vdev that has been removed. Since
 * we cannot rewrite block pointers of snapshots, etc., we keep a
 * mapping from old location on the removed device to the new location
 * on another device in the pool and use this mapping whenever we need
 * to access the DVA. Unfortunately, this mapping did not respect
 * logical block boundaries when it was first created, and so a DVA on
 * this indirect vdev may be "split" into multiple sections that each
 * map to a different location. As a consequence, not all DVAs can be
 * translated to an equivalent new DVA. Instead we must provide a
 * "vdev_remap" operation that executes a callback on each contiguous
 * segment of the new location. This function is used in multiple ways:
 *
 *  - i/os to this vdev use the callback to determine where the
 *    data is now located, and issue child i/os for each segment's new
 *    location.
 *
 *  - frees and claims to this vdev use the callback to free or claim
 *    each mapped segment. (Note that we don't actually need to claim
 *    log blocks on indirect vdevs, because we don't allocate to
 *    removing vdevs. However, zdb uses zio_claim() for its leak
 *    detection.)
 */
62 * "Big theory statement" for how we mark blocks obsolete.
64 * When a block on an indirect vdev is freed or remapped, a section of
65 * that vdev's mapping may no longer be referenced (aka "obsolete"). We
66 * keep track of how much of each mapping entry is obsolete. When
67 * an entry becomes completely obsolete, we can remove it, thus reducing
68 * the memory used by the mapping. The complete picture of obsolescence
69 * is given by the following data structures, described below:
70 * - the entry-specific obsolete count
71 * - the vdev-specific obsolete spacemap
72 * - the pool-specific obsolete bpobj
74 * == On disk data structures used ==
76 * We track the obsolete space for the pool using several objects. Each
77 * of these objects is created on demand and freed when no longer
78 * needed, and is assumed to be empty if it does not exist.
79 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
81 * - Each vic_mapping_object (associated with an indirect vdev) can
82 * have a vimp_counts_object. This is an array of uint32_t's
83 * with the same number of entries as the vic_mapping_object. When
84 * the mapping is condensed, entries from the vic_obsolete_sm_object
85 * (see below) are folded into the counts. Therefore, each
86 * obsolete_counts entry tells us the number of bytes in the
87 * corresponding mapping entry that were not referenced when the
88 * mapping was last condensed.
90 * - Each indirect or removing vdev can have a vic_obsolete_sm_object.
91 * This is a space map containing an alloc entry for every DVA that
92 * has been obsoleted since the last time this indirect vdev was
93 * condensed. We use this object in order to improve performance
94 * when marking a DVA as obsolete. Instead of modifying an arbitrary
95 * offset of the vimp_counts_object, we only need to append an entry
96 * to the end of this object. When a DVA becomes obsolete, it is
97 * added to the obsolete space map. This happens when the DVA is
98 * freed, remapped and not referenced by a snapshot, or the last
99 * snapshot referencing it is destroyed.
101 * - Each dataset can have a ds_remap_deadlist object. This is a
102 * deadlist object containing all blocks that were remapped in this
103 * dataset but referenced in a previous snapshot. Blocks can *only*
104 * appear on this list if they were remapped (dsl_dataset_block_remapped);
105 * blocks that were killed in a head dataset are put on the normal
106 * ds_deadlist and marked obsolete when they are freed.
108 * - The pool can have a dp_obsolete_bpobj. This is a list of blocks
109 * in the pool that need to be marked obsolete. When a snapshot is
110 * destroyed, we move some of the ds_remap_deadlist to the obsolete
111 * bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
112 * asynchronously process the obsolete bpobj, moving its entries to
113 * the specific vdevs' obsolete space maps.
115 * == Summary of how we mark blocks as obsolete ==
117 * - When freeing a block: if any DVA is on an indirect vdev, append to
118 * vic_obsolete_sm_object.
119 * - When remapping a block, add dva to ds_remap_deadlist (if prev snap
120 * references; otherwise append to vic_obsolete_sm_object).
121 * - When freeing a snapshot: move parts of ds_remap_deadlist to
122 * dp_obsolete_bpobj (same algorithm as ds_deadlist).
123 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
124 * individual vdev's vic_obsolete_sm_object.
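/*
 * Illustrative walk-through (no new mechanism, just the structures above
 * in sequence): suppose a block on an indirect vdev is remapped while an
 * older snapshot still references it. The block is recorded in the
 * dataset's ds_remap_deadlist. When that snapshot is later destroyed,
 * the entry moves to the pool's dp_obsolete_bpobj. During spa sync we
 * drain the bpobj into the vdev's vic_obsolete_sm_object, and the next
 * condense folds that space map into the per-entry obsolete counts
 * (vimp_counts_object), at which point fully-obsolete mapping entries
 * can be dropped.
 */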
128 * "Big theory statement" for how we condense indirect vdevs.
130 * Condensing an indirect vdev's mapping is the process of determining
131 * the precise counts of obsolete space for each mapping entry (by
132 * integrating the obsolete spacemap into the obsolete counts) and
133 * writing out a new mapping that contains only referenced entries.
135 * We condense a vdev when we expect the mapping to shrink (see
136 * vdev_indirect_should_condense()), but only perform one condense at a
137 * time to limit the memory usage. In addition, we use a separate
138 * open-context thread (spa_condense_indirect_thread) to incrementally
139 * create the new mapping object in a way that minimizes the impact on
140 * the rest of the system.
142 * == Generating a new mapping ==
144 * To generate a new mapping, we follow these steps:
146 * 1. Save the old obsolete space map and create a new mapping object
147 * (see spa_condense_indirect_start_sync()). This initializes the
148 * spa_condensing_indirect_phys with the "previous obsolete space map",
149 * which is now read only. Newly obsolete DVAs will be added to a
150 * new (initially empty) obsolete space map, and will not be
151 * considered as part of this condense operation.
153 * 2. Construct in memory the precise counts of obsolete space for each
154 * mapping entry, by incorporating the obsolete space map into the
155 * counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
157 * 3. Iterate through each mapping entry, writing to the new mapping any
158 * entries that are not completely obsolete (i.e. which don't have
159 * obsolete count == mapping length). (See
160 * spa_condense_indirect_generate_new_mapping().)
162 * 4. Destroy the old mapping object and switch over to the new one
163 * (spa_condense_indirect_complete_sync).
165 * == Restarting from failure ==
167 * To restart the condense when we import/open the pool, we must start
168 * at the 2nd step above: reconstruct the precise counts in memory,
169 * based on the space map + counts. Then in the 3rd step, we start
170 * iterating where we left off: at vimp_max_offset of the new mapping
boolean_t zfs_condense_indirect_vdevs_enable = B_TRUE;

/*
 * Condense if at least this percent of the bytes in the mapping is
 * obsolete. With the default of 25%, the amount of space mapped
 * will be reduced to 1% of its original size after at most 16
 * condenses. Higher values will condense less often (causing less
 * i/o); lower values will reduce the mapping size more quickly.
 */
int zfs_indirect_condense_obsolete_pct = 25;
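/*
 * Worked example of the 25% default: each condense removes at least a
 * quarter of the remaining mapped bytes, so after n condenses at most
 * 0.75^n of the original mapping remains. 0.75^16 is roughly 0.01,
 * which is where the "1% after at most 16 condenses" figure above
 * comes from.
 */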
/*
 * Condense if the obsolete space map takes up more than this amount of
 * space on disk (logically). This limits the amount of disk space
 * consumed by the obsolete space map; the default of 1GB is small enough
 * that we typically don't mind "wasting" it.
 */
uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;

/*
 * Don't bother condensing if the mapping uses less than this amount of
 * memory. The default of 128KB is considered a "trivial" amount of
 * memory and not worth reducing.
 */
uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a condense (which might otherwise
 * complete too quickly). If used to reduce the performance impact of
 * condensing in production, a maximum value of 1 should be sufficient.
 */
int zfs_condense_indirect_commit_entry_delay_ticks = 0;
/*
 * If a split block contains more than this many segments, consider it too
 * computationally expensive to check all (2^num_segments) possible
 * combinations. Instead, try at most 2^_segments_max randomly-selected
 * combinations.
 *
 * This is reasonable if only a few segment copies are damaged and the
 * majority of segment copies are good. This allows all the segment copies to
 * participate fairly in the reconstruction and prevents the repeated use of
 * one bad copy.
 */
int zfs_reconstruct_indirect_segments_max = 10;
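/*
 * Worked example: with the default of 10, a split block with 12 segments,
 * each on a 2-way mirror, has 2^12 = 4096 possible combinations. Since
 * 12 > 10 we do not enumerate them all; instead we try at most
 * 2^10 = 1024 randomly-selected combinations before giving up
 * (see vdev_indirect_reconstruct_io_done()).
 */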
/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
 * ic_vdev is a child of the mirror.
 */
typedef struct indirect_child {
	abd_t *ic_data;
	vdev_t *ic_vdev;
} indirect_child_t;
/*
 * The indirect_split_t represents one mapped segment of an i/o to the
 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
 * For split blocks, there will be several of these.
 */
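/*
 * For example (purely illustrative sizes): a 128KB i/o whose mapping was
 * split into segments of 48KB, 48KB and 32KB would yield three
 * indirect_split_t's with is_split_offset 0, 48KB and 96KB and is_size
 * 48KB, 48KB and 32KB respectively; the last is_split_offset + is_size
 * equals the original io_size.
 */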
typedef struct indirect_split {
	list_node_t is_node; /* link on iv_splits */

	/*
	 * is_split_offset is the offset into the i/o.
	 * This is the sum of the previous splits' is_size's.
	 */
	uint64_t is_split_offset;

	vdev_t *is_vdev; /* top-level vdev */
	uint64_t is_target_offset; /* offset on is_vdev */
	uint64_t is_size;
	int is_children; /* number of entries in is_child[] */

	/*
	 * is_good_child is the child that we are currently using to
	 * attempt reconstruction.
	 */
	int is_good_child;

	indirect_child_t is_child[1]; /* variable-length */
} indirect_split_t;
/*
 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
 */
typedef struct indirect_vsd {
	boolean_t iv_split_block;
	boolean_t iv_reconstruct;

	list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;
static void
vdev_indirect_map_free(zio_t *zio)
{
	indirect_vsd_t *iv = zio->io_vsd;

	indirect_split_t *is;
	while ((is = list_head(&iv->iv_splits)) != NULL) {
		for (int c = 0; c < is->is_children; c++) {
			indirect_child_t *ic = &is->is_child[c];
			if (ic->ic_data != NULL)
				abd_free(ic->ic_data);
		}
		list_remove(&iv->iv_splits, is);
		kmem_free(is,
		    offsetof(indirect_split_t, is_child[is->is_children]));
	}
	kmem_free(iv, sizeof (*iv));
}

static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
	vdev_indirect_map_free,
	zio_vsd_default_cksum_report
};
/*
 * Mark the given offset and size as being obsolete.
 */
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
	VERIFY(vdev_indirect_mapping_entry_for_offset(
	    vd->vdev_indirect_mapping, offset) != NULL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		mutex_enter(&vd->vdev_obsolete_lock);
		range_tree_add(vd->vdev_obsolete_segments, offset, size);
		mutex_exit(&vd->vdev_obsolete_lock);
		vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
	}
}
/*
 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
 * wrapper is provided because the DMU does not know about vdev_t's and
 * cannot directly call vdev_indirect_mark_obsolete.
 */
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	ASSERT(dmu_tx_is_syncing(tx));

	/* The DMU can only remap indirect vdevs. */
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	vdev_indirect_mark_obsolete(vd, offset, size);
}
static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
	objset_t *mos = spa->spa_meta_objset;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&sci->sci_new_mapping_entries[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	sci->sci_new_mapping =
	    vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);

	return (sci);
}
static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&sci->sci_new_mapping_entries[i]);

	if (sci->sci_new_mapping != NULL)
		vdev_indirect_mapping_close(sci->sci_new_mapping);

	kmem_free(sci, sizeof (*sci));
}
static boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	spa_t *spa = vd->vdev_spa;

	ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));

	if (!zfs_condense_indirect_vdevs_enable)
		return (B_FALSE);

	/*
	 * We can only condense one indirect vdev at a time.
	 */
	if (spa->spa_condensing_indirect != NULL)
		return (B_FALSE);

	if (spa_shutting_down(spa))
		return (B_FALSE);

	/*
	 * The mapping object size must not change while we are
	 * condensing, so we can only condense indirect vdevs
	 * (not vdevs that are still in the middle of being removed).
	 */
	if (vd->vdev_ops != &vdev_indirect_ops)
		return (B_FALSE);

	/*
	 * If nothing new has been marked obsolete, there is no
	 * point in condensing.
	 */
	if (vd->vdev_obsolete_sm == NULL) {
		ASSERT0(vdev_obsolete_sm_object(vd));
		return (B_FALSE);
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);

	ASSERT3U(vdev_obsolete_sm_object(vd), ==,
	    space_map_object(vd->vdev_obsolete_sm));

	uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
	uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
	uint64_t mapping_size = vdev_indirect_mapping_size(vim);
	uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);

	ASSERT3U(bytes_obsolete, <=, bytes_mapped);

	/*
	 * If a high percentage of the bytes that are mapped have become
	 * obsolete, condense (unless the mapping is already small enough).
	 * This has a good chance of reducing the amount of memory used
	 * by the mapping.
	 */
	if (bytes_obsolete * 100 / bytes_mapped >=
	    zfs_indirect_condense_obsolete_pct &&
	    mapping_size > zfs_condense_min_mapping_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete "
		    "spacemap covers %d%% of %lluMB mapping",
		    (u_longlong_t)vd->vdev_id,
		    (int)(bytes_obsolete * 100 / bytes_mapped),
		    (u_longlong_t)bytes_mapped / 1024 / 1024);
		return (B_TRUE);
	}

	/*
	 * If the obsolete space map takes up too much space on disk,
	 * condense in order to free up this disk space.
	 */
	if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete sm "
		    "length %lluMB >= max size %lluMB",
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)obsolete_sm_size / 1024 / 1024,
		    (u_longlong_t)zfs_condense_max_obsolete_bytes /
		    1024 / 1024);
		return (B_TRUE);
	}

	return (B_FALSE);
}
451 * This sync task completes (finishes) a condense, deleting the old
452 * mapping and replacing it with the new one.
455 spa_condense_indirect_complete_sync(void *arg
, dmu_tx_t
*tx
)
457 spa_condensing_indirect_t
*sci
= arg
;
458 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
459 spa_condensing_indirect_phys_t
*scip
=
460 &spa
->spa_condensing_indirect_phys
;
461 vdev_t
*vd
= vdev_lookup_top(spa
, scip
->scip_vdev
);
462 vdev_indirect_config_t
*vic
= &vd
->vdev_indirect_config
;
463 objset_t
*mos
= spa
->spa_meta_objset
;
464 vdev_indirect_mapping_t
*old_mapping
= vd
->vdev_indirect_mapping
;
465 uint64_t old_count
= vdev_indirect_mapping_num_entries(old_mapping
);
467 vdev_indirect_mapping_num_entries(sci
->sci_new_mapping
);
469 ASSERT(dmu_tx_is_syncing(tx
));
470 ASSERT3P(vd
->vdev_ops
, ==, &vdev_indirect_ops
);
471 ASSERT3P(sci
, ==, spa
->spa_condensing_indirect
);
472 for (int i
= 0; i
< TXG_SIZE
; i
++) {
473 ASSERT(list_is_empty(&sci
->sci_new_mapping_entries
[i
]));
475 ASSERT(vic
->vic_mapping_object
!= 0);
476 ASSERT3U(vd
->vdev_id
, ==, scip
->scip_vdev
);
477 ASSERT(scip
->scip_next_mapping_object
!= 0);
478 ASSERT(scip
->scip_prev_obsolete_sm_object
!= 0);
481 * Reset vdev_indirect_mapping to refer to the new object.
483 rw_enter(&vd
->vdev_indirect_rwlock
, RW_WRITER
);
484 vdev_indirect_mapping_close(vd
->vdev_indirect_mapping
);
485 vd
->vdev_indirect_mapping
= sci
->sci_new_mapping
;
486 rw_exit(&vd
->vdev_indirect_rwlock
);
488 sci
->sci_new_mapping
= NULL
;
489 vdev_indirect_mapping_free(mos
, vic
->vic_mapping_object
, tx
);
490 vic
->vic_mapping_object
= scip
->scip_next_mapping_object
;
491 scip
->scip_next_mapping_object
= 0;
493 space_map_free_obj(mos
, scip
->scip_prev_obsolete_sm_object
, tx
);
494 spa_feature_decr(spa
, SPA_FEATURE_OBSOLETE_COUNTS
, tx
);
495 scip
->scip_prev_obsolete_sm_object
= 0;
499 VERIFY0(zap_remove(mos
, DMU_POOL_DIRECTORY_OBJECT
,
500 DMU_POOL_CONDENSING_INDIRECT
, tx
));
501 spa_condensing_indirect_destroy(spa
->spa_condensing_indirect
);
502 spa
->spa_condensing_indirect
= NULL
;
504 zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
505 "new mapping object %llu has %llu entries "
506 "(was %llu entries)",
507 vd
->vdev_id
, dmu_tx_get_txg(tx
), vic
->vic_mapping_object
,
508 new_count
, old_count
);
510 vdev_config_dirty(spa
->spa_root_vdev
);
514 * This sync task appends entries to the new mapping object.
517 spa_condense_indirect_commit_sync(void *arg
, dmu_tx_t
*tx
)
519 spa_condensing_indirect_t
*sci
= arg
;
520 uint64_t txg
= dmu_tx_get_txg(tx
);
521 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
523 ASSERT(dmu_tx_is_syncing(tx
));
524 ASSERT3P(sci
, ==, spa
->spa_condensing_indirect
);
526 vdev_indirect_mapping_add_entries(sci
->sci_new_mapping
,
527 &sci
->sci_new_mapping_entries
[txg
& TXG_MASK
], tx
);
528 ASSERT(list_is_empty(&sci
->sci_new_mapping_entries
[txg
& TXG_MASK
]));
532 * Open-context function to add one entry to the new mapping. The new
533 * entry will be remembered and written from syncing context.
536 spa_condense_indirect_commit_entry(spa_t
*spa
,
537 vdev_indirect_mapping_entry_phys_t
*vimep
, uint32_t count
)
539 spa_condensing_indirect_t
*sci
= spa
->spa_condensing_indirect
;
541 ASSERT3U(count
, <, DVA_GET_ASIZE(&vimep
->vimep_dst
));
543 dmu_tx_t
*tx
= dmu_tx_create_dd(spa_get_dsl(spa
)->dp_mos_dir
);
544 dmu_tx_hold_space(tx
, sizeof (*vimep
) + sizeof (count
));
545 VERIFY0(dmu_tx_assign(tx
, TXG_WAIT
));
546 int txgoff
= dmu_tx_get_txg(tx
) & TXG_MASK
;
549 * If we are the first entry committed this txg, kick off the sync
550 * task to write to the MOS on our behalf.
552 if (list_is_empty(&sci
->sci_new_mapping_entries
[txgoff
])) {
553 dsl_sync_task_nowait(dmu_tx_pool(tx
),
554 spa_condense_indirect_commit_sync
, sci
,
555 0, ZFS_SPACE_CHECK_NONE
, tx
);
558 vdev_indirect_mapping_entry_t
*vime
=
559 kmem_alloc(sizeof (*vime
), KM_SLEEP
);
560 vime
->vime_mapping
= *vimep
;
561 vime
->vime_obsolete_count
= count
;
562 list_insert_tail(&sci
->sci_new_mapping_entries
[txgoff
], vime
);
568 spa_condense_indirect_generate_new_mapping(vdev_t
*vd
,
569 uint32_t *obsolete_counts
, uint64_t start_index
, zthr_t
*zthr
)
571 spa_t
*spa
= vd
->vdev_spa
;
572 uint64_t mapi
= start_index
;
573 vdev_indirect_mapping_t
*old_mapping
= vd
->vdev_indirect_mapping
;
574 uint64_t old_num_entries
=
575 vdev_indirect_mapping_num_entries(old_mapping
);
577 ASSERT3P(vd
->vdev_ops
, ==, &vdev_indirect_ops
);
578 ASSERT3U(vd
->vdev_id
, ==, spa
->spa_condensing_indirect_phys
.scip_vdev
);
580 zfs_dbgmsg("starting condense of vdev %llu from index %llu",
581 (u_longlong_t
)vd
->vdev_id
,
584 while (mapi
< old_num_entries
) {
586 if (zthr_iscancelled(zthr
)) {
587 zfs_dbgmsg("pausing condense of vdev %llu "
588 "at index %llu", (u_longlong_t
)vd
->vdev_id
,
593 vdev_indirect_mapping_entry_phys_t
*entry
=
594 &old_mapping
->vim_entries
[mapi
];
595 uint64_t entry_size
= DVA_GET_ASIZE(&entry
->vimep_dst
);
596 ASSERT3U(obsolete_counts
[mapi
], <=, entry_size
);
597 if (obsolete_counts
[mapi
] < entry_size
) {
598 spa_condense_indirect_commit_entry(spa
, entry
,
599 obsolete_counts
[mapi
]);
602 * This delay may be requested for testing, debugging,
603 * or performance reasons.
605 delay(zfs_condense_indirect_commit_entry_delay_ticks
);
614 spa_condense_indirect_thread_check(void *arg
, zthr_t
*zthr
)
618 return (spa
->spa_condensing_indirect
!= NULL
);
623 spa_condense_indirect_thread(void *arg
, zthr_t
*zthr
)
628 ASSERT3P(spa
->spa_condensing_indirect
, !=, NULL
);
629 spa_config_enter(spa
, SCL_VDEV
, FTAG
, RW_READER
);
630 vd
= vdev_lookup_top(spa
, spa
->spa_condensing_indirect_phys
.scip_vdev
);
631 ASSERT3P(vd
, !=, NULL
);
632 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
634 spa_condensing_indirect_t
*sci
= spa
->spa_condensing_indirect
;
635 spa_condensing_indirect_phys_t
*scip
=
636 &spa
->spa_condensing_indirect_phys
;
638 uint64_t start_index
;
639 vdev_indirect_mapping_t
*old_mapping
= vd
->vdev_indirect_mapping
;
640 space_map_t
*prev_obsolete_sm
= NULL
;
642 ASSERT3U(vd
->vdev_id
, ==, scip
->scip_vdev
);
643 ASSERT(scip
->scip_next_mapping_object
!= 0);
644 ASSERT(scip
->scip_prev_obsolete_sm_object
!= 0);
645 ASSERT3P(vd
->vdev_ops
, ==, &vdev_indirect_ops
);
647 for (int i
= 0; i
< TXG_SIZE
; i
++) {
649 * The list must start out empty in order for the
650 * _commit_sync() sync task to be properly registered
651 * on the first call to _commit_entry(); so it's wise
652 * to double check and ensure we actually are starting
655 ASSERT(list_is_empty(&sci
->sci_new_mapping_entries
[i
]));
658 VERIFY0(space_map_open(&prev_obsolete_sm
, spa
->spa_meta_objset
,
659 scip
->scip_prev_obsolete_sm_object
, 0, vd
->vdev_asize
, 0));
660 space_map_update(prev_obsolete_sm
);
661 counts
= vdev_indirect_mapping_load_obsolete_counts(old_mapping
);
662 if (prev_obsolete_sm
!= NULL
) {
663 vdev_indirect_mapping_load_obsolete_spacemap(old_mapping
,
664 counts
, prev_obsolete_sm
);
666 space_map_close(prev_obsolete_sm
);
669 * Generate new mapping. Determine what index to continue from
670 * based on the max offset that we've already written in the
673 uint64_t max_offset
=
674 vdev_indirect_mapping_max_offset(sci
->sci_new_mapping
);
675 if (max_offset
== 0) {
676 /* We haven't written anything to the new mapping yet. */
680 * Pick up from where we left off. _entry_for_offset()
681 * returns a pointer into the vim_entries array. If
682 * max_offset is greater than any of the mappings
683 * contained in the table NULL will be returned and
684 * that indicates we've exhausted our iteration of the
688 vdev_indirect_mapping_entry_phys_t
*entry
=
689 vdev_indirect_mapping_entry_for_offset_or_next(old_mapping
,
694 * We've already written the whole new mapping.
695 * This special value will cause us to skip the
696 * generate_new_mapping step and just do the sync
697 * task to complete the condense.
699 start_index
= UINT64_MAX
;
701 start_index
= entry
- old_mapping
->vim_entries
;
702 ASSERT3U(start_index
, <,
703 vdev_indirect_mapping_num_entries(old_mapping
));
707 spa_condense_indirect_generate_new_mapping(vd
, counts
,
710 vdev_indirect_mapping_free_obsolete_counts(old_mapping
, counts
);
713 * If the zthr has received a cancellation signal while running
714 * in generate_new_mapping() or at any point after that, then bail
715 * early. We don't want to complete the condense if the spa is
718 if (zthr_iscancelled(zthr
))
721 VERIFY0(dsl_sync_task(spa_name(spa
), NULL
,
722 spa_condense_indirect_complete_sync
, sci
, 0,
723 ZFS_SPACE_CHECK_EXTRA_RESERVED
));
729 * Sync task to begin the condensing process.
732 spa_condense_indirect_start_sync(vdev_t
*vd
, dmu_tx_t
*tx
)
734 spa_t
*spa
= vd
->vdev_spa
;
735 spa_condensing_indirect_phys_t
*scip
=
736 &spa
->spa_condensing_indirect_phys
;
738 ASSERT0(scip
->scip_next_mapping_object
);
739 ASSERT0(scip
->scip_prev_obsolete_sm_object
);
740 ASSERT0(scip
->scip_vdev
);
741 ASSERT(dmu_tx_is_syncing(tx
));
742 ASSERT3P(vd
->vdev_ops
, ==, &vdev_indirect_ops
);
743 ASSERT(spa_feature_is_active(spa
, SPA_FEATURE_OBSOLETE_COUNTS
));
744 ASSERT(vdev_indirect_mapping_num_entries(vd
->vdev_indirect_mapping
));
746 uint64_t obsolete_sm_obj
= vdev_obsolete_sm_object(vd
);
747 ASSERT(obsolete_sm_obj
!= 0);
749 scip
->scip_vdev
= vd
->vdev_id
;
750 scip
->scip_next_mapping_object
=
751 vdev_indirect_mapping_alloc(spa
->spa_meta_objset
, tx
);
753 scip
->scip_prev_obsolete_sm_object
= obsolete_sm_obj
;
756 * We don't need to allocate a new space map object, since
757 * vdev_indirect_sync_obsolete will allocate one when needed.
759 space_map_close(vd
->vdev_obsolete_sm
);
760 vd
->vdev_obsolete_sm
= NULL
;
761 VERIFY0(zap_remove(spa
->spa_meta_objset
, vd
->vdev_top_zap
,
762 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM
, tx
));
764 VERIFY0(zap_add(spa
->spa_dsl_pool
->dp_meta_objset
,
765 DMU_POOL_DIRECTORY_OBJECT
,
766 DMU_POOL_CONDENSING_INDIRECT
, sizeof (uint64_t),
767 sizeof (*scip
) / sizeof (uint64_t), scip
, tx
));
769 ASSERT3P(spa
->spa_condensing_indirect
, ==, NULL
);
770 spa
->spa_condensing_indirect
= spa_condensing_indirect_create(spa
);
772 zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
774 vd
->vdev_id
, dmu_tx_get_txg(tx
),
775 (u_longlong_t
)scip
->scip_prev_obsolete_sm_object
,
776 (u_longlong_t
)scip
->scip_next_mapping_object
);
778 zthr_wakeup(spa
->spa_condense_zthr
);
782 * Sync to the given vdev's obsolete space map any segments that are no longer
783 * referenced as of the given txg.
785 * If the obsolete space map doesn't exist yet, create and open it.
788 vdev_indirect_sync_obsolete(vdev_t
*vd
, dmu_tx_t
*tx
)
790 spa_t
*spa
= vd
->vdev_spa
;
791 vdev_indirect_config_t
*vic
= &vd
->vdev_indirect_config
;
793 ASSERT3U(vic
->vic_mapping_object
, !=, 0);
794 ASSERT(range_tree_space(vd
->vdev_obsolete_segments
) > 0);
795 ASSERT(vd
->vdev_removing
|| vd
->vdev_ops
== &vdev_indirect_ops
);
796 ASSERT(spa_feature_is_enabled(spa
, SPA_FEATURE_OBSOLETE_COUNTS
));
798 if (vdev_obsolete_sm_object(vd
) == 0) {
799 uint64_t obsolete_sm_object
=
800 space_map_alloc(spa
->spa_meta_objset
,
801 vdev_standard_sm_blksz
, tx
);
803 ASSERT(vd
->vdev_top_zap
!= 0);
804 VERIFY0(zap_add(vd
->vdev_spa
->spa_meta_objset
, vd
->vdev_top_zap
,
805 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM
,
806 sizeof (obsolete_sm_object
), 1, &obsolete_sm_object
, tx
));
807 ASSERT3U(vdev_obsolete_sm_object(vd
), !=, 0);
809 spa_feature_incr(spa
, SPA_FEATURE_OBSOLETE_COUNTS
, tx
);
810 VERIFY0(space_map_open(&vd
->vdev_obsolete_sm
,
811 spa
->spa_meta_objset
, obsolete_sm_object
,
812 0, vd
->vdev_asize
, 0));
813 space_map_update(vd
->vdev_obsolete_sm
);
816 ASSERT(vd
->vdev_obsolete_sm
!= NULL
);
817 ASSERT3U(vdev_obsolete_sm_object(vd
), ==,
818 space_map_object(vd
->vdev_obsolete_sm
));
820 space_map_write(vd
->vdev_obsolete_sm
,
821 vd
->vdev_obsolete_segments
, SM_ALLOC
, SM_NO_VDEVID
, tx
);
822 space_map_update(vd
->vdev_obsolete_sm
);
823 range_tree_vacate(vd
->vdev_obsolete_segments
, NULL
, NULL
);
827 spa_condense_init(spa_t
*spa
)
829 int error
= zap_lookup(spa
->spa_meta_objset
,
830 DMU_POOL_DIRECTORY_OBJECT
,
831 DMU_POOL_CONDENSING_INDIRECT
, sizeof (uint64_t),
832 sizeof (spa
->spa_condensing_indirect_phys
) / sizeof (uint64_t),
833 &spa
->spa_condensing_indirect_phys
);
835 if (spa_writeable(spa
)) {
836 spa
->spa_condensing_indirect
=
837 spa_condensing_indirect_create(spa
);
840 } else if (error
== ENOENT
) {
848 spa_condense_fini(spa_t
*spa
)
850 if (spa
->spa_condensing_indirect
!= NULL
) {
851 spa_condensing_indirect_destroy(spa
->spa_condensing_indirect
);
852 spa
->spa_condensing_indirect
= NULL
;
857 spa_start_indirect_condensing_thread(spa_t
*spa
)
859 ASSERT3P(spa
->spa_condense_zthr
, ==, NULL
);
860 spa
->spa_condense_zthr
= zthr_create(spa_condense_indirect_thread_check
,
861 spa_condense_indirect_thread
, spa
);
865 * Gets the obsolete spacemap object from the vdev's ZAP.
866 * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't
870 vdev_obsolete_sm_object(vdev_t
*vd
)
872 ASSERT0(spa_config_held(vd
->vdev_spa
, SCL_ALL
, RW_WRITER
));
873 if (vd
->vdev_top_zap
== 0) {
878 int err
= zap_lookup(vd
->vdev_spa
->spa_meta_objset
, vd
->vdev_top_zap
,
879 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM
, sizeof (sm_obj
), 1, &sm_obj
);
881 ASSERT(err
== 0 || err
== ENOENT
);
887 vdev_obsolete_counts_are_precise(vdev_t
*vd
)
889 ASSERT0(spa_config_held(vd
->vdev_spa
, SCL_ALL
, RW_WRITER
));
890 if (vd
->vdev_top_zap
== 0) {
895 int err
= zap_lookup(vd
->vdev_spa
->spa_meta_objset
, vd
->vdev_top_zap
,
896 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE
, sizeof (val
), 1, &val
);
898 ASSERT(err
== 0 || err
== ENOENT
);
905 vdev_indirect_close(vdev_t
*vd
)
911 vdev_indirect_open(vdev_t
*vd
, uint64_t *psize
, uint64_t *max_psize
,
914 *psize
= *max_psize
= vd
->vdev_asize
+
915 VDEV_LABEL_START_SIZE
+ VDEV_LABEL_END_SIZE
;
916 *ashift
= vd
->vdev_ashift
;
920 typedef struct remap_segment
{
924 uint64_t rs_split_offset
;
929 rs_alloc(vdev_t
*vd
, uint64_t offset
, uint64_t asize
, uint64_t split_offset
)
931 remap_segment_t
*rs
= kmem_alloc(sizeof (remap_segment_t
), KM_SLEEP
);
933 rs
->rs_offset
= offset
;
934 rs
->rs_asize
= asize
;
935 rs
->rs_split_offset
= split_offset
;
940 * Given an indirect vdev and an extent on that vdev, it duplicates the
941 * physical entries of the indirect mapping that correspond to the extent
942 * to a new array and returns a pointer to it. In addition, copied_entries
943 * is populated with the number of mapping entries that were duplicated.
945 * Note that the function assumes that the caller holds vdev_indirect_rwlock.
946 * This ensures that the mapping won't change due to condensing as we
947 * copy over its contents.
949 * Finally, since we are doing an allocation, it is up to the caller to
950 * free the array allocated in this function.
952 vdev_indirect_mapping_entry_phys_t
*
953 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t
*vd
, uint64_t offset
,
954 uint64_t asize
, uint64_t *copied_entries
)
956 vdev_indirect_mapping_entry_phys_t
*duplicate_mappings
= NULL
;
957 vdev_indirect_mapping_t
*vim
= vd
->vdev_indirect_mapping
;
958 uint64_t entries
= 0;
960 ASSERT(RW_READ_HELD(&vd
->vdev_indirect_rwlock
));
962 vdev_indirect_mapping_entry_phys_t
*first_mapping
=
963 vdev_indirect_mapping_entry_for_offset(vim
, offset
);
964 ASSERT3P(first_mapping
, !=, NULL
);
966 vdev_indirect_mapping_entry_phys_t
*m
= first_mapping
;
968 uint64_t size
= DVA_GET_ASIZE(&m
->vimep_dst
);
970 ASSERT3U(offset
, >=, DVA_MAPPING_GET_SRC_OFFSET(m
));
971 ASSERT3U(offset
, <, DVA_MAPPING_GET_SRC_OFFSET(m
) + size
);
973 uint64_t inner_offset
= offset
- DVA_MAPPING_GET_SRC_OFFSET(m
);
974 uint64_t inner_size
= MIN(asize
, size
- inner_offset
);
976 offset
+= inner_size
;
982 size_t copy_length
= entries
* sizeof (*first_mapping
);
983 duplicate_mappings
= kmem_alloc(copy_length
, KM_SLEEP
);
984 bcopy(first_mapping
, duplicate_mappings
, copy_length
);
985 *copied_entries
= entries
;
987 return (duplicate_mappings
);
/*
 * Goes through the relevant indirect mappings until it hits a concrete vdev
 * and issues the callback. On the way to the concrete vdev, if any other
 * indirect vdevs are encountered, then the callback will also be called on
 * each of those indirect vdevs. For example, if the segment is mapped to
 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
 * mapped to segment B on concrete vdev 2, then the callback will be called on
 * both vdev 1 and vdev 2.
 *
 * While the callback passed to vdev_indirect_remap() is called on every vdev
 * the function encounters, certain callbacks only care about concrete vdevs.
 * These types of callbacks should return immediately and explicitly when they
 * are called on an indirect vdev.
 *
 * Because there is a possibility that a DVA section in the indirect device
 * has been split into multiple sections in our mapping, we keep track
 * of the relevant contiguous segments of the new location (remap_segment_t)
 * in a stack. This way we can call the callback for each of the new sections
 * created by a single section of the indirect device. Note though, that in
 * this scenario the callbacks in each split block won't occur in-order in
 * terms of offset, so callers should not make any assumptions about that.
 *
 * For callbacks that don't handle split blocks and immediately return when
 * they encounter them (as is the case for remap_blkptr_cb), the caller can
 * assume that its callback will be applied from the first indirect vdev
 * encountered to the last one and then the concrete vdev, in that order.
 */
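/*
 * A hypothetical sketch (the names below are illustrative only and not
 * part of this file) of a callback that only cares about concrete vdevs;
 * it simply returns when handed an indirect vdev:
 *
 *	static void
 *	count_concrete_bytes_cb(uint64_t split_offset, vdev_t *vd,
 *	    uint64_t offset, uint64_t size, void *arg)
 *	{
 *		uint64_t *bytes = arg;
 *
 *		if (vd->vdev_ops == &vdev_indirect_ops)
 *			return;
 *
 *		*bytes += size;
 *	}
 *
 * It could then be applied to an extent with:
 *
 *	uint64_t n = 0;
 *	vdev_indirect_remap(vd, offset, asize, count_concrete_bytes_cb, &n);
 */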
1018 vdev_indirect_remap(vdev_t
*vd
, uint64_t offset
, uint64_t asize
,
1019 void (*func
)(uint64_t, vdev_t
*, uint64_t, uint64_t, void *), void *arg
)
1022 spa_t
*spa
= vd
->vdev_spa
;
1024 list_create(&stack
, sizeof (remap_segment_t
),
1025 offsetof(remap_segment_t
, rs_node
));
1027 for (remap_segment_t
*rs
= rs_alloc(vd
, offset
, asize
, 0);
1028 rs
!= NULL
; rs
= list_remove_head(&stack
)) {
1029 vdev_t
*v
= rs
->rs_vd
;
1030 uint64_t num_entries
= 0;
1032 ASSERT(spa_config_held(spa
, SCL_ALL
, RW_READER
) != 0);
1033 ASSERT(rs
->rs_asize
> 0);
1036 * Note: As this function can be called from open context
1037 * (e.g. zio_read()), we need the following rwlock to
1038 * prevent the mapping from being changed by condensing.
1040 * So we grab the lock and we make a copy of the entries
1041 * that are relevant to the extent that we are working on.
1042 * Once that is done, we drop the lock and iterate over
1043 * our copy of the mapping. Once we are done with the with
1044 * the remap segment and we free it, we also free our copy
1045 * of the indirect mapping entries that are relevant to it.
1047 * This way we don't need to wait until the function is
1048 * finished with a segment, to condense it. In addition, we
1049 * don't need a recursive rwlock for the case that a call to
1050 * vdev_indirect_remap() needs to call itself (through the
1051 * codepath of its callback) for the same vdev in the middle
1054 rw_enter(&v
->vdev_indirect_rwlock
, RW_READER
);
1055 vdev_indirect_mapping_t
*vim
= v
->vdev_indirect_mapping
;
1056 ASSERT3P(vim
, !=, NULL
);
1058 vdev_indirect_mapping_entry_phys_t
*mapping
=
1059 vdev_indirect_mapping_duplicate_adjacent_entries(v
,
1060 rs
->rs_offset
, rs
->rs_asize
, &num_entries
);
1061 ASSERT3P(mapping
, !=, NULL
);
1062 ASSERT3U(num_entries
, >, 0);
1063 rw_exit(&v
->vdev_indirect_rwlock
);
1065 for (uint64_t i
= 0; i
< num_entries
; i
++) {
1067 * Note: the vdev_indirect_mapping can not change
1068 * while we are running. It only changes while the
1069 * removal is in progress, and then only from syncing
1070 * context. While a removal is in progress, this
1071 * function is only called for frees, which also only
1072 * happen from syncing context.
1074 vdev_indirect_mapping_entry_phys_t
*m
= &mapping
[i
];
1076 ASSERT3P(m
, !=, NULL
);
1077 ASSERT3U(rs
->rs_asize
, >, 0);
1079 uint64_t size
= DVA_GET_ASIZE(&m
->vimep_dst
);
1080 uint64_t dst_offset
= DVA_GET_OFFSET(&m
->vimep_dst
);
1081 uint64_t dst_vdev
= DVA_GET_VDEV(&m
->vimep_dst
);
1083 ASSERT3U(rs
->rs_offset
, >=,
1084 DVA_MAPPING_GET_SRC_OFFSET(m
));
1085 ASSERT3U(rs
->rs_offset
, <,
1086 DVA_MAPPING_GET_SRC_OFFSET(m
) + size
);
1087 ASSERT3U(dst_vdev
, !=, v
->vdev_id
);
1089 uint64_t inner_offset
= rs
->rs_offset
-
1090 DVA_MAPPING_GET_SRC_OFFSET(m
);
1091 uint64_t inner_size
=
1092 MIN(rs
->rs_asize
, size
- inner_offset
);
1094 vdev_t
*dst_v
= vdev_lookup_top(spa
, dst_vdev
);
1095 ASSERT3P(dst_v
, !=, NULL
);
1097 if (dst_v
->vdev_ops
== &vdev_indirect_ops
) {
1098 list_insert_head(&stack
,
1099 rs_alloc(dst_v
, dst_offset
+ inner_offset
,
1100 inner_size
, rs
->rs_split_offset
));
1104 if ((zfs_flags
& ZFS_DEBUG_INDIRECT_REMAP
) &&
1105 IS_P2ALIGNED(inner_size
, 2 * SPA_MINBLOCKSIZE
)) {
1107 * Note: This clause exists only solely for
1108 * testing purposes. We use it to ensure that
1109 * split blocks work and that the callbacks
1110 * using them yield the same result if issued
1113 uint64_t inner_half
= inner_size
/ 2;
1115 func(rs
->rs_split_offset
+ inner_half
, dst_v
,
1116 dst_offset
+ inner_offset
+ inner_half
,
1119 func(rs
->rs_split_offset
, dst_v
,
1120 dst_offset
+ inner_offset
,
1123 func(rs
->rs_split_offset
, dst_v
,
1124 dst_offset
+ inner_offset
,
1128 rs
->rs_offset
+= inner_size
;
1129 rs
->rs_asize
-= inner_size
;
1130 rs
->rs_split_offset
+= inner_size
;
1132 VERIFY0(rs
->rs_asize
);
1134 kmem_free(mapping
, num_entries
* sizeof (*mapping
));
1135 kmem_free(rs
, sizeof (remap_segment_t
));
1137 list_destroy(&stack
);
1141 vdev_indirect_child_io_done(zio_t
*zio
)
1143 zio_t
*pio
= zio
->io_private
;
1145 mutex_enter(&pio
->io_lock
);
1146 pio
->io_error
= zio_worst_error(pio
->io_error
, zio
->io_error
);
1147 mutex_exit(&pio
->io_lock
);
1149 abd_put(zio
->io_abd
);
1153 * This is a callback for vdev_indirect_remap() which allocates an
1154 * indirect_split_t for each split segment and adds it to iv_splits.
1157 vdev_indirect_gather_splits(uint64_t split_offset
, vdev_t
*vd
, uint64_t offset
,
1158 uint64_t size
, void *arg
)
1161 indirect_vsd_t
*iv
= zio
->io_vsd
;
1163 ASSERT3P(vd
, !=, NULL
);
1165 if (vd
->vdev_ops
== &vdev_indirect_ops
)
1169 if (vd
->vdev_ops
== &vdev_mirror_ops
)
1170 n
= vd
->vdev_children
;
1172 indirect_split_t
*is
=
1173 kmem_zalloc(offsetof(indirect_split_t
, is_child
[n
]), KM_SLEEP
);
1175 is
->is_children
= n
;
1177 is
->is_split_offset
= split_offset
;
1178 is
->is_target_offset
= offset
;
1182 * Note that we only consider multiple copies of the data for
1183 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
1184 * though they use the same ops as mirror, because there's only one
1185 * "good" copy under the replacing/spare.
1187 if (vd
->vdev_ops
== &vdev_mirror_ops
) {
1188 for (int i
= 0; i
< n
; i
++) {
1189 is
->is_child
[i
].ic_vdev
= vd
->vdev_child
[i
];
1192 is
->is_child
[0].ic_vdev
= vd
;
1195 list_insert_tail(&iv
->iv_splits
, is
);
1199 vdev_indirect_read_split_done(zio_t
*zio
)
1201 indirect_child_t
*ic
= zio
->io_private
;
1203 if (zio
->io_error
!= 0) {
1205 * Clear ic_data to indicate that we do not have data for this
1208 abd_free(ic
->ic_data
);
1214 * Issue reads for all copies (mirror children) of all splits.
1217 vdev_indirect_read_all(zio_t
*zio
)
1219 indirect_vsd_t
*iv
= zio
->io_vsd
;
1221 for (indirect_split_t
*is
= list_head(&iv
->iv_splits
);
1222 is
!= NULL
; is
= list_next(&iv
->iv_splits
, is
)) {
1223 for (int i
= 0; i
< is
->is_children
; i
++) {
1224 indirect_child_t
*ic
= &is
->is_child
[i
];
1226 if (!vdev_readable(ic
->ic_vdev
))
1230 * Note, we may read from a child whose DTL
1231 * indicates that the data may not be present here.
1232 * While this might result in a few i/os that will
1233 * likely return incorrect data, it simplifies the
1234 * code since we can treat scrub and resilver
1235 * identically. (The incorrect data will be
1236 * detected and ignored when we verify the
1240 ic
->ic_data
= abd_alloc_sametype(zio
->io_abd
,
1243 zio_nowait(zio_vdev_child_io(zio
, NULL
,
1244 ic
->ic_vdev
, is
->is_target_offset
, ic
->ic_data
,
1245 is
->is_size
, zio
->io_type
, zio
->io_priority
, 0,
1246 vdev_indirect_read_split_done
, ic
));
1249 iv
->iv_reconstruct
= B_TRUE
;
1253 vdev_indirect_io_start(zio_t
*zio
)
1255 spa_t
*spa
= zio
->io_spa
;
1256 indirect_vsd_t
*iv
= kmem_zalloc(sizeof (*iv
), KM_SLEEP
);
1257 list_create(&iv
->iv_splits
,
1258 sizeof (indirect_split_t
), offsetof(indirect_split_t
, is_node
));
1261 zio
->io_vsd_ops
= &vdev_indirect_vsd_ops
;
1263 ASSERT(spa_config_held(spa
, SCL_ALL
, RW_READER
) != 0);
1264 if (zio
->io_type
!= ZIO_TYPE_READ
) {
1265 ASSERT3U(zio
->io_type
, ==, ZIO_TYPE_WRITE
);
1267 * Note: this code can handle other kinds of writes,
1268 * but we don't expect them.
1270 ASSERT((zio
->io_flags
& (ZIO_FLAG_SELF_HEAL
|
1271 ZIO_FLAG_RESILVER
| ZIO_FLAG_INDUCE_DAMAGE
)) != 0);
1274 vdev_indirect_remap(zio
->io_vd
, zio
->io_offset
, zio
->io_size
,
1275 vdev_indirect_gather_splits
, zio
);
1277 indirect_split_t
*first
= list_head(&iv
->iv_splits
);
1278 if (first
->is_size
== zio
->io_size
) {
1280 * This is not a split block; we are pointing to the entire
1281 * data, which will checksum the same as the original data.
1282 * Pass the BP down so that the child i/o can verify the
1283 * checksum, and try a different location if available
1284 * (e.g. on a mirror).
1286 * While this special case could be handled the same as the
1287 * general (split block) case, doing it this way ensures
1288 * that the vast majority of blocks on indirect vdevs
1289 * (which are not split) are handled identically to blocks
1290 * on non-indirect vdevs. This allows us to be less strict
1291 * about performance in the general (but rare) case.
1293 ASSERT0(first
->is_split_offset
);
1294 ASSERT3P(list_next(&iv
->iv_splits
, first
), ==, NULL
);
1295 zio_nowait(zio_vdev_child_io(zio
, zio
->io_bp
,
1296 first
->is_vdev
, first
->is_target_offset
,
1297 abd_get_offset(zio
->io_abd
, 0),
1298 zio
->io_size
, zio
->io_type
, zio
->io_priority
, 0,
1299 vdev_indirect_child_io_done
, zio
));
1301 iv
->iv_split_block
= B_TRUE
;
1302 if (zio
->io_flags
& (ZIO_FLAG_SCRUB
| ZIO_FLAG_RESILVER
)) {
1304 * Read all copies. Note that for simplicity,
1305 * we don't bother consulting the DTL in the
1308 vdev_indirect_read_all(zio
);
1311 * Read one copy of each split segment, from the
1312 * top-level vdev. Since we don't know the
1313 * checksum of each split individually, the child
1314 * zio can't ensure that we get the right data.
1315 * E.g. if it's a mirror, it will just read from a
1316 * random (healthy) leaf vdev. We have to verify
1317 * the checksum in vdev_indirect_io_done().
1319 for (indirect_split_t
*is
= list_head(&iv
->iv_splits
);
1320 is
!= NULL
; is
= list_next(&iv
->iv_splits
, is
)) {
1321 zio_nowait(zio_vdev_child_io(zio
, NULL
,
1322 is
->is_vdev
, is
->is_target_offset
,
1323 abd_get_offset(zio
->io_abd
,
1324 is
->is_split_offset
),
1325 is
->is_size
, zio
->io_type
,
1326 zio
->io_priority
, 0,
1327 vdev_indirect_child_io_done
, zio
));
1336 * Report a checksum error for a child.
1339 vdev_indirect_checksum_error(zio_t
*zio
,
1340 indirect_split_t
*is
, indirect_child_t
*ic
)
1342 vdev_t
*vd
= ic
->ic_vdev
;
1344 if (zio
->io_flags
& ZIO_FLAG_SPECULATIVE
)
1347 mutex_enter(&vd
->vdev_stat_lock
);
1348 vd
->vdev_stat
.vs_checksum_errors
++;
1349 mutex_exit(&vd
->vdev_stat_lock
);
1351 zio_bad_cksum_t zbc
= { 0 };
1352 void *bad_buf
= abd_borrow_buf_copy(ic
->ic_data
, is
->is_size
);
1353 abd_t
*good_abd
= is
->is_child
[is
->is_good_child
].ic_data
;
1354 void *good_buf
= abd_borrow_buf_copy(good_abd
, is
->is_size
);
1355 zfs_ereport_post_checksum(zio
->io_spa
, vd
, zio
,
1356 is
->is_target_offset
, is
->is_size
, good_buf
, bad_buf
, &zbc
);
1357 abd_return_buf(ic
->ic_data
, bad_buf
, is
->is_size
);
1358 abd_return_buf(good_abd
, good_buf
, is
->is_size
);
1362 * Issue repair i/os for any incorrect copies. We do this by comparing
1363 * each split segment's correct data (is_good_child's ic_data) with each
1364 * other copy of the data. If they differ, then we overwrite the bad data
1365 * with the good copy. Note that we do this without regard for the DTL's,
1366 * which simplifies this code and also issues the optimal number of writes
1367 * (based on which copies actually read bad data, as opposed to which we
1368 * think might be wrong). For the same reason, we always use
1369 * ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
1372 vdev_indirect_repair(zio_t
*zio
)
1374 indirect_vsd_t
*iv
= zio
->io_vsd
;
1376 enum zio_flag flags
= ZIO_FLAG_IO_REPAIR
;
1378 if (!(zio
->io_flags
& (ZIO_FLAG_SCRUB
| ZIO_FLAG_RESILVER
)))
1379 flags
|= ZIO_FLAG_SELF_HEAL
;
1381 if (!spa_writeable(zio
->io_spa
))
1384 for (indirect_split_t
*is
= list_head(&iv
->iv_splits
);
1385 is
!= NULL
; is
= list_next(&iv
->iv_splits
, is
)) {
1386 indirect_child_t
*good_child
= &is
->is_child
[is
->is_good_child
];
1388 for (int c
= 0; c
< is
->is_children
; c
++) {
1389 indirect_child_t
*ic
= &is
->is_child
[c
];
1390 if (ic
== good_child
)
1392 if (ic
->ic_data
== NULL
)
1394 if (abd_cmp(good_child
->ic_data
, ic
->ic_data
,
1398 zio_nowait(zio_vdev_child_io(zio
, NULL
,
1399 ic
->ic_vdev
, is
->is_target_offset
,
1400 good_child
->ic_data
, is
->is_size
,
1401 ZIO_TYPE_WRITE
, ZIO_PRIORITY_ASYNC_WRITE
,
1402 ZIO_FLAG_IO_REPAIR
| ZIO_FLAG_SELF_HEAL
,
1405 vdev_indirect_checksum_error(zio
, is
, ic
);
1411 * Report checksum errors on all children that we read from.
1414 vdev_indirect_all_checksum_errors(zio_t
*zio
)
1416 indirect_vsd_t
*iv
= zio
->io_vsd
;
1418 if (zio
->io_flags
& ZIO_FLAG_SPECULATIVE
)
1421 for (indirect_split_t
*is
= list_head(&iv
->iv_splits
);
1422 is
!= NULL
; is
= list_next(&iv
->iv_splits
, is
)) {
1423 for (int c
= 0; c
< is
->is_children
; c
++) {
1424 indirect_child_t
*ic
= &is
->is_child
[c
];
1426 if (ic
->ic_data
== NULL
)
1429 vdev_t
*vd
= ic
->ic_vdev
;
1431 mutex_enter(&vd
->vdev_stat_lock
);
1432 vd
->vdev_stat
.vs_checksum_errors
++;
1433 mutex_exit(&vd
->vdev_stat_lock
);
1435 zfs_ereport_post_checksum(zio
->io_spa
, vd
, zio
,
1436 is
->is_target_offset
, is
->is_size
,
/*
 * This function is called when we have read all copies of the data and need
 * to try to find a combination of copies that gives us the right checksum.
 *
 * If we pointed to any mirror vdevs, this effectively does the job of the
 * mirror. The mirror vdev code can't do its own job because we don't know
 * the checksum of each split segment individually. We have to try every
 * combination of copies of split segments, until we find one that checksums
 * correctly. (Or until we have tried all combinations, or have tried
 * 2^zfs_reconstruct_indirect_segments_max combinations. In these cases we
 * set io_error to ECKSUM to propagate the error up to the user.)
 *
 * For example, if we have 3 segments in the split,
 * and each points to a 2-way mirror, we will have the following pieces of
 * data:
 *
 *       |      mirror child
 * split |    [0]         [1]
 * ======|=====================
 *   A   |  data_A_0   data_A_1
 *   B   |  data_B_0   data_B_1
 *   C   |  data_C_0   data_C_1
 *
 * We will try the following (mirror children)^(number of splits) (2^3=8)
 * combinations, which is similar to bitwise-little-endian counting in
 * binary. In general each "digit" corresponds to a split segment, and the
 * base of each digit is is_children, which can be different for each
 * split segment.
 *
 *  "low bit"          "high bit"
 *  data_A_0  data_B_0  data_C_0
 *  data_A_1  data_B_0  data_C_0
 *  data_A_0  data_B_1  data_C_0
 *  data_A_1  data_B_1  data_C_0
 *  data_A_0  data_B_0  data_C_1
 *  data_A_1  data_B_0  data_C_1
 *  data_A_0  data_B_1  data_C_1
 *  data_A_1  data_B_1  data_C_1
 *
 * Note that the split segments may be on the same or different top-level
 * vdevs. In either case, we try lots of combinations (see
 * zfs_reconstruct_indirect_segments_max). This ensures that if a mirror has
 * small silent errors on all of its children, we can still reconstruct the
 * correct data, as long as those errors are at sufficiently-separated
 * offsets (specifically, separated by the largest block size - default of
 * 128KB, but up to 16MB).
 */
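/*
 * Worked example of the limit: with 3 two-way-mirror segments there are
 * only 2^3 = 8 combinations, so (with the default
 * zfs_reconstruct_indirect_segments_max of 10) we enumerate them all in
 * the base-is_children counting order shown above. With 20 such segments
 * there would be 2^20 combinations, so we instead try at most
 * 2^10 = 1024 randomly-chosen assignments of is_good_child before
 * declaring the block unrecoverable.
 */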
1491 vdev_indirect_reconstruct_io_done(zio_t
*zio
)
1493 indirect_vsd_t
*iv
= zio
->io_vsd
;
1494 uint64_t attempts
= 0;
1495 uint64_t attempts_max
= 1ULL << zfs_reconstruct_indirect_segments_max
;
1498 for (indirect_split_t
*is
= list_head(&iv
->iv_splits
);
1499 is
!= NULL
; is
= list_next(&iv
->iv_splits
, is
))
1503 /* copy data from splits to main zio */
1505 for (indirect_split_t
*is
= list_head(&iv
->iv_splits
);
1506 is
!= NULL
; is
= list_next(&iv
->iv_splits
, is
)) {
1509 * If this child failed, its ic_data will be NULL.
1510 * Skip this combination.
1512 if (is
->is_child
[is
->is_good_child
].ic_data
== NULL
) {
1517 abd_copy_off(zio
->io_abd
,
1518 is
->is_child
[is
->is_good_child
].ic_data
,
1519 is
->is_split_offset
, 0, is
->is_size
);
1522 /* See if this checksum matches. */
1523 zio_bad_cksum_t zbc
;
1524 ret
= zio_checksum_error(zio
, &zbc
);
1526 /* Found a matching checksum. Issue repair i/os. */
1527 vdev_indirect_repair(zio
);
1528 zio_checksum_verified(zio
);
1533 * Checksum failed; try a different combination of split
1539 if (segments
<= zfs_reconstruct_indirect_segments_max
) {
1541 * There are relatively few segments, so
1542 * deterministically check all combinations. We do
1543 * this by by adding one to the first split's
1544 * good_child. If it overflows, then "carry over" to
1545 * the next split (like counting in base is_children,
1546 * but each digit can have a different base).
1548 for (indirect_split_t
*is
= list_head(&iv
->iv_splits
);
1549 is
!= NULL
; is
= list_next(&iv
->iv_splits
, is
)) {
1550 is
->is_good_child
++;
1551 if (is
->is_good_child
< is
->is_children
) {
1555 is
->is_good_child
= 0;
1557 } else if (++attempts
< attempts_max
) {
1559 * There are too many combinations to try all of them
1560 * in a reasonable amount of time, so try a fixed
1561 * number of random combinations, after which we'll
1562 * consider the block unrecoverable.
1564 for (indirect_split_t
*is
= list_head(&iv
->iv_splits
);
1565 is
!= NULL
; is
= list_next(&iv
->iv_splits
, is
)) {
1567 spa_get_random(is
->is_children
);
1572 /* All combinations failed. */
1573 zio
->io_error
= ret
;
1574 vdev_indirect_all_checksum_errors(zio
);
1575 zio_checksum_verified(zio
);
1582 vdev_indirect_io_done(zio_t
*zio
)
1584 indirect_vsd_t
*iv
= zio
->io_vsd
;
1586 if (iv
->iv_reconstruct
) {
1588 * We have read all copies of the data (e.g. from mirrors),
1589 * either because this was a scrub/resilver, or because the
1590 * one-copy read didn't checksum correctly.
1592 vdev_indirect_reconstruct_io_done(zio
);
1596 if (!iv
->iv_split_block
) {
1598 * This was not a split block, so we passed the BP down,
1599 * and the checksum was handled by the (one) child zio.
1604 zio_bad_cksum_t zbc
;
1605 int ret
= zio_checksum_error(zio
, &zbc
);
1607 zio_checksum_verified(zio
);
1612 * The checksum didn't match. Read all copies of all splits, and
1613 * then we will try to reconstruct. The next time
1614 * vdev_indirect_io_done() is called, iv_reconstruct will be set.
1616 vdev_indirect_read_all(zio
);
1618 zio_vdev_io_redone(zio
);
vdev_ops_t vdev_indirect_ops = {
	vdev_indirect_open,
	vdev_indirect_close,
	vdev_default_asize,
	vdev_indirect_io_start,
	vdev_indirect_io_done,
	NULL,
	NULL,
	NULL,
	vdev_indirect_remap,
	VDEV_TYPE_INDIRECT,	/* name of this vdev type */
	B_FALSE			/* leaf vdev */
};