 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, slog, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))
#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
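
/*
 * The two high-order weight bits track a metaslab's activation state
 * (primary/secondary); the remaining bits hold the space-based weight
 * computed in metaslab_weight() below.
 */

/*
 * metaslab_aliquot is, roughly, the amount allocated from a metaslab
 * group before the rotor moves on to the next group; see
 * metaslab_group_activate() and metaslab_alloc_dva() below.
 */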
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
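
/*
 * For example (illustrative): with the default of 200, a space map is only
 * condensed once its on-disk size is at least twice its minimal in-core
 * form; see metaslab_should_condense().
 */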
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
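
/*
 * For example (illustrative value): setting zfs_mg_noalloc_threshold to 10
 * makes groups with 10% or less free space ineligible for normal allocations
 * until every group in the pool has dropped to 10% or less.
 */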
/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;
/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
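
/* With TXG_SIZE == 4, this default keeps an idle metaslab loaded for 8 txgs. */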
/*
 * Should we be willing to write data to degraded vdevs?
 */
boolean_t zfs_write_to_degraded = B_FALSE;
/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;
/*
 * Enable/disable preloading of metaslabs.
 */
boolean_t metaslab_preload_enabled = B_TRUE;
/*
 * Enable/disable additional weight factor for each metaslab.
 */
boolean_t metaslab_weight_factor_enable = B_FALSE;
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
150 metaslab_class_create(spa_t
*spa
, metaslab_ops_t
*ops
)
152 metaslab_class_t
*mc
;
154 mc
= kmem_zalloc(sizeof (metaslab_class_t
), KM_SLEEP
);
164 metaslab_class_destroy(metaslab_class_t
*mc
)
166 ASSERT(mc
->mc_rotor
== NULL
);
167 ASSERT(mc
->mc_alloc
== 0);
168 ASSERT(mc
->mc_deferred
== 0);
169 ASSERT(mc
->mc_space
== 0);
170 ASSERT(mc
->mc_dspace
== 0);
172 kmem_free(mc
, sizeof (metaslab_class_t
));
176 metaslab_class_validate(metaslab_class_t
*mc
)
178 metaslab_group_t
*mg
;
182 * Must hold one of the spa_config locks.
184 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_READER
) ||
185 spa_config_held(mc
->mc_spa
, SCL_ALL
, RW_WRITER
));
187 if ((mg
= mc
->mc_rotor
) == NULL
)
192 ASSERT(vd
->vdev_mg
!= NULL
);
193 ASSERT3P(vd
->vdev_top
, ==, vd
);
194 ASSERT3P(mg
->mg_class
, ==, mc
);
195 ASSERT3P(vd
->vdev_ops
, !=, &vdev_hole_ops
);
196 } while ((mg
= mg
->mg_next
) != mc
->mc_rotor
);
202 metaslab_class_space_update(metaslab_class_t
*mc
, int64_t alloc_delta
,
203 int64_t defer_delta
, int64_t space_delta
, int64_t dspace_delta
)
205 atomic_add_64(&mc
->mc_alloc
, alloc_delta
);
206 atomic_add_64(&mc
->mc_deferred
, defer_delta
);
207 atomic_add_64(&mc
->mc_space
, space_delta
);
208 atomic_add_64(&mc
->mc_dspace
, dspace_delta
);
212 metaslab_class_get_alloc(metaslab_class_t
*mc
)
214 return (mc
->mc_alloc
);
218 metaslab_class_get_deferred(metaslab_class_t
*mc
)
220 return (mc
->mc_deferred
);
224 metaslab_class_get_space(metaslab_class_t
*mc
)
226 return (mc
->mc_space
);
230 metaslab_class_get_dspace(metaslab_class_t
*mc
)
232 return (spa_deflate(mc
->mc_spa
) ? mc
->mc_dspace
: mc
->mc_space
);
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
241 metaslab_compare(const void *x1
, const void *x2
)
243 const metaslab_t
*m1
= x1
;
244 const metaslab_t
*m2
= x2
;
246 if (m1
->ms_weight
< m2
->ms_weight
)
248 if (m1
->ms_weight
> m2
->ms_weight
)
252 * If the weights are identical, use the offset to force uniqueness.
254 if (m1
->ms_start
< m2
->ms_start
)
256 if (m1
->ms_start
> m2
->ms_start
)
259 ASSERT3P(m1
, ==, m2
);
265 * Update the allocatable flag and the metaslab group's capacity.
266 * The allocatable flag is set to true if the capacity is below
267 * the zfs_mg_noalloc_threshold. If a metaslab group transitions
268 * from allocatable to non-allocatable or vice versa then the metaslab
269 * group's class is updated to reflect the transition.
272 metaslab_group_alloc_update(metaslab_group_t
*mg
)
274 vdev_t
*vd
= mg
->mg_vd
;
275 metaslab_class_t
*mc
= mg
->mg_class
;
276 vdev_stat_t
*vs
= &vd
->vdev_stat
;
277 boolean_t was_allocatable
;
279 ASSERT(vd
== vd
->vdev_top
);
281 mutex_enter(&mg
->mg_lock
);
282 was_allocatable
= mg
->mg_allocatable
;
284 mg
->mg_free_capacity
= ((vs
->vs_space
- vs
->vs_alloc
) * 100) /
287 mg
->mg_allocatable
= (mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
);
290 * The mc_alloc_groups maintains a count of the number of
291 * groups in this metaslab class that are still above the
292 * zfs_mg_noalloc_threshold. This is used by the allocating
293 * threads to determine if they should avoid allocations to
294 * a given group. The allocator will avoid allocations to a group
295 * if that group has reached or is below the zfs_mg_noalloc_threshold
296 * and there are still other groups that are above the threshold.
297 * When a group transitions from allocatable to non-allocatable or
298 * vice versa we update the metaslab class to reflect that change.
299 * When the mc_alloc_groups value drops to 0 that means that all
300 * groups have reached the zfs_mg_noalloc_threshold making all groups
301 * eligible for allocations. This effectively means that all devices
302 * are balanced again.
304 if (was_allocatable
&& !mg
->mg_allocatable
)
305 mc
->mc_alloc_groups
--;
306 else if (!was_allocatable
&& mg
->mg_allocatable
)
307 mc
->mc_alloc_groups
++;
308 mutex_exit(&mg
->mg_lock
);
312 metaslab_group_create(metaslab_class_t
*mc
, vdev_t
*vd
)
314 metaslab_group_t
*mg
;
316 mg
= kmem_zalloc(sizeof (metaslab_group_t
), KM_SLEEP
);
317 mutex_init(&mg
->mg_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
318 avl_create(&mg
->mg_metaslab_tree
, metaslab_compare
,
319 sizeof (metaslab_t
), offsetof(struct metaslab
, ms_group_node
));
322 mg
->mg_activation_count
= 0;
324 mg
->mg_taskq
= taskq_create("metaslab_group_taskq", metaslab_load_pct
,
325 minclsyspri
, 10, INT_MAX
, TASKQ_THREADS_CPU_PCT
);
331 metaslab_group_destroy(metaslab_group_t
*mg
)
333 ASSERT(mg
->mg_prev
== NULL
);
334 ASSERT(mg
->mg_next
== NULL
);
336 * We may have gone below zero with the activation count
337 * either because we never activated in the first place or
338 * because we're done, and possibly removing the vdev.
340 ASSERT(mg
->mg_activation_count
<= 0);
342 taskq_destroy(mg
->mg_taskq
);
343 avl_destroy(&mg
->mg_metaslab_tree
);
344 mutex_destroy(&mg
->mg_lock
);
345 kmem_free(mg
, sizeof (metaslab_group_t
));
349 metaslab_group_activate(metaslab_group_t
*mg
)
351 metaslab_class_t
*mc
= mg
->mg_class
;
352 metaslab_group_t
*mgprev
, *mgnext
;
354 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
356 ASSERT(mc
->mc_rotor
!= mg
);
357 ASSERT(mg
->mg_prev
== NULL
);
358 ASSERT(mg
->mg_next
== NULL
);
359 ASSERT(mg
->mg_activation_count
<= 0);
361 if (++mg
->mg_activation_count
<= 0)
364 mg
->mg_aliquot
= metaslab_aliquot
* MAX(1, mg
->mg_vd
->vdev_children
);
365 metaslab_group_alloc_update(mg
);
367 if ((mgprev
= mc
->mc_rotor
) == NULL
) {
371 mgnext
= mgprev
->mg_next
;
372 mg
->mg_prev
= mgprev
;
373 mg
->mg_next
= mgnext
;
374 mgprev
->mg_next
= mg
;
375 mgnext
->mg_prev
= mg
;
381 metaslab_group_passivate(metaslab_group_t
*mg
)
383 metaslab_class_t
*mc
= mg
->mg_class
;
384 metaslab_group_t
*mgprev
, *mgnext
;
386 ASSERT(spa_config_held(mc
->mc_spa
, SCL_ALLOC
, RW_WRITER
));
388 if (--mg
->mg_activation_count
!= 0) {
389 ASSERT(mc
->mc_rotor
!= mg
);
390 ASSERT(mg
->mg_prev
== NULL
);
391 ASSERT(mg
->mg_next
== NULL
);
392 ASSERT(mg
->mg_activation_count
< 0);
396 taskq_wait(mg
->mg_taskq
);
398 mgprev
= mg
->mg_prev
;
399 mgnext
= mg
->mg_next
;
404 mc
->mc_rotor
= mgnext
;
405 mgprev
->mg_next
= mgnext
;
406 mgnext
->mg_prev
= mgprev
;
414 metaslab_group_add(metaslab_group_t
*mg
, metaslab_t
*msp
)
416 mutex_enter(&mg
->mg_lock
);
417 ASSERT(msp
->ms_group
== NULL
);
420 avl_add(&mg
->mg_metaslab_tree
, msp
);
421 mutex_exit(&mg
->mg_lock
);
425 metaslab_group_remove(metaslab_group_t
*mg
, metaslab_t
*msp
)
427 mutex_enter(&mg
->mg_lock
);
428 ASSERT(msp
->ms_group
== mg
);
429 avl_remove(&mg
->mg_metaslab_tree
, msp
);
430 msp
->ms_group
= NULL
;
431 mutex_exit(&mg
->mg_lock
);
435 metaslab_group_sort(metaslab_group_t
*mg
, metaslab_t
*msp
, uint64_t weight
)
438 * Although in principle the weight can be any value, in
439 * practice we do not use values in the range [1, 510].
441 ASSERT(weight
>= SPA_MINBLOCKSIZE
-1 || weight
== 0);
442 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
444 mutex_enter(&mg
->mg_lock
);
445 ASSERT(msp
->ms_group
== mg
);
446 avl_remove(&mg
->mg_metaslab_tree
, msp
);
447 msp
->ms_weight
= weight
;
448 avl_add(&mg
->mg_metaslab_tree
, msp
);
449 mutex_exit(&mg
->mg_lock
);
453 * Determine if a given metaslab group should skip allocations. A metaslab
454 * group should avoid allocations if its used capacity has crossed the
455 * zfs_mg_noalloc_threshold and there is at least one metaslab group
456 * that can still handle allocations.
459 metaslab_group_allocatable(metaslab_group_t
*mg
)
461 vdev_t
*vd
= mg
->mg_vd
;
462 spa_t
*spa
= vd
->vdev_spa
;
463 metaslab_class_t
*mc
= mg
->mg_class
;
466 * A metaslab group is considered allocatable if its free capacity
467 * is greater than the set value of zfs_mg_noalloc_threshold, it's
468 * associated with a slog, or there are no other metaslab groups
469 * with free capacity greater than zfs_mg_noalloc_threshold.
471 return (mg
->mg_free_capacity
> zfs_mg_noalloc_threshold
||
472 mc
!= spa_normal_class(spa
) || mc
->mc_alloc_groups
== 0);
476 * ==========================================================================
477 * Range tree callbacks
478 * ==========================================================================
482 * Comparison function for the private size-ordered tree. Tree is sorted
483 * by size, larger sizes at the end of the tree.
486 metaslab_rangesize_compare(const void *x1
, const void *x2
)
488 const range_seg_t
*r1
= x1
;
489 const range_seg_t
*r2
= x2
;
490 uint64_t rs_size1
= r1
->rs_end
- r1
->rs_start
;
491 uint64_t rs_size2
= r2
->rs_end
- r2
->rs_start
;
493 if (rs_size1
< rs_size2
)
495 if (rs_size1
> rs_size2
)
498 if (r1
->rs_start
< r2
->rs_start
)
501 if (r1
->rs_start
> r2
->rs_start
)
508 * Create any block allocator specific components. The current allocators
509 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
512 metaslab_rt_create(range_tree_t
*rt
, void *arg
)
514 metaslab_t
*msp
= arg
;
516 ASSERT3P(rt
->rt_arg
, ==, msp
);
517 ASSERT(msp
->ms_tree
== NULL
);
519 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
520 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
524 * Destroy the block allocator specific components.
527 metaslab_rt_destroy(range_tree_t
*rt
, void *arg
)
529 metaslab_t
*msp
= arg
;
531 ASSERT3P(rt
->rt_arg
, ==, msp
);
532 ASSERT3P(msp
->ms_tree
, ==, rt
);
533 ASSERT0(avl_numnodes(&msp
->ms_size_tree
));
535 avl_destroy(&msp
->ms_size_tree
);
539 metaslab_rt_add(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
541 metaslab_t
*msp
= arg
;
543 ASSERT3P(rt
->rt_arg
, ==, msp
);
544 ASSERT3P(msp
->ms_tree
, ==, rt
);
545 VERIFY(!msp
->ms_condensing
);
546 avl_add(&msp
->ms_size_tree
, rs
);
550 metaslab_rt_remove(range_tree_t
*rt
, range_seg_t
*rs
, void *arg
)
552 metaslab_t
*msp
= arg
;
554 ASSERT3P(rt
->rt_arg
, ==, msp
);
555 ASSERT3P(msp
->ms_tree
, ==, rt
);
556 VERIFY(!msp
->ms_condensing
);
557 avl_remove(&msp
->ms_size_tree
, rs
);
561 metaslab_rt_vacate(range_tree_t
*rt
, void *arg
)
563 metaslab_t
*msp
= arg
;
565 ASSERT3P(rt
->rt_arg
, ==, msp
);
566 ASSERT3P(msp
->ms_tree
, ==, rt
);
569 * Normally one would walk the tree freeing nodes along the way.
570 * Since the nodes are shared with the range trees we can avoid
571 * walking all nodes and just reinitialize the avl tree. The nodes
572 * will be freed by the range tree, so we don't want to free them here.
574 avl_create(&msp
->ms_size_tree
, metaslab_rangesize_compare
,
575 sizeof (range_seg_t
), offsetof(range_seg_t
, rs_pp_node
));
578 static range_tree_ops_t metaslab_rt_ops
= {
587 * ==========================================================================
588 * Metaslab block operations
589 * ==========================================================================
593 * Return the maximum contiguous segment within the metaslab.
596 metaslab_block_maxsize(metaslab_t
*msp
)
598 avl_tree_t
*t
= &msp
->ms_size_tree
;
601 if (t
== NULL
|| (rs
= avl_last(t
)) == NULL
)
604 return (rs
->rs_end
- rs
->rs_start
);
608 metaslab_block_alloc(metaslab_t
*msp
, uint64_t size
)
611 range_tree_t
*rt
= msp
->ms_tree
;
613 VERIFY(!msp
->ms_condensing
);
615 start
= msp
->ms_ops
->msop_alloc(msp
, size
);
616 if (start
!= -1ULL) {
617 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
619 VERIFY0(P2PHASE(start
, 1ULL << vd
->vdev_ashift
));
620 VERIFY0(P2PHASE(size
, 1ULL << vd
->vdev_ashift
));
621 VERIFY3U(range_tree_space(rt
) - size
, <=, msp
->ms_size
);
622 range_tree_remove(rt
, start
, size
);
628 * ==========================================================================
629 * Common allocator routines
630 * ==========================================================================
634 * This is a helper function that can be used by the allocator to find
635 * a suitable block to allocate. This will search the specified AVL
636 * tree looking for a block that matches the specified criteria.
639 metaslab_block_picker(avl_tree_t
*t
, uint64_t *cursor
, uint64_t size
,
642 range_seg_t
*rs
, rsearch
;
645 rsearch
.rs_start
= *cursor
;
646 rsearch
.rs_end
= *cursor
+ size
;
648 rs
= avl_find(t
, &rsearch
, &where
);
650 rs
= avl_nearest(t
, where
, AVL_AFTER
);
653 uint64_t offset
= P2ROUNDUP(rs
->rs_start
, align
);
655 if (offset
+ size
<= rs
->rs_end
) {
656 *cursor
= offset
+ size
;
659 rs
= AVL_NEXT(t
, rs
);
663 * If we know we've searched the whole map (*cursor == 0), give up.
664 * Otherwise, reset the cursor to the beginning and try again.
670 return (metaslab_block_picker(t
, cursor
, size
, align
));
674 * ==========================================================================
675 * The first-fit block allocator
676 * ==========================================================================
679 metaslab_ff_alloc(metaslab_t
*msp
, uint64_t size
)
 * Find the largest power of 2 block size that evenly divides the
 * requested size. This is used to try to allocate blocks with similar
 * alignment from the same area of the metaslab (i.e. same cursor
 * bucket) but it does not guarantee that other allocation sizes
 * may exist in the same region.
688 uint64_t align
= size
& -size
;
689 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
690 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
692 return (metaslab_block_picker(t
, cursor
, size
, align
));
697 metaslab_ff_fragmented(metaslab_t
*msp
)
702 static metaslab_ops_t metaslab_ff_ops
= {
704 metaslab_ff_fragmented
708 * ==========================================================================
709 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
711 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
712 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
713 * ==========================================================================
716 metaslab_df_alloc(metaslab_t
*msp
, uint64_t size
)
 * Find the largest power of 2 block size that evenly divides the
 * requested size. This is used to try to allocate blocks with similar
 * alignment from the same area of the metaslab (i.e. same cursor
 * bucket) but it does not guarantee that other allocation sizes
 * may exist in the same region.
725 uint64_t align
= size
& -size
;
726 uint64_t *cursor
= &msp
->ms_lbas
[highbit64(align
) - 1];
727 range_tree_t
*rt
= msp
->ms_tree
;
728 avl_tree_t
*t
= &rt
->rt_root
;
729 uint64_t max_size
= metaslab_block_maxsize(msp
);
730 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
732 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
733 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
739 * If we're running low on space switch to using the size
740 * sorted AVL tree (best-fit).
742 if (max_size
< metaslab_df_alloc_threshold
||
743 free_pct
< metaslab_df_free_pct
) {
744 t
= &msp
->ms_size_tree
;
748 return (metaslab_block_picker(t
, cursor
, size
, 1ULL));
752 metaslab_df_fragmented(metaslab_t
*msp
)
754 range_tree_t
*rt
= msp
->ms_tree
;
755 uint64_t max_size
= metaslab_block_maxsize(msp
);
756 int free_pct
= range_tree_space(rt
) * 100 / msp
->ms_size
;
758 if (max_size
>= metaslab_df_alloc_threshold
&&
759 free_pct
>= metaslab_df_free_pct
)
765 static metaslab_ops_t metaslab_df_ops
= {
767 metaslab_df_fragmented
771 * ==========================================================================
772 * Cursor fit block allocator -
773 * Select the largest region in the metaslab, set the cursor to the beginning
774 * of the range and the cursor_end to the end of the range. As allocations
775 * are made advance the cursor. Continue allocating from the cursor until
776 * the range is exhausted and then find a new range.
777 * ==========================================================================
780 metaslab_cf_alloc(metaslab_t
*msp
, uint64_t size
)
782 range_tree_t
*rt
= msp
->ms_tree
;
783 avl_tree_t
*t
= &msp
->ms_size_tree
;
784 uint64_t *cursor
= &msp
->ms_lbas
[0];
785 uint64_t *cursor_end
= &msp
->ms_lbas
[1];
788 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
789 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&rt
->rt_root
));
791 ASSERT3U(*cursor_end
, >=, *cursor
);
793 if ((*cursor
+ size
) > *cursor_end
) {
796 rs
= avl_last(&msp
->ms_size_tree
);
797 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
)
800 *cursor
= rs
->rs_start
;
801 *cursor_end
= rs
->rs_end
;
811 metaslab_cf_fragmented(metaslab_t
*msp
)
813 return (metaslab_block_maxsize(msp
) < metaslab_min_alloc_size
);
816 static metaslab_ops_t metaslab_cf_ops
= {
818 metaslab_cf_fragmented
822 * ==========================================================================
823 * New dynamic fit allocator -
824 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
825 * contiguous blocks. If no region is found then just use the largest segment
827 * ==========================================================================
831 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
832 * to request from the allocator.
uint64_t metaslab_ndf_clump_shift = 4;
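
/*
 * For example (illustrative): with the default of 4, metaslab_ndf_alloc()
 * first looks for a region large enough to hold 2^4 = 16 blocks of the
 * requested size before falling back to the largest available segment.
 */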
837 metaslab_ndf_alloc(metaslab_t
*msp
, uint64_t size
)
839 avl_tree_t
*t
= &msp
->ms_tree
->rt_root
;
841 range_seg_t
*rs
, rsearch
;
842 uint64_t hbit
= highbit64(size
);
843 uint64_t *cursor
= &msp
->ms_lbas
[hbit
- 1];
844 uint64_t max_size
= metaslab_block_maxsize(msp
);
846 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
847 ASSERT3U(avl_numnodes(t
), ==, avl_numnodes(&msp
->ms_size_tree
));
852 rsearch
.rs_start
= *cursor
;
853 rsearch
.rs_end
= *cursor
+ size
;
855 rs
= avl_find(t
, &rsearch
, &where
);
856 if (rs
== NULL
|| (rs
->rs_end
- rs
->rs_start
) < size
) {
857 t
= &msp
->ms_size_tree
;
859 rsearch
.rs_start
= 0;
860 rsearch
.rs_end
= MIN(max_size
,
861 1ULL << (hbit
+ metaslab_ndf_clump_shift
));
862 rs
= avl_find(t
, &rsearch
, &where
);
864 rs
= avl_nearest(t
, where
, AVL_AFTER
);
868 if ((rs
->rs_end
- rs
->rs_start
) >= size
) {
869 *cursor
= rs
->rs_start
+ size
;
870 return (rs
->rs_start
);
876 metaslab_ndf_fragmented(metaslab_t
*msp
)
878 return (metaslab_block_maxsize(msp
) <=
879 (metaslab_min_alloc_size
<< metaslab_ndf_clump_shift
));
882 static metaslab_ops_t metaslab_ndf_ops
= {
884 metaslab_ndf_fragmented
887 metaslab_ops_t
*zfs_metaslab_ops
= &metaslab_df_ops
;
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
896 * Wait for any in-progress metaslab loads to complete.
899 metaslab_load_wait(metaslab_t
*msp
)
901 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
903 while (msp
->ms_loading
) {
904 ASSERT(!msp
->ms_loaded
);
905 cv_wait(&msp
->ms_load_cv
, &msp
->ms_lock
);
910 metaslab_load(metaslab_t
*msp
)
914 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
915 ASSERT(!msp
->ms_loaded
);
916 ASSERT(!msp
->ms_loading
);
918 msp
->ms_loading
= B_TRUE
;
921 * If the space map has not been allocated yet, then treat
922 * all the space in the metaslab as free and add it to the
925 if (msp
->ms_sm
!= NULL
)
926 error
= space_map_load(msp
->ms_sm
, msp
->ms_tree
, SM_FREE
);
928 range_tree_add(msp
->ms_tree
, msp
->ms_start
, msp
->ms_size
);
930 msp
->ms_loaded
= (error
== 0);
931 msp
->ms_loading
= B_FALSE
;
933 if (msp
->ms_loaded
) {
934 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
935 range_tree_walk(msp
->ms_defertree
[t
],
936 range_tree_remove
, msp
->ms_tree
);
939 cv_broadcast(&msp
->ms_load_cv
);
944 metaslab_unload(metaslab_t
*msp
)
946 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
947 range_tree_vacate(msp
->ms_tree
, NULL
, NULL
);
948 msp
->ms_loaded
= B_FALSE
;
949 msp
->ms_weight
&= ~METASLAB_ACTIVE_MASK
;
953 metaslab_init(metaslab_group_t
*mg
, uint64_t id
, uint64_t object
, uint64_t txg
)
955 vdev_t
*vd
= mg
->mg_vd
;
956 objset_t
*mos
= vd
->vdev_spa
->spa_meta_objset
;
959 msp
= kmem_zalloc(sizeof (metaslab_t
), KM_SLEEP
);
960 mutex_init(&msp
->ms_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
961 cv_init(&msp
->ms_load_cv
, NULL
, CV_DEFAULT
, NULL
);
963 msp
->ms_start
= id
<< vd
->vdev_ms_shift
;
964 msp
->ms_size
= 1ULL << vd
->vdev_ms_shift
;
967 * We only open space map objects that already exist. All others
968 * will be opened when we finally allocate an object for it.
971 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, object
, msp
->ms_start
,
972 msp
->ms_size
, vd
->vdev_ashift
, &msp
->ms_lock
));
973 ASSERT(msp
->ms_sm
!= NULL
);
977 * We create the main range tree here, but we don't create the
978 * alloctree and freetree until metaslab_sync_done(). This serves
979 * two purposes: it allows metaslab_sync_done() to detect the
980 * addition of new space; and for debugging, it ensures that we'd
981 * data fault on any attempt to use this metaslab before it's ready.
983 msp
->ms_tree
= range_tree_create(&metaslab_rt_ops
, msp
, &msp
->ms_lock
);
984 metaslab_group_add(mg
, msp
);
986 msp
->ms_ops
= mg
->mg_class
->mc_ops
;
989 * If we're opening an existing pool (txg == 0) or creating
990 * a new one (txg == TXG_INITIAL), all space is available now.
991 * If we're adding space to an existing pool, the new space
992 * does not become available until after this txg has synced.
994 if (txg
<= TXG_INITIAL
)
995 metaslab_sync_done(msp
, 0);
 * If metaslab_debug_load is set and we're initializing a metaslab
 * that has an allocated space_map object then load its space
 * map so that we can verify frees.
1002 if (metaslab_debug_load
&& msp
->ms_sm
!= NULL
) {
1003 mutex_enter(&msp
->ms_lock
);
1004 VERIFY0(metaslab_load(msp
));
1005 mutex_exit(&msp
->ms_lock
);
1009 vdev_dirty(vd
, 0, NULL
, txg
);
1010 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
);
1017 metaslab_fini(metaslab_t
*msp
)
1019 metaslab_group_t
*mg
= msp
->ms_group
;
1021 metaslab_group_remove(mg
, msp
);
1023 mutex_enter(&msp
->ms_lock
);
1025 VERIFY(msp
->ms_group
== NULL
);
1026 vdev_space_update(mg
->mg_vd
, -space_map_allocated(msp
->ms_sm
),
1028 space_map_close(msp
->ms_sm
);
1030 metaslab_unload(msp
);
1031 range_tree_destroy(msp
->ms_tree
);
1033 for (int t
= 0; t
< TXG_SIZE
; t
++) {
1034 range_tree_destroy(msp
->ms_alloctree
[t
]);
1035 range_tree_destroy(msp
->ms_freetree
[t
]);
1038 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1039 range_tree_destroy(msp
->ms_defertree
[t
]);
1042 ASSERT0(msp
->ms_deferspace
);
1044 mutex_exit(&msp
->ms_lock
);
1045 cv_destroy(&msp
->ms_load_cv
);
1046 mutex_destroy(&msp
->ms_lock
);
1048 kmem_free(msp
, sizeof (metaslab_t
));
1052 * Apply a weighting factor based on the histogram information for this
1053 * metaslab. The current weighting factor is somewhat arbitrary and requires
1054 * additional investigation. The implementation provides a measure of
1055 * "weighted" free space and gives a higher weighting for larger contiguous
1056 * regions. The weighting factor is determined by counting the number of
1057 * sm_shift sectors that exist in each region represented by the histogram.
1058 * That value is then multiplied by the power of 2 exponent and the sm_shift
1061 * For example, assume the 2^21 histogram bucket has 4 2MB regions and the
1062 * metaslab has an sm_shift value of 9 (512B):
1064 * 1) calculate the number of sm_shift sectors in the region:
1065 * 2^21 / 2^9 = 2^12 = 4096 * 4 (number of regions) = 16384
1066 * 2) multiply by the power of 2 exponent and the sm_shift value:
1067 * 16384 * 21 * 9 = 3096576
1068 * This value will be added to the weighting of the metaslab.
1071 metaslab_weight_factor(metaslab_t
*msp
)
1073 uint64_t factor
= 0;
1078 * A null space map means that the entire metaslab is free,
1079 * calculate a weight factor that spans the entire size of the
1082 if (msp
->ms_sm
== NULL
) {
1083 vdev_t
*vd
= msp
->ms_group
->mg_vd
;
1085 i
= highbit64(msp
->ms_size
) - 1;
1086 sectors
= msp
->ms_size
>> vd
->vdev_ashift
;
1087 return (sectors
* i
* vd
->vdev_ashift
);
1090 if (msp
->ms_sm
->sm_dbuf
->db_size
!= sizeof (space_map_phys_t
))
1093 for (i
= 0; i
< SPACE_MAP_HISTOGRAM_SIZE(msp
->ms_sm
); i
++) {
1094 if (msp
->ms_sm
->sm_phys
->smp_histogram
[i
] == 0)
1098 * Determine the number of sm_shift sectors in the region
1099 * indicated by the histogram. For example, given an
1100 * sm_shift value of 9 (512 bytes) and i = 4 then we know
1101 * that we're looking at an 8K region in the histogram
1102 * (i.e. 9 + 4 = 13, 2^13 = 8192). To figure out the
1103 * number of sm_shift sectors (512 bytes in this example),
1104 * we would take 8192 / 512 = 16. Since the histogram
1105 * is offset by sm_shift we can simply use the value of
1106 * of i to calculate this (i.e. 2^i = 16 where i = 4).
1108 sectors
= msp
->ms_sm
->sm_phys
->smp_histogram
[i
] << i
;
1109 factor
+= (i
+ msp
->ms_sm
->sm_shift
) * sectors
;
1111 return (factor
* msp
->ms_sm
->sm_shift
);
1115 metaslab_weight(metaslab_t
*msp
)
1117 metaslab_group_t
*mg
= msp
->ms_group
;
1118 vdev_t
*vd
= mg
->mg_vd
;
1119 uint64_t weight
, space
;
1121 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1124 * This vdev is in the process of being removed so there is nothing
1125 * for us to do here.
1127 if (vd
->vdev_removing
) {
1128 ASSERT0(space_map_allocated(msp
->ms_sm
));
1129 ASSERT0(vd
->vdev_ms_shift
);
1134 * The baseline weight is the metaslab's free space.
1136 space
= msp
->ms_size
- space_map_allocated(msp
->ms_sm
);
1140 * Modern disks have uniform bit density and constant angular velocity.
1141 * Therefore, the outer recording zones are faster (higher bandwidth)
1142 * than the inner zones by the ratio of outer to inner track diameter,
1143 * which is typically around 2:1. We account for this by assigning
1144 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1145 * In effect, this means that we'll select the metaslab with the most
1146 * free bandwidth rather than simply the one with the most free space.
1148 weight
= 2 * weight
- (msp
->ms_id
* weight
) / vd
->vdev_ms_count
;
1149 ASSERT(weight
>= space
&& weight
<= 2 * space
);
1151 msp
->ms_factor
= metaslab_weight_factor(msp
);
1152 if (metaslab_weight_factor_enable
)
1153 weight
+= msp
->ms_factor
;
1155 if (msp
->ms_loaded
&& !msp
->ms_ops
->msop_fragmented(msp
)) {
1157 * If this metaslab is one we're actively using, adjust its
1158 * weight to make it preferable to any inactive metaslab so
1159 * we'll polish it off.
1161 weight
|= (msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1168 metaslab_activate(metaslab_t
*msp
, uint64_t activation_weight
)
1170 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1172 if ((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0) {
1173 metaslab_load_wait(msp
);
1174 if (!msp
->ms_loaded
) {
1175 int error
= metaslab_load(msp
);
1177 metaslab_group_sort(msp
->ms_group
, msp
, 0);
1182 metaslab_group_sort(msp
->ms_group
, msp
,
1183 msp
->ms_weight
| activation_weight
);
1185 ASSERT(msp
->ms_loaded
);
1186 ASSERT(msp
->ms_weight
& METASLAB_ACTIVE_MASK
);
1192 metaslab_passivate(metaslab_t
*msp
, uint64_t size
)
1195 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1196 * this metaslab again. In that case, it had better be empty,
1197 * or we would be leaving space on the table.
1199 ASSERT(size
>= SPA_MINBLOCKSIZE
|| range_tree_space(msp
->ms_tree
) == 0);
1200 metaslab_group_sort(msp
->ms_group
, msp
, MIN(msp
->ms_weight
, size
));
1201 ASSERT((msp
->ms_weight
& METASLAB_ACTIVE_MASK
) == 0);
1205 metaslab_preload(void *arg
)
1207 metaslab_t
*msp
= arg
;
1208 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1210 ASSERT(!MUTEX_HELD(&msp
->ms_group
->mg_lock
));
1212 mutex_enter(&msp
->ms_lock
);
1213 metaslab_load_wait(msp
);
1214 if (!msp
->ms_loaded
)
1215 (void) metaslab_load(msp
);
1218 * Set the ms_access_txg value so that we don't unload it right away.
1220 msp
->ms_access_txg
= spa_syncing_txg(spa
) + metaslab_unload_delay
+ 1;
1221 mutex_exit(&msp
->ms_lock
);
1225 metaslab_group_preload(metaslab_group_t
*mg
)
1227 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1229 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
1232 if (spa_shutting_down(spa
) || !metaslab_preload_enabled
) {
1233 taskq_wait(mg
->mg_taskq
);
1237 mutex_enter(&mg
->mg_lock
);
1239 * Load the next potential metaslabs
1242 while (msp
!= NULL
) {
1243 metaslab_t
*msp_next
= AVL_NEXT(t
, msp
);
1245 /* If we have reached our preload limit then we're done */
1246 if (++m
> metaslab_preload_limit
)
1250 * We must drop the metaslab group lock here to preserve
1251 * lock ordering with the ms_lock (when grabbing both
1252 * the mg_lock and the ms_lock, the ms_lock must be taken
1253 * first). As a result, it is possible that the ordering
1254 * of the metaslabs within the avl tree may change before
1255 * we reacquire the lock. The metaslab cannot be removed from
1256 * the tree while we're in syncing context so it is safe to
1257 * drop the mg_lock here. If the metaslabs are reordered
1258 * nothing will break -- we just may end up loading a
1259 * less than optimal one.
1261 mutex_exit(&mg
->mg_lock
);
1262 VERIFY(taskq_dispatch(mg
->mg_taskq
, metaslab_preload
,
1263 msp
, TQ_SLEEP
) != NULL
);
1264 mutex_enter(&mg
->mg_lock
);
1267 mutex_exit(&mg
->mg_lock
);
1271 * Determine if the space map's on-disk footprint is past our tolerance
1272 * for inefficiency. We would like to use the following criteria to make
1275 * 1. The size of the space map object should not dramatically increase as a
1276 * result of writing out the free space range tree.
1278 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
 * times the size of the free space range tree representation
 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
1282 * Checking the first condition is tricky since we don't want to walk
1283 * the entire AVL tree calculating the estimated on-disk size. Instead we
1284 * use the size-ordered range tree in the metaslab and calculate the
1285 * size required to write out the largest segment in our free tree. If the
1286 * size required to represent that segment on disk is larger than the space
1287 * map object then we avoid condensing this map.
1289 * To determine the second criterion we use a best-case estimate and assume
1290 * each segment can be represented on-disk as a single 64-bit entry. We refer
1291 * to this best-case estimate as the space map's minimal form.
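 *
 * For example (illustrative numbers): a free tree with 1,000 segments has a
 * minimal form of 1,000 * 8 = 8,000 bytes; with zfs_condense_pct = 200 the
 * map is condensed once its on-disk length reaches 16,000 bytes or more,
 * provided no single free segment would by itself require more than the
 * current on-disk length to represent.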
1294 metaslab_should_condense(metaslab_t
*msp
)
1296 space_map_t
*sm
= msp
->ms_sm
;
1298 uint64_t size
, entries
, segsz
;
1300 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1301 ASSERT(msp
->ms_loaded
);
1304 * Use the ms_size_tree range tree, which is ordered by size, to
1305 * obtain the largest segment in the free tree. If the tree is empty
1306 * then we should condense the map.
1308 rs
= avl_last(&msp
->ms_size_tree
);
1313 * Calculate the number of 64-bit entries this segment would
1314 * require when written to disk. If this single segment would be
1315 * larger on-disk than the entire current on-disk structure, then
1316 * clearly condensing will increase the on-disk structure size.
1318 size
= (rs
->rs_end
- rs
->rs_start
) >> sm
->sm_shift
;
1319 entries
= size
/ (MIN(size
, SM_RUN_MAX
));
1320 segsz
= entries
* sizeof (uint64_t);
1322 return (segsz
<= space_map_length(msp
->ms_sm
) &&
1323 space_map_length(msp
->ms_sm
) >= (zfs_condense_pct
*
1324 sizeof (uint64_t) * avl_numnodes(&msp
->ms_tree
->rt_root
)) / 100);
1328 * Condense the on-disk space map representation to its minimized form.
1329 * The minimized form consists of a small number of allocations followed by
1330 * the entries of the free range tree.
1333 metaslab_condense(metaslab_t
*msp
, uint64_t txg
, dmu_tx_t
*tx
)
1335 spa_t
*spa
= msp
->ms_group
->mg_vd
->vdev_spa
;
1336 range_tree_t
*freetree
= msp
->ms_freetree
[txg
& TXG_MASK
];
1337 range_tree_t
*condense_tree
;
1338 space_map_t
*sm
= msp
->ms_sm
;
1340 ASSERT(MUTEX_HELD(&msp
->ms_lock
));
1341 ASSERT3U(spa_sync_pass(spa
), ==, 1);
1342 ASSERT(msp
->ms_loaded
);
1344 spa_dbgmsg(spa
, "condensing: txg %llu, msp[%llu] %p, "
1345 "smp size %llu, segments %lu", txg
, msp
->ms_id
, msp
,
1346 space_map_length(msp
->ms_sm
), avl_numnodes(&msp
->ms_tree
->rt_root
));
 * Create a range tree that is 100% allocated. We remove segments
1350 * that have been freed in this txg, any deferred frees that exist,
1351 * and any allocation in the future. Removing segments should be
1352 * a relatively inexpensive operation since we expect these trees to
1353 * have a small number of nodes.
1355 condense_tree
= range_tree_create(NULL
, NULL
, &msp
->ms_lock
);
1356 range_tree_add(condense_tree
, msp
->ms_start
, msp
->ms_size
);
1359 * Remove what's been freed in this txg from the condense_tree.
1360 * Since we're in sync_pass 1, we know that all the frees from
1361 * this txg are in the freetree.
1363 range_tree_walk(freetree
, range_tree_remove
, condense_tree
);
1365 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1366 range_tree_walk(msp
->ms_defertree
[t
],
1367 range_tree_remove
, condense_tree
);
1370 for (int t
= 1; t
< TXG_CONCURRENT_STATES
; t
++) {
1371 range_tree_walk(msp
->ms_alloctree
[(txg
+ t
) & TXG_MASK
],
1372 range_tree_remove
, condense_tree
);
1376 * We're about to drop the metaslab's lock thus allowing
 * other consumers to change its content. Set the
1378 * metaslab's ms_condensing flag to ensure that
1379 * allocations on this metaslab do not occur while we're
1380 * in the middle of committing it to disk. This is only critical
1381 * for the ms_tree as all other range trees use per txg
1382 * views of their content.
1384 msp
->ms_condensing
= B_TRUE
;
1386 mutex_exit(&msp
->ms_lock
);
1387 space_map_truncate(sm
, tx
);
1388 mutex_enter(&msp
->ms_lock
);
1391 * While we would ideally like to create a space_map representation
1392 * that consists only of allocation records, doing so can be
1393 * prohibitively expensive because the in-core free tree can be
1394 * large, and therefore computationally expensive to subtract
1395 * from the condense_tree. Instead we sync out two trees, a cheap
1396 * allocation only tree followed by the in-core free tree. While not
1397 * optimal, this is typically close to optimal, and much cheaper to
1400 space_map_write(sm
, condense_tree
, SM_ALLOC
, tx
);
1401 range_tree_vacate(condense_tree
, NULL
, NULL
);
1402 range_tree_destroy(condense_tree
);
1404 space_map_write(sm
, msp
->ms_tree
, SM_FREE
, tx
);
1405 msp
->ms_condensing
= B_FALSE
;
1409 * Write a metaslab to disk in the context of the specified transaction group.
1412 metaslab_sync(metaslab_t
*msp
, uint64_t txg
)
1414 metaslab_group_t
*mg
= msp
->ms_group
;
1415 vdev_t
*vd
= mg
->mg_vd
;
1416 spa_t
*spa
= vd
->vdev_spa
;
1417 objset_t
*mos
= spa_meta_objset(spa
);
1418 range_tree_t
*alloctree
= msp
->ms_alloctree
[txg
& TXG_MASK
];
1419 range_tree_t
**freetree
= &msp
->ms_freetree
[txg
& TXG_MASK
];
1420 range_tree_t
**freed_tree
=
1421 &msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
];
1423 uint64_t object
= space_map_object(msp
->ms_sm
);
1425 ASSERT(!vd
->vdev_ishole
);
1428 * This metaslab has just been added so there's no work to do now.
1430 if (*freetree
== NULL
) {
1431 ASSERT3P(alloctree
, ==, NULL
);
1435 ASSERT3P(alloctree
, !=, NULL
);
1436 ASSERT3P(*freetree
, !=, NULL
);
1437 ASSERT3P(*freed_tree
, !=, NULL
);
1439 if (range_tree_space(alloctree
) == 0 &&
1440 range_tree_space(*freetree
) == 0)
1444 * The only state that can actually be changing concurrently with
1445 * metaslab_sync() is the metaslab's ms_tree. No other thread can
1446 * be modifying this txg's alloctree, freetree, freed_tree, or
 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
1448 * space_map ASSERTs. We drop it whenever we call into the DMU,
1449 * because the DMU can call down to us (e.g. via zio_free()) at
1453 tx
= dmu_tx_create_assigned(spa_get_dsl(spa
), txg
);
1455 if (msp
->ms_sm
== NULL
) {
1456 uint64_t new_object
;
1458 new_object
= space_map_alloc(mos
, tx
);
1459 VERIFY3U(new_object
, !=, 0);
1461 VERIFY0(space_map_open(&msp
->ms_sm
, mos
, new_object
,
1462 msp
->ms_start
, msp
->ms_size
, vd
->vdev_ashift
,
1464 ASSERT(msp
->ms_sm
!= NULL
);
1467 mutex_enter(&msp
->ms_lock
);
1469 if (msp
->ms_loaded
&& spa_sync_pass(spa
) == 1 &&
1470 metaslab_should_condense(msp
)) {
1471 metaslab_condense(msp
, txg
, tx
);
1473 space_map_write(msp
->ms_sm
, alloctree
, SM_ALLOC
, tx
);
1474 space_map_write(msp
->ms_sm
, *freetree
, SM_FREE
, tx
);
1477 range_tree_vacate(alloctree
, NULL
, NULL
);
1479 if (msp
->ms_loaded
) {
 * When the space map is loaded, we have an accurate
1482 * histogram in the range tree. This gives us an opportunity
1483 * to bring the space map's histogram up-to-date so we clear
1484 * it first before updating it.
1486 space_map_histogram_clear(msp
->ms_sm
);
1487 space_map_histogram_add(msp
->ms_sm
, msp
->ms_tree
, tx
);
1490 * Since the space map is not loaded we simply update the
 * existing histogram with what was freed in this txg. This
1492 * means that the on-disk histogram may not have an accurate
1493 * view of the free space but it's close enough to allow
1494 * us to make allocation decisions.
1496 space_map_histogram_add(msp
->ms_sm
, *freetree
, tx
);
1500 * For sync pass 1, we avoid traversing this txg's free range tree
1501 * and instead will just swap the pointers for freetree and
1502 * freed_tree. We can safely do this since the freed_tree is
1503 * guaranteed to be empty on the initial pass.
1505 if (spa_sync_pass(spa
) == 1) {
1506 range_tree_swap(freetree
, freed_tree
);
1508 range_tree_vacate(*freetree
, range_tree_add
, *freed_tree
);
1511 ASSERT0(range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]));
1512 ASSERT0(range_tree_space(msp
->ms_freetree
[txg
& TXG_MASK
]));
1514 mutex_exit(&msp
->ms_lock
);
1516 if (object
!= space_map_object(msp
->ms_sm
)) {
1517 object
= space_map_object(msp
->ms_sm
);
1518 dmu_write(mos
, vd
->vdev_ms_array
, sizeof (uint64_t) *
1519 msp
->ms_id
, sizeof (uint64_t), &object
, tx
);
1525 * Called after a transaction group has completely synced to mark
1526 * all of the metaslab's free space as usable.
1529 metaslab_sync_done(metaslab_t
*msp
, uint64_t txg
)
1531 metaslab_group_t
*mg
= msp
->ms_group
;
1532 vdev_t
*vd
= mg
->mg_vd
;
1533 range_tree_t
**freed_tree
;
1534 range_tree_t
**defer_tree
;
1535 int64_t alloc_delta
, defer_delta
;
1537 ASSERT(!vd
->vdev_ishole
);
1539 mutex_enter(&msp
->ms_lock
);
1542 * If this metaslab is just becoming available, initialize its
1543 * alloctrees, freetrees, and defertree and add its capacity to
1546 if (msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
] == NULL
) {
1547 for (int t
= 0; t
< TXG_SIZE
; t
++) {
1548 ASSERT(msp
->ms_alloctree
[t
] == NULL
);
1549 ASSERT(msp
->ms_freetree
[t
] == NULL
);
1551 msp
->ms_alloctree
[t
] = range_tree_create(NULL
, msp
,
1553 msp
->ms_freetree
[t
] = range_tree_create(NULL
, msp
,
1557 for (int t
= 0; t
< TXG_DEFER_SIZE
; t
++) {
1558 ASSERT(msp
->ms_defertree
[t
] == NULL
);
1560 msp
->ms_defertree
[t
] = range_tree_create(NULL
, msp
,
1564 vdev_space_update(vd
, 0, 0, msp
->ms_size
);
1567 freed_tree
= &msp
->ms_freetree
[TXG_CLEAN(txg
) & TXG_MASK
];
1568 defer_tree
= &msp
->ms_defertree
[txg
% TXG_DEFER_SIZE
];
1570 alloc_delta
= space_map_alloc_delta(msp
->ms_sm
);
1571 defer_delta
= range_tree_space(*freed_tree
) -
1572 range_tree_space(*defer_tree
);
1574 vdev_space_update(vd
, alloc_delta
+ defer_delta
, defer_delta
, 0);
1576 ASSERT0(range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]));
1577 ASSERT0(range_tree_space(msp
->ms_freetree
[txg
& TXG_MASK
]));
1580 * If there's a metaslab_load() in progress, wait for it to complete
1581 * so that we have a consistent view of the in-core space map.
1583 metaslab_load_wait(msp
);
1586 * Move the frees from the defer_tree back to the free
1587 * range tree (if it's loaded). Swap the freed_tree and the
1588 * defer_tree -- this is safe to do because we've just emptied out
1591 range_tree_vacate(*defer_tree
,
1592 msp
->ms_loaded
? range_tree_add
: NULL
, msp
->ms_tree
);
1593 range_tree_swap(freed_tree
, defer_tree
);
1595 space_map_update(msp
->ms_sm
);
1597 msp
->ms_deferspace
+= defer_delta
;
1598 ASSERT3S(msp
->ms_deferspace
, >=, 0);
1599 ASSERT3S(msp
->ms_deferspace
, <=, msp
->ms_size
);
1600 if (msp
->ms_deferspace
!= 0) {
1602 * Keep syncing this metaslab until all deferred frees
1603 * are back in circulation.
1605 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
+ 1);
1608 if (msp
->ms_loaded
&& msp
->ms_access_txg
< txg
) {
1609 for (int t
= 1; t
< TXG_CONCURRENT_STATES
; t
++) {
1610 VERIFY0(range_tree_space(
1611 msp
->ms_alloctree
[(txg
+ t
) & TXG_MASK
]));
1614 if (!metaslab_debug_unload
)
1615 metaslab_unload(msp
);
1618 metaslab_group_sort(mg
, msp
, metaslab_weight(msp
));
1619 mutex_exit(&msp
->ms_lock
);
1624 metaslab_sync_reassess(metaslab_group_t
*mg
)
1626 metaslab_group_alloc_update(mg
);
1629 * Preload the next potential metaslabs
1631 metaslab_group_preload(mg
);
1635 metaslab_distance(metaslab_t
*msp
, dva_t
*dva
)
1637 uint64_t ms_shift
= msp
->ms_group
->mg_vd
->vdev_ms_shift
;
1638 uint64_t offset
= DVA_GET_OFFSET(dva
) >> ms_shift
;
1639 uint64_t start
= msp
->ms_id
;
1641 if (msp
->ms_group
->mg_vd
->vdev_id
!= DVA_GET_VDEV(dva
))
1642 return (1ULL << 63);
1645 return ((start
- offset
) << ms_shift
);
1647 return ((offset
- start
) << ms_shift
);
1652 metaslab_group_alloc(metaslab_group_t
*mg
, uint64_t psize
, uint64_t asize
,
1653 uint64_t txg
, uint64_t min_distance
, dva_t
*dva
, int d
)
1655 spa_t
*spa
= mg
->mg_vd
->vdev_spa
;
1656 metaslab_t
*msp
= NULL
;
1657 uint64_t offset
= -1ULL;
1658 avl_tree_t
*t
= &mg
->mg_metaslab_tree
;
1659 uint64_t activation_weight
;
1660 uint64_t target_distance
;
1663 activation_weight
= METASLAB_WEIGHT_PRIMARY
;
1664 for (i
= 0; i
< d
; i
++) {
1665 if (DVA_GET_VDEV(&dva
[i
]) == mg
->mg_vd
->vdev_id
) {
1666 activation_weight
= METASLAB_WEIGHT_SECONDARY
;
1672 boolean_t was_active
;
1674 mutex_enter(&mg
->mg_lock
);
1675 for (msp
= avl_first(t
); msp
; msp
= AVL_NEXT(t
, msp
)) {
1676 if (msp
->ms_weight
< asize
) {
1677 spa_dbgmsg(spa
, "%s: failed to meet weight "
1678 "requirement: vdev %llu, txg %llu, mg %p, "
1679 "msp %p, psize %llu, asize %llu, "
1680 "weight %llu", spa_name(spa
),
1681 mg
->mg_vd
->vdev_id
, txg
,
1682 mg
, msp
, psize
, asize
, msp
->ms_weight
);
1683 mutex_exit(&mg
->mg_lock
);
1688 * If the selected metaslab is condensing, skip it.
1690 if (msp
->ms_condensing
)
1693 was_active
= msp
->ms_weight
& METASLAB_ACTIVE_MASK
;
1694 if (activation_weight
== METASLAB_WEIGHT_PRIMARY
)
1697 target_distance
= min_distance
+
1698 (space_map_allocated(msp
->ms_sm
) != 0 ? 0 :
1701 for (i
= 0; i
< d
; i
++)
1702 if (metaslab_distance(msp
, &dva
[i
]) <
1708 mutex_exit(&mg
->mg_lock
);
1712 mutex_enter(&msp
->ms_lock
);
1715 * Ensure that the metaslab we have selected is still
1716 * capable of handling our request. It's possible that
1717 * another thread may have changed the weight while we
1718 * were blocked on the metaslab lock.
1720 if (msp
->ms_weight
< asize
|| (was_active
&&
1721 !(msp
->ms_weight
& METASLAB_ACTIVE_MASK
) &&
1722 activation_weight
== METASLAB_WEIGHT_PRIMARY
)) {
1723 mutex_exit(&msp
->ms_lock
);
1727 if ((msp
->ms_weight
& METASLAB_WEIGHT_SECONDARY
) &&
1728 activation_weight
== METASLAB_WEIGHT_PRIMARY
) {
1729 metaslab_passivate(msp
,
1730 msp
->ms_weight
& ~METASLAB_ACTIVE_MASK
);
1731 mutex_exit(&msp
->ms_lock
);
1735 if (metaslab_activate(msp
, activation_weight
) != 0) {
1736 mutex_exit(&msp
->ms_lock
);
1741 * If this metaslab is currently condensing then pick again as
1742 * we can't manipulate this metaslab until it's committed
1745 if (msp
->ms_condensing
) {
1746 mutex_exit(&msp
->ms_lock
);
1750 if ((offset
= metaslab_block_alloc(msp
, asize
)) != -1ULL)
1753 metaslab_passivate(msp
, metaslab_block_maxsize(msp
));
1754 mutex_exit(&msp
->ms_lock
);
1757 if (range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]) == 0)
1758 vdev_dirty(mg
->mg_vd
, VDD_METASLAB
, msp
, txg
);
1760 range_tree_add(msp
->ms_alloctree
[txg
& TXG_MASK
], offset
, asize
);
1761 msp
->ms_access_txg
= txg
+ metaslab_unload_delay
;
1763 mutex_exit(&msp
->ms_lock
);
1769 * Allocate a block for the specified i/o.
1772 metaslab_alloc_dva(spa_t
*spa
, metaslab_class_t
*mc
, uint64_t psize
,
1773 dva_t
*dva
, int d
, dva_t
*hintdva
, uint64_t txg
, int flags
)
1775 metaslab_group_t
*mg
, *rotor
;
1779 int zio_lock
= B_FALSE
;
1780 boolean_t allocatable
;
1781 uint64_t offset
= -1ULL;
1785 ASSERT(!DVA_IS_VALID(&dva
[d
]));
1788 * For testing, make some blocks above a certain size be gang blocks.
1790 if (psize
>= metaslab_gang_bang
&& (ddi_get_lbolt() & 3) == 0)
1791 return (SET_ERROR(ENOSPC
));
1794 * Start at the rotor and loop through all mgs until we find something.
1795 * Note that there's no locking on mc_rotor or mc_aliquot because
1796 * nothing actually breaks if we miss a few updates -- we just won't
1797 * allocate quite as evenly. It all balances out over time.
1799 * If we are doing ditto or log blocks, try to spread them across
1800 * consecutive vdevs. If we're forced to reuse a vdev before we've
1801 * allocated all of our ditto blocks, then try and spread them out on
1802 * that vdev as much as possible. If it turns out to not be possible,
1803 * gradually lower our standards until anything becomes acceptable.
1804 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1805 * gives us hope of containing our fault domains to something we're
1806 * able to reason about. Otherwise, any two top-level vdev failures
1807 * will guarantee the loss of data. With consecutive allocation,
1808 * only two adjacent top-level vdev failures will result in data loss.
1810 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1811 * ourselves on the same vdev as our gang block header. That
1812 * way, we can hope for locality in vdev_cache, plus it makes our
1813 * fault domains something tractable.
1816 vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&hintdva
[d
]));
1819 * It's possible the vdev we're using as the hint no
1820 * longer exists (i.e. removed). Consult the rotor when
1826 if (flags
& METASLAB_HINTBP_AVOID
&&
1827 mg
->mg_next
!= NULL
)
1832 } else if (d
!= 0) {
1833 vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&dva
[d
- 1]));
1834 mg
= vd
->vdev_mg
->mg_next
;
1840 * If the hint put us into the wrong metaslab class, or into a
1841 * metaslab group that has been passivated, just follow the rotor.
1843 if (mg
->mg_class
!= mc
|| mg
->mg_activation_count
<= 0)
1850 ASSERT(mg
->mg_activation_count
== 1);
1855 * Don't allocate from faulted devices.
1858 spa_config_enter(spa
, SCL_ZIO
, FTAG
, RW_READER
);
1859 allocatable
= vdev_allocatable(vd
);
1860 spa_config_exit(spa
, SCL_ZIO
, FTAG
);
1862 allocatable
= vdev_allocatable(vd
);
1866 * Determine if the selected metaslab group is eligible
1867 * for allocations. If we're ganging or have requested
1868 * an allocation for the smallest gang block size
 * then we don't want to avoid allocating to this
1870 * metaslab group. If we're in this condition we should
1871 * try to allocate from any device possible so that we
1872 * don't inadvertently return ENOSPC and suspend the pool
1873 * even though space is still available.
1875 if (allocatable
&& CAN_FASTGANG(flags
) &&
1876 psize
> SPA_GANGBLOCKSIZE
)
1877 allocatable
= metaslab_group_allocatable(mg
);
1883 * Avoid writing single-copy data to a failing vdev
1884 * unless the user instructs us that it is okay.
1886 if ((vd
->vdev_stat
.vs_write_errors
> 0 ||
1887 vd
->vdev_state
< VDEV_STATE_HEALTHY
) &&
1888 d
== 0 && dshift
== 3 &&
1889 !(zfs_write_to_degraded
&& vd
->vdev_state
==
1890 VDEV_STATE_DEGRADED
)) {
1895 ASSERT(mg
->mg_class
== mc
);
1897 distance
= vd
->vdev_asize
>> dshift
;
1898 if (distance
<= (1ULL << vd
->vdev_ms_shift
))
1903 asize
= vdev_psize_to_asize(vd
, psize
);
1904 ASSERT(P2PHASE(asize
, 1ULL << vd
->vdev_ashift
) == 0);
1906 offset
= metaslab_group_alloc(mg
, psize
, asize
, txg
, distance
,
1908 if (offset
!= -1ULL) {
1910 * If we've just selected this metaslab group,
1911 * figure out whether the corresponding vdev is
1912 * over- or under-used relative to the pool,
1913 * and set an allocation bias to even it out.
1915 if (mc
->mc_aliquot
== 0) {
1916 vdev_stat_t
*vs
= &vd
->vdev_stat
;
1919 vu
= (vs
->vs_alloc
* 100) / (vs
->vs_space
+ 1);
1920 cu
= (mc
->mc_alloc
* 100) / (mc
->mc_space
+ 1);
1923 * Calculate how much more or less we should
1924 * try to allocate from this device during
1925 * this iteration around the rotor.
1926 * For example, if a device is 80% full
1927 * and the pool is 20% full then we should
1928 * reduce allocations by 60% on this device.
1930 * mg_bias = (20 - 80) * 512K / 100 = -307K
1932 * This reduces allocations by 307K for this
1935 mg
->mg_bias
= ((cu
- vu
) *
1936 (int64_t)mg
->mg_aliquot
) / 100;
1939 if (atomic_add_64_nv(&mc
->mc_aliquot
, asize
) >=
1940 mg
->mg_aliquot
+ mg
->mg_bias
) {
1941 mc
->mc_rotor
= mg
->mg_next
;
1945 DVA_SET_VDEV(&dva
[d
], vd
->vdev_id
);
1946 DVA_SET_OFFSET(&dva
[d
], offset
);
1947 DVA_SET_GANG(&dva
[d
], !!(flags
& METASLAB_GANG_HEADER
));
1948 DVA_SET_ASIZE(&dva
[d
], asize
);
1953 mc
->mc_rotor
= mg
->mg_next
;
1955 } while ((mg
= mg
->mg_next
) != rotor
);
1959 ASSERT(dshift
< 64);
1963 if (!allocatable
&& !zio_lock
) {
1969 bzero(&dva
[d
], sizeof (dva_t
));
1971 return (SET_ERROR(ENOSPC
));
1975 * Free the block represented by DVA in the context of the specified
1976 * transaction group.
1979 metaslab_free_dva(spa_t
*spa
, const dva_t
*dva
, uint64_t txg
, boolean_t now
)
1981 uint64_t vdev
= DVA_GET_VDEV(dva
);
1982 uint64_t offset
= DVA_GET_OFFSET(dva
);
1983 uint64_t size
= DVA_GET_ASIZE(dva
);
1987 ASSERT(DVA_IS_VALID(dva
));
1989 if (txg
> spa_freeze_txg(spa
))
1992 if ((vd
= vdev_lookup_top(spa
, vdev
)) == NULL
||
1993 (offset
>> vd
->vdev_ms_shift
) >= vd
->vdev_ms_count
) {
1994 cmn_err(CE_WARN
, "metaslab_free_dva(): bad DVA %llu:%llu",
1995 (u_longlong_t
)vdev
, (u_longlong_t
)offset
);
2000 msp
= vd
->vdev_ms
[offset
>> vd
->vdev_ms_shift
];
2002 if (DVA_GET_GANG(dva
))
2003 size
= vdev_psize_to_asize(vd
, SPA_GANGBLOCKSIZE
);
2005 mutex_enter(&msp
->ms_lock
);
2008 range_tree_remove(msp
->ms_alloctree
[txg
& TXG_MASK
],
2011 VERIFY(!msp
->ms_condensing
);
2012 VERIFY3U(offset
, >=, msp
->ms_start
);
2013 VERIFY3U(offset
+ size
, <=, msp
->ms_start
+ msp
->ms_size
);
2014 VERIFY3U(range_tree_space(msp
->ms_tree
) + size
, <=,
2016 VERIFY0(P2PHASE(offset
, 1ULL << vd
->vdev_ashift
));
2017 VERIFY0(P2PHASE(size
, 1ULL << vd
->vdev_ashift
));
2018 range_tree_add(msp
->ms_tree
, offset
, size
);
2020 if (range_tree_space(msp
->ms_freetree
[txg
& TXG_MASK
]) == 0)
2021 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
);
2022 range_tree_add(msp
->ms_freetree
[txg
& TXG_MASK
],
2026 mutex_exit(&msp
->ms_lock
);
2030 * Intent log support: upon opening the pool after a crash, notify the SPA
2031 * of blocks that the intent log has allocated for immediate write, but
2032 * which are still considered free by the SPA because the last transaction
2033 * group didn't commit yet.
2036 metaslab_claim_dva(spa_t
*spa
, const dva_t
*dva
, uint64_t txg
)
2038 uint64_t vdev
= DVA_GET_VDEV(dva
);
2039 uint64_t offset
= DVA_GET_OFFSET(dva
);
2040 uint64_t size
= DVA_GET_ASIZE(dva
);
2045 ASSERT(DVA_IS_VALID(dva
));
2047 if ((vd
= vdev_lookup_top(spa
, vdev
)) == NULL
||
2048 (offset
>> vd
->vdev_ms_shift
) >= vd
->vdev_ms_count
)
2049 return (SET_ERROR(ENXIO
));
2051 msp
= vd
->vdev_ms
[offset
>> vd
->vdev_ms_shift
];
2053 if (DVA_GET_GANG(dva
))
2054 size
= vdev_psize_to_asize(vd
, SPA_GANGBLOCKSIZE
);
2056 mutex_enter(&msp
->ms_lock
);
2058 if ((txg
!= 0 && spa_writeable(spa
)) || !msp
->ms_loaded
)
2059 error
= metaslab_activate(msp
, METASLAB_WEIGHT_SECONDARY
);
2061 if (error
== 0 && !range_tree_contains(msp
->ms_tree
, offset
, size
))
2062 error
= SET_ERROR(ENOENT
);
2064 if (error
|| txg
== 0) { /* txg == 0 indicates dry run */
2065 mutex_exit(&msp
->ms_lock
);
2069 VERIFY(!msp
->ms_condensing
);
2070 VERIFY0(P2PHASE(offset
, 1ULL << vd
->vdev_ashift
));
2071 VERIFY0(P2PHASE(size
, 1ULL << vd
->vdev_ashift
));
2072 VERIFY3U(range_tree_space(msp
->ms_tree
) - size
, <=, msp
->ms_size
);
2073 range_tree_remove(msp
->ms_tree
, offset
, size
);
2075 if (spa_writeable(spa
)) { /* don't dirty if we're zdb(1M) */
2076 if (range_tree_space(msp
->ms_alloctree
[txg
& TXG_MASK
]) == 0)
2077 vdev_dirty(vd
, VDD_METASLAB
, msp
, txg
);
2078 range_tree_add(msp
->ms_alloctree
[txg
& TXG_MASK
], offset
, size
);
2081 mutex_exit(&msp
->ms_lock
);
2087 metaslab_alloc(spa_t
*spa
, metaslab_class_t
*mc
, uint64_t psize
, blkptr_t
*bp
,
2088 int ndvas
, uint64_t txg
, blkptr_t
*hintbp
, int flags
)
2090 dva_t
*dva
= bp
->blk_dva
;
2091 dva_t
*hintdva
= hintbp
->blk_dva
;
2094 ASSERT(bp
->blk_birth
== 0);
2095 ASSERT(BP_PHYSICAL_BIRTH(bp
) == 0);
2097 spa_config_enter(spa
, SCL_ALLOC
, FTAG
, RW_READER
);
2099 if (mc
->mc_rotor
== NULL
) { /* no vdevs in this class */
2100 spa_config_exit(spa
, SCL_ALLOC
, FTAG
);
2101 return (SET_ERROR(ENOSPC
));
2104 ASSERT(ndvas
> 0 && ndvas
<= spa_max_replication(spa
));
2105 ASSERT(BP_GET_NDVAS(bp
) == 0);
2106 ASSERT(hintbp
== NULL
|| ndvas
<= BP_GET_NDVAS(hintbp
));
2108 for (int d
= 0; d
< ndvas
; d
++) {
2109 error
= metaslab_alloc_dva(spa
, mc
, psize
, dva
, d
, hintdva
,
2112 for (d
--; d
>= 0; d
--) {
2113 metaslab_free_dva(spa
, &dva
[d
], txg
, B_TRUE
);
2114 bzero(&dva
[d
], sizeof (dva_t
));
2116 spa_config_exit(spa
, SCL_ALLOC
, FTAG
);
2121 ASSERT(BP_GET_NDVAS(bp
) == ndvas
);
2123 spa_config_exit(spa
, SCL_ALLOC
, FTAG
);
2125 BP_SET_BIRTH(bp
, txg
, txg
);
2131 metaslab_free(spa_t
*spa
, const blkptr_t
*bp
, uint64_t txg
, boolean_t now
)
2133 const dva_t
*dva
= bp
->blk_dva
;
2134 int ndvas
= BP_GET_NDVAS(bp
);
2136 ASSERT(!BP_IS_HOLE(bp
));
2137 ASSERT(!now
|| bp
->blk_birth
>= spa_syncing_txg(spa
));
2139 spa_config_enter(spa
, SCL_FREE
, FTAG
, RW_READER
);
2141 for (int d
= 0; d
< ndvas
; d
++)
2142 metaslab_free_dva(spa
, &dva
[d
], txg
, now
);
2144 spa_config_exit(spa
, SCL_FREE
, FTAG
);
2148 metaslab_claim(spa_t
*spa
, const blkptr_t
*bp
, uint64_t txg
)
2150 const dva_t
*dva
= bp
->blk_dva
;
2151 int ndvas
= BP_GET_NDVAS(bp
);
2154 ASSERT(!BP_IS_HOLE(bp
));
2158 * First do a dry run to make sure all DVAs are claimable,
2159 * so we don't have to unwind from partial failures below.
2161 if ((error
= metaslab_claim(spa
, bp
, 0)) != 0)
2165 spa_config_enter(spa
, SCL_ALLOC
, FTAG
, RW_READER
);
2167 for (int d
= 0; d
< ndvas
; d
++)
2168 if ((error
= metaslab_claim_dva(spa
, &dva
[d
], txg
)) != 0)
2171 spa_config_exit(spa
, SCL_ALLOC
, FTAG
);
2173 ASSERT(error
== 0 || txg
== 0);
2179 metaslab_check_free(spa_t
*spa
, const blkptr_t
*bp
)
2181 if ((zfs_flags
& ZFS_DEBUG_ZIO_FREE
) == 0)
2184 spa_config_enter(spa
, SCL_VDEV
, FTAG
, RW_READER
);
2185 for (int i
= 0; i
< BP_GET_NDVAS(bp
); i
++) {
2186 uint64_t vdev
= DVA_GET_VDEV(&bp
->blk_dva
[i
]);
2187 vdev_t
*vd
= vdev_lookup_top(spa
, vdev
);
2188 uint64_t offset
= DVA_GET_OFFSET(&bp
->blk_dva
[i
]);
2189 uint64_t size
= DVA_GET_ASIZE(&bp
->blk_dva
[i
]);
2190 metaslab_t
*msp
= vd
->vdev_ms
[offset
>> vd
->vdev_ms_shift
];
2193 range_tree_verify(msp
->ms_tree
, offset
, size
);
2195 for (int j
= 0; j
< TXG_SIZE
; j
++)
2196 range_tree_verify(msp
->ms_freetree
[j
], offset
, size
);
2197 for (int j
= 0; j
< TXG_DEFER_SIZE
; j
++)
2198 range_tree_verify(msp
->ms_defertree
[j
], offset
, size
);
2200 spa_config_exit(spa
, SCL_VDEV
, FTAG
);