/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>

/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, slog, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
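
/*
 * Illustrative note (not part of the original source): metaslab_aliquot
 * is the striping granularity of the allocator's rotor -- with the
 * default of 512K, roughly that much is allocated from one top-level
 * vdev before the rotor advances to the next. metaslab_gang_bang is a
 * test knob: writes larger than it are forced to use gang blocks, and
 * the default of SPA_MAXBLOCKSIZE + 1 means no block ever qualifies.
 */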

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
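
/*
 * Worked example (illustrative, not part of the original source): with
 * the default zfs_condense_pct of 200, a space map whose minimal
 * (best-case) on-disk form would be 1MB is only considered for
 * condensing once its current on-disk size reaches 2MB, i.e. 200% of
 * the minimal form.
 */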

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
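
/*
 * Illustrative note (not part of the original source): TXG_SIZE is 4,
 * so the default allows a metaslab to go 8 txgs without an allocation
 * before it becomes eligible for unloading.
 */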

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslab.
 */
boolean_t metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
boolean_t metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
boolean_t metaslab_bias_enabled = B_TRUE;

static uint64_t metaslab_fragmentation(metaslab_t *);

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_start < m2->ms_start)
		return (-1);
	if (m1->ms_start > m2->ms_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}

/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the free capacity is greater
 * than the zfs_mg_noalloc_threshold. If a metaslab group transitions
 * from allocatable to non-allocatable or vice versa then the metaslab
 * group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;

	ASSERT(vd == vd->vdev_top);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;

	mutex_exit(&mg->mg_lock);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	taskq_wait(mg->mg_taskq);
	metaslab_group_alloc_update(mg);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	vdev_t *vd = mg->mg_vd;
	uint64_t ashift = vd->vdev_ashift;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_sm == NULL)
			continue;

		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We use two key metrics to determine if a metaslab group is
	 * considered allocatable -- free space and fragmentation. If
	 * the free space is greater than the free space threshold and
	 * the fragmentation is less than the fragmentation threshold then
	 * consider the group allocatable. There are two cases where we will
	 * not consider these key metrics. The first is if the group is
	 * associated with a slog device and the second is if all groups
	 * in this metaslab class have already been considered ineligible
	 * for allocations.
	 */
	return ((mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)) ||
	    mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = x1;
	const range_seg_t *r2 = x2;
	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	if (rs_size1 < rs_size2)
		return (-1);
	if (rs_size1 > rs_size2)
		return (1);

	if (r1->rs_start < r2->rs_start)
		return (-1);
	if (r1->rs_start > r2->rs_start)
		return (1);

	return (0);
}

/*
 * Create any block allocator specific components. The current allocators
 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
 */
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT(msp->ms_tree == NULL);

	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
}

/*
 * Destroy the block allocator specific components.
 */
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	ASSERT0(avl_numnodes(&msp->ms_size_tree));

	avl_destroy(&msp->ms_size_tree);
}

static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	VERIFY(!msp->ms_condensing);
	avl_add(&msp->ms_size_tree, rs);
}

static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);
	VERIFY(!msp->ms_condensing);
	avl_remove(&msp->ms_size_tree, rs);
}

static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
	metaslab_t *msp = arg;

	ASSERT3P(rt->rt_arg, ==, msp);
	ASSERT3P(msp->ms_tree, ==, rt);

	/*
	 * Normally one would walk the tree freeing nodes along the way.
	 * Since the nodes are shared with the range trees we can avoid
	 * walking all nodes and just reinitialize the avl tree. The nodes
	 * will be freed by the range tree, so we don't want to free them here.
	 */
	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
}

static range_tree_ops_t metaslab_rt_ops = {
	metaslab_rt_create,
	metaslab_rt_destroy,
	metaslab_rt_add,
	metaslab_rt_remove,
	metaslab_rt_vacate
};

/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_size_tree;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}

uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size)
{
	uint64_t start;
	range_tree_t *rt = msp->ms_tree;

	VERIFY(!msp->ms_condensing);

	start = msp->ms_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		vdev_t *vd = msp->ms_group->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
		range_tree_remove(rt, start, size);
	}
	return (start);
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL)
		rs = avl_nearest(t, where, AVL_AFTER);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
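
/*
 * Illustrative note (not part of the original source): the cursor makes
 * this a "next fit" search. If the previous allocation in a given
 * alignment class ended at offset 0x11000, the next search starts
 * there, and the cursor wraps back to offset 0 at most once before
 * the search fails with -1ULL.
 */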

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_tree->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}

static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc
};
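
/*
 * Illustrative note (not part of the original source): (size & -size)
 * isolates the lowest set bit of the size. For a 24K (0x6000) request
 * this yields an alignment of 8K (0x2000), so all requests whose sizes
 * are odd multiples of 8K share the same cursor bucket in ms_lbas[].
 */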

/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * may exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &rt->rt_root;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = &msp->ms_size_tree;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc
};

/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
	range_tree_t *rt = msp->ms_tree;
	avl_tree_t *t = &msp->ms_size_tree;
	uint64_t *cursor = &msp->ms_lbas[0];
	uint64_t *cursor_end = &msp->ms_lbas[1];
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

	ASSERT3U(*cursor_end, >=, *cursor);

	if ((*cursor + size) > *cursor_end) {
		range_seg_t *rs;

		rs = avl_last(&msp->ms_size_tree);
		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
			return (-1ULL);

		*cursor = rs->rs_start;
		*cursor_end = rs->rs_end;
	}

	offset = *cursor;
	*cursor += size;

	return (offset);
}

static metaslab_ops_t metaslab_cf_ops = {
	metaslab_cf_alloc
};
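
/*
 * Illustrative note (not part of the original source): ms_lbas[0] and
 * ms_lbas[1] hold the current run's cursor and end. Successive
 * allocations are carved sequentially out of the largest free segment,
 * so the size tree is only consulted when the current run is too small
 * to satisfy a request.
 */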

/*
 * ==========================================================================
 * New dynamic fit allocator -
 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
 * ==========================================================================
 */

/*
 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
 * to request from the allocator.
 */
uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
	avl_tree_t *t = &msp->ms_tree->rt_root;
	avl_index_t where;
	range_seg_t *rs, rsearch;
	uint64_t hbit = highbit64(size);
	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
	uint64_t max_size = metaslab_block_maxsize(msp);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));

	if (max_size < size)
		return (-1ULL);

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
		t = &msp->ms_size_tree;

		rsearch.rs_start = 0;
		rsearch.rs_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		rs = avl_find(t, &rsearch, &where);
		if (rs == NULL)
			rs = avl_nearest(t, where, AVL_AFTER);
		ASSERT(rs != NULL);
	}

	if ((rs->rs_end - rs->rs_start) >= size) {
		*cursor = rs->rs_start + size;
		return (rs->rs_start);
	}
	return (-1ULL);
}

static metaslab_ops_t metaslab_ndf_ops = {
	metaslab_ndf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

/*
 * Wait for any in-progress metaslab loads to complete.
 */
void
metaslab_load_wait(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	while (msp->ms_loading) {
		ASSERT(!msp->ms_loaded);
		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
	}
}

int
metaslab_load(metaslab_t *msp)
{
	int error = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_loaded);
	ASSERT(!msp->ms_loading);

	msp->ms_loading = B_TRUE;

	/*
	 * If the space map has not been allocated yet, then treat
	 * all the space in the metaslab as free and add it to the
	 * ms_tree.
	 */
	if (msp->ms_sm != NULL)
		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
	else
		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);

	msp->ms_loaded = (error == 0);
	msp->ms_loading = B_FALSE;

	if (msp->ms_loaded) {
		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			range_tree_walk(msp->ms_defertree[t],
			    range_tree_remove, msp->ms_tree);
		}
	}
	cv_broadcast(&msp->ms_load_cv);
	return (error);
}

void
metaslab_unload(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	range_tree_vacate(msp->ms_tree, NULL, NULL);
	msp->ms_loaded = B_FALSE;
	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
}

int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
    metaslab_t **msp)
{
	vdev_t *vd = mg->mg_vd;
	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	metaslab_t *ms;
	int error;

	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
	ms->ms_id = id;
	ms->ms_start = id << vd->vdev_ms_shift;
	ms->ms_size = 1ULL << vd->vdev_ms_shift;

	/*
	 * We only open space map objects that already exist. All others
	 * will be opened when we finally allocate an object for it.
	 */
	if (object != 0) {
		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
		    ms->ms_size, vd->vdev_ashift, &ms->ms_lock);

		if (error != 0) {
			kmem_free(ms, sizeof (metaslab_t));
			return (error);
		}

		ASSERT(ms->ms_sm != NULL);
	}

	/*
	 * We create the main range tree here, but we don't create the
	 * alloctree and freetree until metaslab_sync_done(). This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
	metaslab_group_add(mg, ms);

	ms->ms_fragmentation = metaslab_fragmentation(ms);
	ms->ms_ops = mg->mg_class->mc_ops;

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(ms, 0);

	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space_map object then load its space
	 * map so that we can verify frees.
	 */
	if (metaslab_debug_load && ms->ms_sm != NULL) {
		mutex_enter(&ms->ms_lock);
		VERIFY0(metaslab_load(ms));
		mutex_exit(&ms->ms_lock);
	}

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, ms, txg);
	}

	*msp = ms;

	return (0);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	VERIFY(msp->ms_group == NULL);
	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
	    0, -msp->ms_size);
	space_map_close(msp->ms_sm);

	metaslab_unload(msp);
	range_tree_destroy(msp->ms_tree);

	for (int t = 0; t < TXG_SIZE; t++) {
		range_tree_destroy(msp->ms_alloctree[t]);
		range_tree_destroy(msp->ms_freetree[t]);
	}

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_destroy(msp->ms_defertree[t]);
	}

	ASSERT0(msp->ms_deferspace);

	mutex_exit(&msp->ms_lock);
	cv_destroy(&msp->ms_load_cv);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	FRAGMENTATION_TABLE_SIZE	17

/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
	100,	/* 512B	*/
	100,	/* 1K	*/
	98,	/* 2K	*/
	95,	/* 4K	*/
	90,	/* 8K	*/
	80,	/* 16K	*/
	70,	/* 32K	*/
	60,	/* 64K	*/
	50,	/* 128K	*/
	40,	/* 256K	*/
	30,	/* 512K	*/
	20,	/* 1M	*/
	15,	/* 2M	*/
	10,	/* 4M	*/
	5,	/* 8M	*/
	0	/* 16M	*/
};
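
/*
 * Worked example (illustrative, not part of the original source):
 * suppose a metaslab's free space is 1GB of 8K segments plus 1GB of
 * 16MB segments. The 8K bucket contributes at a factor of 90 and the
 * 16MB bucket at 0, so the metric is (1G * 90 + 1G * 0) / 2G = 45.
 */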

/*
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
 */
static uint64_t
metaslab_fragmentation(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t fragmentation = 0;
	uint64_t total = 0;
	boolean_t feature_enabled = spa_feature_is_enabled(spa,
	    SPA_FEATURE_SPACEMAP_HISTOGRAM);

	if (!feature_enabled)
		return (ZFS_FRAG_INVALID);

	/*
	 * A null space map means that the entire metaslab is free
	 * and thus is not fragmented.
	 */
	if (msp->ms_sm == NULL)
		return (0);

	/*
	 * If this metaslab's space_map has not been upgraded, flag it
	 * so that we upgrade next time we encounter it.
	 */
	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
		uint64_t txg = spa_syncing_txg(spa);
		vdev_t *vd = msp->ms_group->mg_vd;

		if (spa_writeable(spa)) {
			msp->ms_condense_wanted = B_TRUE;
			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
			spa_dbgmsg(spa, "txg %llu, requesting force condense: "
			    "msp %p, vd %p", txg, msp, vd);
		}
		return (ZFS_FRAG_INVALID);
	}

	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		uint64_t space = 0;
		uint8_t shift = msp->ms_sm->sm_shift;
		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
		    FRAGMENTATION_TABLE_SIZE - 1);

		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
			continue;

		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
		total += space;

		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
		fragmentation += space * zfs_frag_table[idx];
	}

	if (total > 0)
		fragmentation /= total;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Compute a weight -- a selection preference value -- for the given metaslab.
 * This is based on the amount of free space, the level of fragmentation,
 * the LBA range, and whether the metaslab is loaded.
 */
static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * This vdev is in the process of being removed so there is nothing
	 * for us to do here.
	 */
	if (vd->vdev_removing) {
		ASSERT0(space_map_allocated(msp->ms_sm));
		ASSERT0(vd->vdev_ms_shift);
		return (0);
	}

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = msp->ms_size - space_map_allocated(msp->ms_sm);

	msp->ms_fragmentation = metaslab_fragmentation(msp);
	if (metaslab_fragmentation_factor_enabled &&
	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
		/*
		 * Use the fragmentation information to inversely scale
		 * down the baseline weight. We need to ensure that we
		 * don't exclude this metaslab completely when it's 100%
		 * fragmented. To avoid this we reduce the fragmented value
		 * by 1.
		 */
		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;

		/*
		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
		 * this metaslab again. The fragmentation metric may have
		 * decreased the space to something smaller than
		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
		 * so that we can consume any remaining space.
		 */
		if (space > 0 && space < SPA_MINBLOCKSIZE)
			space = SPA_MINBLOCKSIZE;
	}
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	if (metaslab_lba_weighting_enabled) {
		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
		ASSERT(weight >= space && weight <= 2 * space);
	}
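
	/*
	 * Worked example (illustrative, not part of the original source):
	 * on a vdev with 100 metaslabs, metaslab 0 is weighted at
	 * 2 * space, metaslab 50 at 1.5 * space, and metaslab 99 at just
	 * over 1 * space, matching the ~2:1 outer-to-inner ratio above.
	 */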

	/*
	 * If this metaslab is one we're actively using, adjust its
	 * weight to make it preferable to any inactive metaslab so
	 * we'll polish it off. If the fragmentation on this metaslab
	 * has exceeded our threshold, then don't mark it active.
	 */
	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}

	return (weight);
}

static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		metaslab_load_wait(msp);
		if (!msp->ms_loaded) {
			int error = metaslab_load(msp);
			if (error) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
		}

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(msp->ms_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again. In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}

static void
metaslab_preload(void *arg)
{
	metaslab_t *msp = arg;
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));

	mutex_enter(&msp->ms_lock);
	metaslab_load_wait(msp);
	if (!msp->ms_loaded)
		(void) metaslab_load(msp);

	/*
	 * Set the ms_access_txg value so that we don't unload it right away.
	 */
	msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_preload(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m = 0;

	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
		taskq_wait(mg->mg_taskq);
		return;
	}

	mutex_enter(&mg->mg_lock);
	/*
	 * Load the next potential metaslabs
	 */
	msp = avl_first(t);
	while (msp != NULL) {
		metaslab_t *msp_next = AVL_NEXT(t, msp);

		/*
		 * We preload only the maximum number of metaslabs specified
		 * by metaslab_preload_limit. If a metaslab is being forced
		 * to condense then we preload it too. This will ensure
		 * that force condensing happens in the next txg.
		 */
		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
			msp = msp_next;
			continue;
		}

		/*
		 * We must drop the metaslab group lock here to preserve
		 * lock ordering with the ms_lock (when grabbing both
		 * the mg_lock and the ms_lock, the ms_lock must be taken
		 * first). As a result, it is possible that the ordering
		 * of the metaslabs within the avl tree may change before
		 * we reacquire the lock. The metaslab cannot be removed from
		 * the tree while we're in syncing context so it is safe to
		 * drop the mg_lock here. If the metaslabs are reordered
		 * nothing will break -- we just may end up loading a
		 * less than optimal one.
		 */
		mutex_exit(&mg->mg_lock);
		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
		    msp, TQ_SLEEP) != NULL);
		mutex_enter(&mg->mg_lock);
		msp = msp_next;
	}
	mutex_exit(&mg->mg_lock);
}

/*
 * Determine if the space map's on-disk footprint is past our tolerance
 * for inefficiency. We would like to use the following criteria to make
 * our decision:
 *
 * 1. The size of the space map object should not dramatically increase as a
 * result of writing out the free space range tree.
 *
 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
 * times the size of the free space range tree representation
 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
 *
 * 3. The on-disk size of the space map should actually decrease.
 *
 * Checking the first condition is tricky since we don't want to walk
 * the entire AVL tree calculating the estimated on-disk size. Instead we
 * use the size-ordered range tree in the metaslab and calculate the
 * size required to write out the largest segment in our free tree. If the
 * size required to represent that segment on disk is larger than the space
 * map object then we avoid condensing this map.
 *
 * To determine the second criterion we use a best-case estimate and assume
 * each segment can be represented on-disk as a single 64-bit entry. We refer
 * to this best-case estimate as the space map's minimal form.
 *
 * Unfortunately, we cannot compute the on-disk size of the space map in this
 * context because we cannot accurately compute the effects of compression,
 * etc. Instead, we apply the heuristic described in the block comment for
 * zfs_metaslab_condense_block_threshold - we only condense if the space used
 * is greater than a threshold number of blocks.
 */
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	range_seg_t *rs;
	uint64_t size, entries, segsz, object_size, optimal_size, record_size;
	dmu_object_info_t doi;
	uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);

	/*
	 * Use the ms_size_tree range tree, which is ordered by size, to
	 * obtain the largest segment in the free tree. We always condense
	 * metaslabs that are empty and metaslabs for which a condense
	 * request has been made.
	 */
	rs = avl_last(&msp->ms_size_tree);
	if (rs == NULL || msp->ms_condense_wanted)
		return (B_TRUE);

	/*
	 * Calculate the number of 64-bit entries this segment would
	 * require when written to disk. If this single segment would be
	 * larger on-disk than the entire current on-disk structure, then
	 * clearly condensing will increase the on-disk structure size.
	 */
	size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	entries = size / (MIN(size, SM_RUN_MAX));
	segsz = entries * sizeof (uint64_t);

	optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
	object_size = space_map_length(msp->ms_sm);

	dmu_object_info_from_db(sm->sm_dbuf, &doi);
	record_size = MAX(doi.doi_data_block_size, vdev_blocksize);

	return (segsz <= object_size &&
	    object_size >= (optimal_size * zfs_condense_pct / 100) &&
	    object_size > zfs_metaslab_condense_block_threshold * record_size);
}
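
/*
 * Worked example (illustrative, not part of the original source): a
 * free tree with 1000 segments has a minimal form of 1000 * 8 bytes =
 * 8000 bytes. With the default zfs_condense_pct of 200, condensing is
 * considered once the on-disk space map exceeds 16000 bytes, provided
 * that also exceeds 4 (zfs_metaslab_condense_block_threshold) space
 * map blocks and the largest single segment wouldn't already be bigger
 * than the current object.
 */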

/*
 * Condense the on-disk space map representation to its minimized form.
 * The minimized form consists of a small number of allocations followed by
 * the entries of the free range tree.
 */
static void
metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
	range_tree_t *condense_tree;
	space_map_t *sm = msp->ms_sm;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(msp->ms_loaded);

	spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
	    msp->ms_group->mg_vd->vdev_spa->spa_name,
	    space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
	    msp->ms_condense_wanted ? "TRUE" : "FALSE");

	msp->ms_condense_wanted = B_FALSE;

	/*
	 * Create a range tree that is 100% allocated. We remove segments
	 * that have been freed in this txg, any deferred frees that exist,
	 * and any allocation in the future. Removing segments should be
	 * a relatively inexpensive operation since we expect these trees to
	 * have a small number of nodes.
	 */
	condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);

	/*
	 * Remove what's been freed in this txg from the condense_tree.
	 * Since we're in sync_pass 1, we know that all the frees from
	 * this txg are in the freetree.
	 */
	range_tree_walk(freetree, range_tree_remove, condense_tree);

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_walk(msp->ms_defertree[t],
		    range_tree_remove, condense_tree);
	}

	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
		range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
		    range_tree_remove, condense_tree);
	}

	/*
	 * We're about to drop the metaslab's lock thus allowing
	 * other consumers to change its content. Set the
	 * metaslab's ms_condensing flag to ensure that
	 * allocations on this metaslab do not occur while we're
	 * in the middle of committing it to disk. This is only critical
	 * for the ms_tree as all other range trees use per txg
	 * views of their content.
	 */
	msp->ms_condensing = B_TRUE;

	mutex_exit(&msp->ms_lock);
	space_map_truncate(sm, tx);
	mutex_enter(&msp->ms_lock);

	/*
	 * While we would ideally like to create a space_map representation
	 * that consists only of allocation records, doing so can be
	 * prohibitively expensive because the in-core free tree can be
	 * large, and therefore computationally expensive to subtract
	 * from the condense_tree. Instead we sync out two trees, a cheap
	 * allocation only tree followed by the in-core free tree. While not
	 * optimal, this is typically close to optimal, and much cheaper to
	 * compute.
	 */
	space_map_write(sm, condense_tree, SM_ALLOC, tx);
	range_tree_vacate(condense_tree, NULL, NULL);
	range_tree_destroy(condense_tree);

	space_map_write(sm, msp->ms_tree, SM_FREE, tx);
	msp->ms_condensing = B_FALSE;
}
1785 * Write a metaslab to disk in the context of the specified transaction group.
1787 void
1788 metaslab_sync(metaslab_t *msp, uint64_t txg)
1790 metaslab_group_t *mg = msp->ms_group;
1791 vdev_t *vd = mg->mg_vd;
1792 spa_t *spa = vd->vdev_spa;
1793 objset_t *mos = spa_meta_objset(spa);
1794 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
1795 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
1796 range_tree_t **freed_tree =
1797 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1798 dmu_tx_t *tx;
1799 uint64_t object = space_map_object(msp->ms_sm);
1801 ASSERT(!vd->vdev_ishole);
1804 * This metaslab has just been added so there's no work to do now.
1806 if (*freetree == NULL) {
1807 ASSERT3P(alloctree, ==, NULL);
1808 return;
1811 ASSERT3P(alloctree, !=, NULL);
1812 ASSERT3P(*freetree, !=, NULL);
1813 ASSERT3P(*freed_tree, !=, NULL);
1816 * Normally, we don't want to process a metaslab if there
1817 * are no allocations or frees to perform. However, if the metaslab
1818 * is being forced to condense we need to let it through.
1820 if (range_tree_space(alloctree) == 0 &&
1821 range_tree_space(*freetree) == 0 &&
1822 !msp->ms_condense_wanted)
1823 return;
1826 * The only state that can actually be changing concurrently with
1827 * metaslab_sync() is the metaslab's ms_tree. No other thread can
1828 * be modifying this txg's alloctree, freetree, freed_tree, or
1829 * space_map_phys_t. Therefore, we only hold ms_lock to satify
1830 * space_map ASSERTs. We drop it whenever we call into the DMU,
1831 * because the DMU can call down to us (e.g. via zio_free()) at
1832 * any time.
1835 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1837 if (msp->ms_sm == NULL) {
1838 uint64_t new_object;
1840 new_object = space_map_alloc(mos, tx);
1841 VERIFY3U(new_object, !=, 0);
1843 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
1844 msp->ms_start, msp->ms_size, vd->vdev_ashift,
1845 &msp->ms_lock));
1846 ASSERT(msp->ms_sm != NULL);
1849 mutex_enter(&msp->ms_lock);
1852 * Note: metaslab_condense() clears the space_map's histogram.
1853 * Therefore we must verify and remove this histogram before
1854 * condensing.
1856 metaslab_group_histogram_verify(mg);
1857 metaslab_class_histogram_verify(mg->mg_class);
1858 metaslab_group_histogram_remove(mg, msp);
1860 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
1861 metaslab_should_condense(msp)) {
1862 metaslab_condense(msp, txg, tx);
1863 } else {
1864 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
1865 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
1868 if (msp->ms_loaded) {
1870 * When the space map is loaded, we have an accruate
1871 * histogram in the range tree. This gives us an opportunity
1872 * to bring the space map's histogram up-to-date so we clear
1873 * it first before updating it.
1875 space_map_histogram_clear(msp->ms_sm);
1876 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
1877 } else {
1879 * Since the space map is not loaded, we simply update the
1880 * existing histogram with what was freed in this txg. This
1881 * means that the on-disk histogram may not have an accurate
1882 * view of the free space but it's close enough to allow
1883 * us to make allocation decisions.
1885 space_map_histogram_add(msp->ms_sm, *freetree, tx);
1887 metaslab_group_histogram_add(mg, msp);
1888 metaslab_group_histogram_verify(mg);
1889 metaslab_class_histogram_verify(mg->mg_class);
1892 * For sync pass 1, we avoid traversing this txg's free range tree
1893 * and will instead just swap the pointers for freetree and
1894 * freed_tree. We can safely do this since the freed_tree is
1895 * guaranteed to be empty on the initial pass.
1897 if (spa_sync_pass(spa) == 1) {
1898 range_tree_swap(freetree, freed_tree);
1899 } else {
1900 range_tree_vacate(*freetree, range_tree_add, *freed_tree);
1902 range_tree_vacate(alloctree, NULL, NULL);
1904 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1905 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1907 mutex_exit(&msp->ms_lock);
1909 if (object != space_map_object(msp->ms_sm)) {
1910 object = space_map_object(msp->ms_sm);
1911 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1912 msp->ms_id, sizeof (uint64_t), &object, tx);
1914 dmu_tx_commit(tx);
1918 * Called after a transaction group has completely synced to mark
1919 * all of the metaslab's free space as usable.
1921 void
1922 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1924 metaslab_group_t *mg = msp->ms_group;
1925 vdev_t *vd = mg->mg_vd;
1926 range_tree_t **freed_tree;
1927 range_tree_t **defer_tree;
1928 int64_t alloc_delta, defer_delta;
1930 ASSERT(!vd->vdev_ishole);
1932 mutex_enter(&msp->ms_lock);
1935 * If this metaslab is just becoming available, initialize its
1936 * alloctrees, freetrees, and defertrees, and add its capacity to
1937 * the vdev.
1939 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
1940 for (int t = 0; t < TXG_SIZE; t++) {
1941 ASSERT(msp->ms_alloctree[t] == NULL);
1942 ASSERT(msp->ms_freetree[t] == NULL);
1944 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
1945 &msp->ms_lock);
1946 msp->ms_freetree[t] = range_tree_create(NULL, msp,
1947 &msp->ms_lock);
1950 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1951 ASSERT(msp->ms_defertree[t] == NULL);
1953 msp->ms_defertree[t] = range_tree_create(NULL, msp,
1954 &msp->ms_lock);
1957 vdev_space_update(vd, 0, 0, msp->ms_size);
1960 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1961 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
1963 alloc_delta = space_map_alloc_delta(msp->ms_sm);
1964 defer_delta = range_tree_space(*freed_tree) -
1965 range_tree_space(*defer_tree);
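* (Worked example, illustrative values: if the freed_tree for the txg
* that just synced holds 10MB and the defer_tree about to be recycled
* holds 4MB, then defer_delta = 10MB - 4MB = +6MB, and the
* vdev_space_update() call below raises the vdev's deferred-space
* count by that amount.)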
1967 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1969 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1970 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1973 * If there's a metaslab_load() in progress, wait for it to complete
1974 * so that we have a consistent view of the in-core space map.
1976 metaslab_load_wait(msp);
1979 * Move the frees from the defer_tree back to the free
1980 * range tree (if it's loaded). Swap the freed_tree and the
1981 * defer_tree -- this is safe to do because we've just emptied out
1982 * the defer_tree.
1984 range_tree_vacate(*defer_tree,
1985 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
1986 range_tree_swap(freed_tree, defer_tree);
1988 space_map_update(msp->ms_sm);
1990 msp->ms_deferspace += defer_delta;
1991 ASSERT3S(msp->ms_deferspace, >=, 0);
1992 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
1993 if (msp->ms_deferspace != 0) {
1995 * Keep syncing this metaslab until all deferred frees
1996 * are back in circulation.
1998 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2001 if (msp->ms_loaded && msp->ms_access_txg < txg) {
2002 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2003 VERIFY0(range_tree_space(
2004 msp->ms_alloctree[(txg + t) & TXG_MASK]));
2007 if (!metaslab_debug_unload)
2008 metaslab_unload(msp);
2011 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2012 mutex_exit(&msp->ms_lock);
2015 void
2016 metaslab_sync_reassess(metaslab_group_t *mg)
2018 metaslab_group_alloc_update(mg);
2019 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2022 * Preload the next potential metaslabs
2024 metaslab_group_preload(mg);
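* (Added summary: metaslab_distance() returns the separation, in bytes,
* between the given DVA and the start of this metaslab, or 1ULL << 63 --
* effectively "infinitely far" -- when the DVA is on a different vdev.
* Worked example with a hypothetical vdev_ms_shift of 30, i.e. 1GB
* metaslabs: a DVA at offset 5GB falls in metaslab 5, so its distance
* from metaslab 2 is (5 - 2) << 30 = 3GB.)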
2027 static uint64_t
2028 metaslab_distance(metaslab_t *msp, dva_t *dva)
2030 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2031 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2032 uint64_t start = msp->ms_id;
2034 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2035 return (1ULL << 63);
2037 if (offset < start)
2038 return ((start - offset) << ms_shift);
2039 if (offset > start)
2040 return ((offset - start) << ms_shift);
2041 return (0);
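* (Added summary: walk this group's metaslabs in weight order and
* activate the first one able to satisfy an allocation of asize bytes.
* The first DVA copy placed on a vdev activates its metaslab with
* PRIMARY weight; later copies on the same vdev use SECONDARY weight
* and must additionally sit at least target_distance from each earlier
* DVA, where an empty metaslab is held to a 50% stiffer target of
* min_distance + (min_distance >> 1).)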
2044 static uint64_t
2045 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
2046 uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2048 spa_t *spa = mg->mg_vd->vdev_spa;
2049 metaslab_t *msp = NULL;
2050 uint64_t offset = -1ULL;
2051 avl_tree_t *t = &mg->mg_metaslab_tree;
2052 uint64_t activation_weight;
2053 uint64_t target_distance;
2054 int i;
2056 activation_weight = METASLAB_WEIGHT_PRIMARY;
2057 for (i = 0; i < d; i++) {
2058 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2059 activation_weight = METASLAB_WEIGHT_SECONDARY;
2060 break;
2064 for (;;) {
2065 boolean_t was_active;
2067 mutex_enter(&mg->mg_lock);
2068 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
2069 if (msp->ms_weight < asize) {
2070 spa_dbgmsg(spa, "%s: failed to meet weight "
2071 "requirement: vdev %llu, txg %llu, mg %p, "
2072 "msp %p, psize %llu, asize %llu, "
2073 "weight %llu", spa_name(spa),
2074 mg->mg_vd->vdev_id, txg,
2075 mg, msp, psize, asize, msp->ms_weight);
2076 mutex_exit(&mg->mg_lock);
2077 return (-1ULL);
2081 * If the selected metaslab is condensing, skip it.
2083 if (msp->ms_condensing)
2084 continue;
2086 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2087 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2088 break;
2090 target_distance = min_distance +
2091 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2092 min_distance >> 1);
2094 for (i = 0; i < d; i++)
2095 if (metaslab_distance(msp, &dva[i]) <
2096 target_distance)
2097 break;
2098 if (i == d)
2099 break;
2101 mutex_exit(&mg->mg_lock);
2102 if (msp == NULL)
2103 return (-1ULL);
2105 mutex_enter(&msp->ms_lock);
2108 * Ensure that the metaslab we have selected is still
2109 * capable of handling our request. It's possible that
2110 * another thread may have changed the weight while we
2111 * were blocked on the metaslab lock.
2113 if (msp->ms_weight < asize || (was_active &&
2114 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
2115 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
2116 mutex_exit(&msp->ms_lock);
2117 continue;
2120 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2121 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2122 metaslab_passivate(msp,
2123 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2124 mutex_exit(&msp->ms_lock);
2125 continue;
2128 if (metaslab_activate(msp, activation_weight) != 0) {
2129 mutex_exit(&msp->ms_lock);
2130 continue;
2134 * If this metaslab is currently condensing then pick again as
2135 * we can't manipulate this metaslab until it's committed
2136 * to disk.
2138 if (msp->ms_condensing) {
2139 mutex_exit(&msp->ms_lock);
2140 continue;
2143 if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
2144 break;
2146 metaslab_passivate(msp, metaslab_block_maxsize(msp));
2147 mutex_exit(&msp->ms_lock);
2150 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2151 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2153 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
2154 msp->ms_access_txg = txg + metaslab_unload_delay;
2156 mutex_exit(&msp->ms_lock);
2158 return (offset);
2162 * Allocate a block for the specified i/o.
2164 static int
2165 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
2166 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
2168 metaslab_group_t *mg, *rotor;
2169 vdev_t *vd;
2170 int dshift = 3;
2171 int all_zero;
2172 int zio_lock = B_FALSE;
2173 boolean_t allocatable;
2174 uint64_t offset = -1ULL;
2175 uint64_t asize;
2176 uint64_t distance;
2178 ASSERT(!DVA_IS_VALID(&dva[d]));
2181 * For testing, make some blocks above a certain size be gang blocks.
2183 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
2184 return (SET_ERROR(ENOSPC));
2187 * Start at the rotor and loop through all mgs until we find something.
2188 * Note that there's no locking on mc_rotor or mc_aliquot because
2189 * nothing actually breaks if we miss a few updates -- we just won't
2190 * allocate quite as evenly. It all balances out over time.
2192 * If we are doing ditto or log blocks, try to spread them across
2193 * consecutive vdevs. If we're forced to reuse a vdev before we've
2194 * allocated all of our ditto blocks, then try to spread them out on
2195 * that vdev as much as possible. If it turns out to not be possible,
2196 * gradually lower our standards until anything becomes acceptable.
2197 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
2198 * gives us hope of containing our fault domains to something we're
2199 * able to reason about. Otherwise, any two top-level vdev failures
2200 * will guarantee the loss of data. With consecutive allocation,
2201 * only two adjacent top-level vdev failures will result in data loss.
2203 * If we are doing gang blocks (hintdva is non-NULL), try to keep
2204 * ourselves on the same vdev as our gang block header. That
2205 * way, we can hope for locality in vdev_cache, plus it makes our
2206 * fault domains something tractable.
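* (Added note on the retry ladder below: if a full pass over every
* metaslab group fails, the search is relaxed in stages.  While some
* group still imposed a nonzero spread requirement (all_zero stays
* false), dshift is incremented -- halving the required distance of
* vdev_asize >> dshift -- and the scan restarts at "top"; as a last
* resort the loop retries once more with zio_lock set, so that
* vdev_allocatable() is re-evaluated under the SCL_ZIO config lock.)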
2208 if (hintdva) {
2209 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
2212 * It's possible the vdev we're using as the hint no
2213 * longer exists (i.e. removed). Consult the rotor when
2214 * all else fails.
2216 if (vd != NULL) {
2217 mg = vd->vdev_mg;
2219 if (flags & METASLAB_HINTBP_AVOID &&
2220 mg->mg_next != NULL)
2221 mg = mg->mg_next;
2222 } else {
2223 mg = mc->mc_rotor;
2225 } else if (d != 0) {
2226 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
2227 mg = vd->vdev_mg->mg_next;
2228 } else {
2229 mg = mc->mc_rotor;
2233 * If the hint put us into the wrong metaslab class, or into a
2234 * metaslab group that has been passivated, just follow the rotor.
2236 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
2237 mg = mc->mc_rotor;
2239 rotor = mg;
2240 top:
2241 all_zero = B_TRUE;
2242 do {
2243 ASSERT(mg->mg_activation_count == 1);
2245 vd = mg->mg_vd;
2248 * Don't allocate from faulted devices.
2250 if (zio_lock) {
2251 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
2252 allocatable = vdev_allocatable(vd);
2253 spa_config_exit(spa, SCL_ZIO, FTAG);
2254 } else {
2255 allocatable = vdev_allocatable(vd);
2259 * Determine if the selected metaslab group is eligible
2260 * for allocations. If we're ganging or have requested
2261 * an allocation for the smallest gang block size
2262 * then we don't want to avoid allocating to this
2263 * metaslab group. If we're in this condition we should
2264 * try to allocate from any device possible so that we
2265 * don't inadvertently return ENOSPC and suspend the pool
2266 * even though space is still available.
2268 if (allocatable && CAN_FASTGANG(flags) &&
2269 psize > SPA_GANGBLOCKSIZE)
2270 allocatable = metaslab_group_allocatable(mg);
2272 if (!allocatable)
2273 goto next;
2276 * Avoid writing single-copy data to a failing vdev
2277 * unless the user instructs us that it is okay.
2279 if ((vd->vdev_stat.vs_write_errors > 0 ||
2280 vd->vdev_state < VDEV_STATE_HEALTHY) &&
2281 d == 0 && dshift == 3 && vd->vdev_children == 0) {
2282 all_zero = B_FALSE;
2283 goto next;
2286 ASSERT(mg->mg_class == mc);
2288 distance = vd->vdev_asize >> dshift;
2289 if (distance <= (1ULL << vd->vdev_ms_shift))
2290 distance = 0;
2291 else
2292 all_zero = B_FALSE;
2294 asize = vdev_psize_to_asize(vd, psize);
2295 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
2297 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
2298 dva, d);
2299 if (offset != -1ULL) {
2301 * If we've just selected this metaslab group,
2302 * figure out whether the corresponding vdev is
2303 * over- or under-used relative to the pool,
2304 * and set an allocation bias to even it out.
2306 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
2307 vdev_stat_t *vs = &vd->vdev_stat;
2308 int64_t vu, cu;
2310 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
2311 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
2314 * Calculate how much more or less we should
2315 * try to allocate from this device during
2316 * this iteration around the rotor.
2317 * For example, if a device is 80% full
2318 * and the pool is 20% full then we should
2319 * reduce allocations by 60% on this device.
2321 * mg_bias = (20 - 80) * 512K / 100 = -307K
2323 * This reduces allocations by 307K for this
2324 * iteration.
2326 mg->mg_bias = ((cu - vu) *
2327 (int64_t)mg->mg_aliquot) / 100;
2328 } else if (!metaslab_bias_enabled) {
2329 mg->mg_bias = 0;
2332 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2333 mg->mg_aliquot + mg->mg_bias) {
2334 mc->mc_rotor = mg->mg_next;
2335 mc->mc_aliquot = 0;
2338 DVA_SET_VDEV(&dva[d], vd->vdev_id);
2339 DVA_SET_OFFSET(&dva[d], offset);
2340 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2341 DVA_SET_ASIZE(&dva[d], asize);
2343 return (0);
2345 next:
2346 mc->mc_rotor = mg->mg_next;
2347 mc->mc_aliquot = 0;
2348 } while ((mg = mg->mg_next) != rotor);
2350 if (!all_zero) {
2351 dshift++;
2352 ASSERT(dshift < 64);
2353 goto top;
2356 if (!allocatable && !zio_lock) {
2357 dshift = 3;
2358 zio_lock = B_TRUE;
2359 goto top;
2362 bzero(&dva[d], sizeof (dva_t));
2364 return (SET_ERROR(ENOSPC));
2368 * Free the block represented by DVA in the context of the specified
2369 * transaction group.
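* (Added note: with now == B_TRUE the segment is removed from this
* txg's alloctree and returned directly to the in-core ms_tree,
* effectively undoing an allocation made in the still-open txg; with
* now == B_FALSE it is queued on the txg's freetree and flows through
* metaslab_sync()/metaslab_sync_done(), including the deferred-free
* machinery, before becoming allocatable again.)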
2371 static void
2372 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2374 uint64_t vdev = DVA_GET_VDEV(dva);
2375 uint64_t offset = DVA_GET_OFFSET(dva);
2376 uint64_t size = DVA_GET_ASIZE(dva);
2377 vdev_t *vd;
2378 metaslab_t *msp;
2380 ASSERT(DVA_IS_VALID(dva));
2382 if (txg > spa_freeze_txg(spa))
2383 return;
2385 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2386 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2387 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
2388 (u_longlong_t)vdev, (u_longlong_t)offset);
2389 ASSERT(0);
2390 return;
2393 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2395 if (DVA_GET_GANG(dva))
2396 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2398 mutex_enter(&msp->ms_lock);
2400 if (now) {
2401 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2402 offset, size);
2404 VERIFY(!msp->ms_condensing);
2405 VERIFY3U(offset, >=, msp->ms_start);
2406 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2407 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2408 msp->ms_size);
2409 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2410 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2411 range_tree_add(msp->ms_tree, offset, size);
2412 } else {
2413 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2414 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2415 range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2416 offset, size);
2419 mutex_exit(&msp->ms_lock);
2423 * Intent log support: upon opening the pool after a crash, notify the SPA
2424 * of blocks that the intent log has allocated for immediate write, but
2425 * which are still considered free by the SPA because the last transaction
2426 * group didn't commit yet.
2428 static int
2429 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2431 uint64_t vdev = DVA_GET_VDEV(dva);
2432 uint64_t offset = DVA_GET_OFFSET(dva);
2433 uint64_t size = DVA_GET_ASIZE(dva);
2434 vdev_t *vd;
2435 metaslab_t *msp;
2436 int error = 0;
2438 ASSERT(DVA_IS_VALID(dva));
2440 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2441 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2442 return (SET_ERROR(ENXIO));
2444 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2446 if (DVA_GET_GANG(dva))
2447 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2449 mutex_enter(&msp->ms_lock);
2451 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2452 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2454 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2455 error = SET_ERROR(ENOENT);
2457 if (error || txg == 0) { /* txg == 0 indicates dry run */
2458 mutex_exit(&msp->ms_lock);
2459 return (error);
2462 VERIFY(!msp->ms_condensing);
2463 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2464 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2465 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2466 range_tree_remove(msp->ms_tree, offset, size);
2468 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
2469 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2470 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2471 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2474 mutex_exit(&msp->ms_lock);
2476 return (0);
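* Allocate ndvas DVAs for bp.  (Added summary: each copy is placed by
* metaslab_alloc_dva(); on the first failure, every DVA allocated so
* far is freed again with now == B_TRUE and zeroed, so a caller never
* sees a partially filled block pointer.)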
2479 int
2480 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2481 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
2483 dva_t *dva = bp->blk_dva;
2484 dva_t *hintdva = hintbp->blk_dva;
2485 int error = 0;
2487 ASSERT(bp->blk_birth == 0);
2488 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2490 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2492 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
2493 spa_config_exit(spa, SCL_ALLOC, FTAG);
2494 return (SET_ERROR(ENOSPC));
2497 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2498 ASSERT(BP_GET_NDVAS(bp) == 0);
2499 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2501 for (int d = 0; d < ndvas; d++) {
2502 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2503 txg, flags);
2504 if (error != 0) {
2505 for (d--; d >= 0; d--) {
2506 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2507 bzero(&dva[d], sizeof (dva_t));
2509 spa_config_exit(spa, SCL_ALLOC, FTAG);
2510 return (error);
2513 ASSERT(error == 0);
2514 ASSERT(BP_GET_NDVAS(bp) == ndvas);
2516 spa_config_exit(spa, SCL_ALLOC, FTAG);
2518 BP_SET_BIRTH(bp, txg, txg);
2520 return (0);
2523 void
2524 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2526 const dva_t *dva = bp->blk_dva;
2527 int ndvas = BP_GET_NDVAS(bp);
2529 ASSERT(!BP_IS_HOLE(bp));
2530 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2532 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2534 for (int d = 0; d < ndvas; d++)
2535 metaslab_free_dva(spa, &dva[d], txg, now);
2537 spa_config_exit(spa, SCL_FREE, FTAG);
2540 int
2541 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2543 const dva_t *dva = bp->blk_dva;
2544 int ndvas = BP_GET_NDVAS(bp);
2545 int error = 0;
2547 ASSERT(!BP_IS_HOLE(bp));
2549 if (txg != 0) {
2551 * First do a dry run to make sure all DVAs are claimable,
2552 * so we don't have to unwind from partial failures below.
2554 if ((error = metaslab_claim(spa, bp, 0)) != 0)
2555 return (error);
2558 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2560 for (int d = 0; d < ndvas; d++)
2561 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2562 break;
2564 spa_config_exit(spa, SCL_ALLOC, FTAG);
2566 ASSERT(error == 0 || txg == 0);
2568 return (error);
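* (Added summary: debug-only sanity check, enabled via the
* ZFS_DEBUG_ZIO_FREE bit in zfs_flags.  For each DVA of a block about
* to be freed, range_tree_verify() confirms the range is not already
* present in the loaded ms_tree, any per-txg freetree, or any
* defertree -- i.e. it catches double frees before they reach the
* space maps.)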
2571 void
2572 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2574 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2575 return;
2577 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2578 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
2579 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2580 vdev_t *vd = vdev_lookup_top(spa, vdev);
2581 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2582 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2583 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2585 if (msp->ms_loaded)
2586 range_tree_verify(msp->ms_tree, offset, size);
2588 for (int j = 0; j < TXG_SIZE; j++)
2589 range_tree_verify(msp->ms_freetree[j], offset, size);
2590 for (int j = 0; j < TXG_DEFER_SIZE; j++)
2591 range_tree_verify(msp->ms_defertree[j], offset, size);
2593 spa_config_exit(spa, SCL_VDEV, FTAG);