/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

/*
 * Override the default indirect block size of 128K, instead using 16K for
 * spacemaps (2^14 bytes). This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;
boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
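
/*
 * Entry classification sketch (the authoritative bit layout lives in
 * sys/space_map.h; the widths below are noted for orientation, not as
 * the definitive encoding):
 *
 *	debug entry:     prefix == SM_DEBUG_PREFIX; carries txg/sync-pass
 *	                 metadata rather than a segment.
 *	one-word entry:  any prefix other than SM_DEBUG_PREFIX/SM2_PREFIX;
 *	                 packs type, an SM_OFFSET_BITS-wide offset, and an
 *	                 SM_RUN_BITS-wide run into one 64-bit word.
 *	two-word entry:  prefix == SM2_PREFIX; the first word holds run and
 *	                 vdev, the second holds type and offset.
 */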
/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t sm_len = space_map_length(sm);
	ASSERT3U(sm->sm_blksz, !=, 0);

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, sm_len,
	    ZIO_PRIORITY_SYNC_READ);

	uint64_t blksz = sm->sm_blksz;
	int error = 0;
	for (uint64_t block_base = 0; block_base < sm_len && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(sm_len - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e))	/* Skip debug entries */
				continue;

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				e = *block_cursor;
				VERIFY3P(block_cursor, <=, block_end);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
		}

		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
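
/*
 * Usage sketch (hypothetical caller, not part of this file): tally the
 * space recorded as freed by walking every entry:
 *
 *	static int
 *	count_free_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		uint64_t *freed = arg;
 *		if (sme->sme_type == SM_FREE)
 *			*freed += sme->sme_run;
 *		return (0);
 *	}
 *
 *	uint64_t freed = 0;
 *	VERIFY0(space_map_iterate(sm, count_free_cb, &freed));
 *
 * A non-zero return from the callback stops the iteration early and is
 * propagated to the caller of space_map_iterate().
 */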
/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_objsize - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
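
/*
 * Worked example (illustrative layout): if the last block contains the
 * words [S0, D1a, D1b, S2], where S* are one-word entries and D1a/D1b
 * are the two words of a double-word entry, the buffer produced above is
 * [S2, D1a, D1b, S0] -- the entry order is reversed while the internal
 * word order of the double-word entry is preserved, so a forward scan of
 * buf visits entries from newest to oldest.
 */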
/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy the space map's entries incrementally
	 * starting from the end after applying the callback to each of them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, and so on.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry we
	 *    decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_objsize -= sizeof (uint64_t);
				space_map_update(sm);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				e = buf[i];

				ASSERT3P(i, <=, nwords);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_objsize -= words * sizeof (uint64_t);
			space_map_update(sm);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(sm->sm_phys->smp_objsize);
		ASSERT0(sm->sm_alloc);
	}

	zio_buf_free(buf, bufsz);
	return (error);
}
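
/*
 * Hypothetical usage sketch (the names here are assumptions, not part of
 * this file): a syncing-context caller could drain a space map while
 * handing each segment back to the pool:
 *
 *	static int
 *	destroy_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		// process sme->sme_offset / sme->sme_run; return non-zero
 *		// to suspend and resume in a later txg
 *		return (0);
 *	}
 *
 *	error = space_map_incremental_destroy(sm, destroy_cb, arg, tx);
 *
 * Because entries are deleted as the callback succeeds, a suspended
 * destroy picks up exactly where it left off.
 */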
typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset,
		    sme->sme_run);
	}

	return (0);
}
/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}
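
/*
 * Usage sketch (hedged; range_tree_create()'s argument list is assumed,
 * not taken from this file): populate an empty range tree with the free
 * segments tracked by a space map:
 *
 *	range_tree_t *rt = range_tree_create(NULL, NULL);
 *	error = space_map_load(sm, rt, SM_FREE);
 *
 * With SM_FREE the tree initially covers all of [sm_start, sm_start +
 * sm_size); replaying the map then removes ALLOC segments and re-adds
 * segments that were later freed. With SM_ALLOC the tree starts empty
 * and accumulates ALLOC segments instead.
 */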
void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}
void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of 2^sm_shift)
	 * we can safely ignore all ranges in the range tree smaller than
	 * 2^sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4 = 80.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
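
/*
 * Worked example (assuming sm_shift == 9): range tree bucket i feeds
 * space map bucket idx = i - 9 until idx saturates at
 * SPACE_MAP_HISTOGRAM_SIZE - 1 (i.e., 31):
 *
 *	rt_histogram[9]  (512B-1K segments)  -> smp_histogram[0]
 *	rt_histogram[10] (1K-2K segments)    -> smp_histogram[1]
 *	...
 *	rt_histogram[40] and above           -> smp_histogram[31], scaled
 *	                                        by 1 << (i - 40)
 */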
static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_objsize,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_objsize += sizeof (dentry);
}
/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, range_seg_t *rs, maptype_t maptype,
    uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp, void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * if this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	uint64_t start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rs->rs_start, >=, sm->sm_start);
	ASSERT3U(rs->rs_start, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rs->rs_end - rs->rs_start, <=, sm->sm_size);
	ASSERT3U(rs->rs_end, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_objsize;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_objsize += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}

		sm->sm_phys->smp_objsize += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}

	ASSERT0(size);
}
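
/*
 * Worked example (hedged; assumes SM_RUN_BITS == 15 and SM2_RUN_BITS ==
 * 36, consistent with how those constants are used by the size estimate
 * below): with sm_shift == 9, a 1 GiB segment spans 2^21 sm_shift-units.
 * A one-word entry can express a run of roughly 2^15 units (~16 MiB), so
 * the loop above would emit about 64 consecutive one-word entries for
 * it, whereas a single two-word entry (max run ~2^36 units) covers it
 * whole.
 */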
/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef ZFS_DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_objsize;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_objsize;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	avl_tree_t *t = &rt->rt_root;
	for (range_seg_t *rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t offset = (rs->rs_start - sm->sm_start) >>
		    sm->sm_shift;
		uint64_t length = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *     or the vdev_id is set (meaning not equal to
		 *     SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    spa_get_random(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs, maptype, vdev_id, words,
		    &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef ZFS_DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal to or less
	 * than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_objsize);
#endif
}
/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = avl_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}
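
/*
 * Typical call pattern (sketch; the caller-side names are hypothetical):
 * a syncing-context consumer persists this txg's frees and then empties
 * the in-core tree:
 *
 *	space_map_write(sm, frees_rt, SM_FREE, SM_NO_VDEVID, tx);
 *	range_tree_vacate(frees_rt, NULL, NULL);
 *
 * Passing SM_NO_VDEVID keeps each entry eligible for the one-word
 * encoding whenever its offset and run also fit.
 */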
static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL,
		    tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}
/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}
uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}
void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}
/*
 * Given a range tree, it makes a worst-case estimate of how much space
 * the tree's segments would take if they were written to the given
 * space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the number
	 * of entries in [2^i, (2^(i+1))-1] of that range_tree. Given
	 * that, we want to divide the buckets into groups: Buckets that
	 * can be represented using a single-word entry, ones that can
	 * be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries enabled, but it can easily be applied when
	 * the feature is not enabled]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * buckets higher than that are represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry 2^(SM_RUN_BITS + shift) is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}