8023 Panic destroying a metaslab deferred range tree
usr/src/uts/common/fs/zfs/space_map.c

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
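/*
 * With the default of (1 << 12) bytes (4K), one block holds 512
 * eight-byte space map entries.
 */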
int space_map_blksz = (1 << 12);

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree; other segment types are removed.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	end = space_map_length(sm);
	space = space_map_allocated(sm);

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
		    end - bufsize, ZIO_PRIORITY_SYNC_READ);
	}
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;
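
			/*
			 * Each non-debug entry encodes a segment offset and
			 * run length in units of 1 << sm_shift. For example,
			 * with sm_shift == 9, an entry that decodes to
			 * offset 0x10 and run 4 describes a 4 << 9 == 2048
			 * byte segment starting at sm_start + (0x10 << 9).
			 */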
			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			if (SM_TYPE_DECODE(e) == maptype) {
				VERIFY3U(range_tree_space(rt) + size, <=,
				    sm->sm_size);
				range_tree_add(rt, offset, size);
			} else {
				range_tree_remove(rt, offset, size);
			}
		}
	}

	if (error == 0)
		VERIFY3U(range_tree_space(rt), ==, space);
	else
		range_tree_vacate(rt, NULL, NULL);

	zio_buf_free(entry_map, bufsize);
	return (error);
}

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));

	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than
	 * sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket;
		 * the calculation below would normalize this to
		 * 5 * 2^4 = 80.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * All space_maps always have a debug entry, so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
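	/*
	 * For example, assuming SM_RUN_MAX is 2^15 blocks (its value in
	 * sys/space_map.h), a single segment of 65536 blocks requires
	 * howmany(65536, 32768) == 2 entries.
	 */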
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}

/*
 * Note: space_map_write() will drop sm_lock across dmu_write() calls.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

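	/*
	 * Each write begins with a one-word debug entry recording the
	 * action (maptype), sync pass, and txg; space_map_load() skips
	 * debug entries when reconstructing the range tree, and
	 * space_map_entries() accounts for one such entry per write.
	 */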
	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(rt->rt_lock);
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				mutex_enter(rt->rt_lock);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(rt->rt_lock);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		mutex_enter(rt->rt_lock);
		sm->sm_phys->smp_objsize += size;
	}

	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != space_map_blksz) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

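	/*
	 * With SPA_FEATURE_SPACEMAP_HISTOGRAM enabled, the bonus buffer
	 * must be large enough for the full space_map_phys_t, including
	 * smp_histogram; otherwise the smaller pre-histogram V0 layout
	 * (presumably just smp_object, smp_objsize and smp_alloc) is used.
	 */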
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os,
	    DMU_OT_SPACE_MAP, space_map_blksz,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	spa_t *spa;

	if (sm == NULL)
		return;

	spa = dmu_objset_spa(sm->sm_os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		dmu_object_info_from_db(sm->sm_dbuf, &doi);
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			VERIFY(spa_feature_is_active(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM));
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}