hammer2 - Fix bulkfree bugs
[dragonfly.git] / sys / vfs / hammer2 / hammer2_bulkfree.c
blob 494a45e7ff0086b8acb528537cde1502e3763637
/*
 * Copyright (c) 2013-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include "hammer2.h"
#define H2FMBASE(key, radix)    ((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
#define H2FMSHIFT(radix)        ((hammer2_off_t)1 << (radix))
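/*
 * H2FMBASE() rounds a key down to the base of its 2^radix-aligned region and
 * H2FMSHIFT() is the size of that region; e.g. H2FMSHIFT(16) == 65536.
 */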
/*
 * breadth-first search
 */
typedef struct hammer2_chain_save {
        TAILQ_ENTRY(hammer2_chain_save) entry;
        hammer2_chain_t *chain;
        int             pri;
} hammer2_chain_save_t;

TAILQ_HEAD(hammer2_chain_save_list, hammer2_chain_save);
typedef struct hammer2_chain_save_list hammer2_chain_save_list_t;
typedef struct hammer2_bulkfree_info {
        hammer2_dev_t           *hmp;
        kmem_anon_desc_t        kp;
        hammer2_off_t           sbase;          /* sub-loop iteration */
        hammer2_off_t           sstop;
        hammer2_bmap_data_t     *bmap;
        int                     depth;
        long                    count_10_00;    /* staged->free        */
        long                    count_11_10;    /* allocated->staged   */
        long                    count_00_11;    /* (should not happen) */
        long                    count_01_11;    /* (should not happen) */
        long                    count_10_11;    /* staged->allocated   */
        long                    count_l0cleans;
        long                    count_linadjusts;
        long                    count_inodes_scanned;
        long                    count_dedup_factor;
        long                    bytes_scanned;
        hammer2_off_t           adj_free;
        hammer2_tid_t           mtid;
        hammer2_tid_t           saved_mirror_tid;
        time_t                  save_time;
        hammer2_chain_save_list_t list;
        hammer2_dedup_t         *dedup;
        int                     pri;
} hammer2_bulkfree_info_t;
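/*
 * The count_* transition fields above refer to the 2-bit freemap codes used
 * throughout this file: 00 = free, 10 = staged (possibly free), and
 * 11 = allocated; 01 is not normally used and is reported as an error.
 */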
static int h2_bulkfree_test(hammer2_bulkfree_info_t *info,
                        hammer2_blockref_t *bref, int pri);
/*
 * General bulk scan function with callback.  Called with a referenced
 * but UNLOCKED parent.  The parent is returned in the same state.
 */
static
int
hammer2_bulk_scan(hammer2_chain_t *parent,
                  int (*func)(hammer2_bulkfree_info_t *info,
                              hammer2_blockref_t *bref),
                  hammer2_bulkfree_info_t *info)
{
        hammer2_blockref_t bref;
        hammer2_chain_t *chain;
        int cache_index = -1;
        int doabort = 0;
        int first = 1;

        ++info->pri;

        hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
                                   HAMMER2_RESOLVE_SHARED);
        chain = NULL;
        /*
         * Generally loop on the contents if we have not been flagged
         * for abort.
         *
         * Remember that these chains are completely isolated from
         * the frontend, so we can release locks temporarily without
         * imploding.
         */
        while ((doabort & HAMMER2_BULK_ABORT) == 0 &&
               hammer2_chain_scan(parent, &chain, &bref, &first,
                                  &cache_index,
                                  HAMMER2_LOOKUP_NODATA |
                                  HAMMER2_LOOKUP_SHARED) != NULL) {
                /*
                 * Process bref, chain is only non-NULL if the bref
                 * might be recursable (it's possible that we sometimes get
                 * a non-NULL chain where the bref cannot be recursed).
                 */
#if 0
                kprintf("SCAN %016jx\n", bref.data_off);
                int xerr = tsleep(&info->pri, PCATCH, "slp", hz / 10);
                if (xerr == EINTR || xerr == ERESTART) {
                        doabort |= HAMMER2_BULK_ABORT;
                }
#endif
                ++info->pri;
                if (h2_bulkfree_test(info, &bref, 1))
                        continue;

                doabort |= func(info, &bref);

                if (doabort & HAMMER2_BULK_ABORT)
                        break;

                /*
                 * A non-null chain is always returned if it is
                 * recursive, otherwise a non-null chain might be
                 * returned but usually is not when not recursive.
                 */
                if (chain == NULL)
                        continue;

                /*
                 * Else check type and setup depth-first scan.
                 *
                 * Account for bytes actually read.
                 */
                info->bytes_scanned += chain->bytes;

                switch(chain->bref.type) {
                case HAMMER2_BREF_TYPE_INODE:
                case HAMMER2_BREF_TYPE_FREEMAP_NODE:
                case HAMMER2_BREF_TYPE_INDIRECT:
                case HAMMER2_BREF_TYPE_VOLUME:
                case HAMMER2_BREF_TYPE_FREEMAP:
                        ++info->depth;
                        if (info->depth > 16) {
                                hammer2_chain_save_t *save;
                                save = kmalloc(sizeof(*save), M_HAMMER2,
                                               M_WAITOK | M_ZERO);
                                save->chain = chain;
                                hammer2_chain_ref(chain);
                                TAILQ_INSERT_TAIL(&info->list, save, entry);

                                /* guess */
                                info->pri += 10;
                        } else {
                                int savepri = info->pri;

                                hammer2_chain_unlock(chain);
                                info->pri = 0;
                                doabort |= hammer2_bulk_scan(chain, func, info);
                                info->pri += savepri;
                                hammer2_chain_lock(chain,
                                                   HAMMER2_RESOLVE_ALWAYS |
                                                   HAMMER2_RESOLVE_SHARED);
                        }
                        --info->depth;
                        break;
                default:
                        /* does not recurse */
                        break;
                }
        }
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }

        /*
         * Save with higher pri now that we know what it is.
         */
        h2_bulkfree_test(info, &parent->bref, info->pri + 1);

        hammer2_chain_unlock(parent);

        return doabort;
}
/*
 * Bulkfree algorithm
 *
 * Repeat {
 *      Chain flush (partial synchronization) XXX removed
 *      Scan the whole topology - build in-memory freemap (mark 11)
 *      Reconcile the in-memory freemap against the on-disk freemap.
 *              ondisk xx -> ondisk 11 (if allocated)
 *              ondisk 11 -> ondisk 10 (if free in-memory)
 *              ondisk 10 -> ondisk 00 (if free in-memory) - on next pass
 * }
 *
 * The topology scan may have to be performed multiple times to window
 * freemaps which are too large to fit in kernel memory.
 *
 * Races are handled using a double-transition (11->10, 10->00).  The bulkfree
 * scan snapshots the volume root's blockset and thus can run concurrent with
 * normal operations, as long as a full flush is made between each pass to
 * synchronize any modified chains (otherwise their blocks might be improperly
 * freed).
 *
 * Temporary memory in multiples of 64KB is required to reconstruct the leaf
 * hammer2_bmap_data blocks so they can later be compared against the live
 * freemap.  Each 64KB block represents 128 x 16KB x 1024 = ~2 GB of storage.
 * A 32MB save area thus represents around ~1 TB.  The temporary memory
 * allocated can be specified.  If it is not sufficient multiple topology
 * passes will be made.
 */
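/*
 * Sizing example (derived from the comment above and the 8MB default used
 * by the bulkfree thread below): each 64KB of temporary memory covers ~2GB
 * of storage, so an 8MB buffer holds 8MB / 64KB = 128 leaf windows and lets
 * a single topology pass reconcile roughly 256GB before the next window.
 */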
/*
 * Bulkfree callback info
 */
static void hammer2_bulkfree_thread(void *arg __unused);
static void cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size);
static int h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo,
                        hammer2_blockref_t *bref);
static void h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo);
static void h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
                        hammer2_off_t data_off, hammer2_bmap_data_t *live,
                        hammer2_bmap_data_t *bmap, int nofree);
void
hammer2_bulkfree_init(hammer2_dev_t *hmp)
{
        hammer2_thr_create(&hmp->bfthr, NULL, hmp,
                           hmp->devrepname, -1, -1,
                           hammer2_bulkfree_thread);
}

void
hammer2_bulkfree_uninit(hammer2_dev_t *hmp)
{
        hammer2_thr_delete(&hmp->bfthr);
}
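/*
 * Bulkfree control thread.  Sleeps until signalled with a control flag;
 * STOP terminates the thread, FREEZE/UNFREEZE park it, and REMASTER runs
 * an automatic bulkfree pass using an 8MB (8192 * 1024 byte) scan buffer.
 */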
static void
hammer2_bulkfree_thread(void *arg)
{
        hammer2_thread_t *thr = arg;
        hammer2_ioc_bulkfree_t bfi;
        uint32_t flags;

        for (;;) {
                hammer2_thr_wait_any(thr,
                                     HAMMER2_THREAD_STOP |
                                     HAMMER2_THREAD_FREEZE |
                                     HAMMER2_THREAD_UNFREEZE |
                                     HAMMER2_THREAD_REMASTER,
                                     hz * 60);

                flags = thr->flags;
                cpu_ccfence();
                if (flags & HAMMER2_THREAD_STOP)
                        break;
                if (flags & HAMMER2_THREAD_FREEZE) {
                        hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
                                                 HAMMER2_THREAD_FREEZE);
                        continue;
                }
                if (flags & HAMMER2_THREAD_UNFREEZE) {
                        hammer2_thr_signal2(thr, 0,
                                                 HAMMER2_THREAD_FROZEN |
                                                 HAMMER2_THREAD_UNFREEZE);
                        continue;
                }
                if (flags & HAMMER2_THREAD_FROZEN)
                        continue;
                if (flags & HAMMER2_THREAD_REMASTER) {
                        hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
                        bzero(&bfi, sizeof(bfi));
                        bfi.size = 8192 * 1024;
                        hammer2_bulkfree_pass(thr->hmp, &bfi);
                }
        }
        thr->td = NULL;
        hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
        /* structure can go invalid at this point */
}
int
hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_ioc_bulkfree_t *bfi)
{
        hammer2_bulkfree_info_t cbinfo;
        hammer2_chain_t *vchain;
        hammer2_chain_save_t *save;
        hammer2_off_t incr;
        size_t size;
        int doabort = 0;

        /*
         * A bulkfree operations lock is required for the duration.  We
         * must hold it across our flushes to guarantee that we never run
         * two bulkfree passes in a row without a flush in the middle.
         */
        lockmgr(&hmp->bulklk, LK_EXCLUSIVE);
        /*
         * We have to clear the live dedup cache as it might have entries
         * that are freeable as of now.  Any new entries in the dedup cache
         * made after this point, even if they become freeable, will have
         * previously been fully allocated and will be protected by the
         * 2-stage bulkfree.
         */
        hammer2_dedup_clear(hmp);

        /*
         * Create a stable snapshot of the block tree which we can run
         * the bulkfree pass on.  This allows the bulkfree pass to run
         * concurrent with all other operations (except another bulkfree).
         *
         * This must flush all dirty chain data, but does not have to
         * flush dirty buffer cache buffers which have not yet been
         * realized and does not have to flush any newly realized dirty
         * chains while the bulkfree pass is running, as long as said
         * newly dirtied chains get flushed the next time, before the
         * next bulkfree pass.
         */
        vchain = hammer2_flush_quick(hmp);
        hammer2_chain_bulkdrop(vchain);
        vchain = hammer2_flush_quick(hmp);
        /*
         * Setup for free pass
         */
        bzero(&cbinfo, sizeof(cbinfo));
        size = (bfi->size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
               ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);
        cbinfo.hmp = hmp;
        cbinfo.bmap = kmem_alloc_swapbacked(&cbinfo.kp, size, VM_SUBSYS_HAMMER);
        cbinfo.saved_mirror_tid = hmp->voldata.mirror_tid;

        cbinfo.dedup = kmalloc(sizeof(*cbinfo.dedup) * HAMMER2_DEDUP_HEUR_SIZE,
                               M_HAMMER2, M_WAITOK | M_ZERO);

        /*
         * Normalize start point to a 2GB boundary.  We operate on a
         * 64KB leaf bitmap boundary which represents 2GB of storage.
         */
        cbinfo.sbase = bfi->sbase;
        if (cbinfo.sbase > hmp->voldata.volu_size)
                cbinfo.sbase = hmp->voldata.volu_size;
        cbinfo.sbase &= ~HAMMER2_FREEMAP_LEVEL1_MASK;
        TAILQ_INIT(&cbinfo.list);
        /*
         * Loop on a full meta-data scan as many times as required to
         * get through all available storage.
         */
        while (cbinfo.sbase < hmp->voldata.volu_size) {
                /*
                 * We have enough ram to represent (incr) bytes of storage.
                 * Each 64KB of ram represents 2GB of storage.
                 */
                cbinfo_bmap_init(&cbinfo, size);
                incr = size / HAMMER2_FREEMAP_LEVELN_PSIZE *
                       HAMMER2_FREEMAP_LEVEL1_SIZE;
                if (hmp->voldata.volu_size - cbinfo.sbase < incr)
                        cbinfo.sstop = hmp->voldata.volu_size;
                else
                        cbinfo.sstop = cbinfo.sbase + incr;
                if (hammer2_debug & 1) {
                        kprintf("bulkfree pass %016jx/%jdGB\n",
                                (intmax_t)cbinfo.sbase,
                                (intmax_t)incr / HAMMER2_FREEMAP_LEVEL1_SIZE);
                }

                /*
                 * Scan topology for stuff inside this range.
                 */
                hammer2_trans_init(hmp->spmp, 0);
                cbinfo.mtid = hammer2_trans_sub(hmp->spmp);
                cbinfo.pri = 0;
                doabort |= hammer2_bulk_scan(vchain, h2_bulkfree_callback,
                                             &cbinfo);

                while ((save = TAILQ_FIRST(&cbinfo.list)) != NULL &&
                       doabort == 0) {
                        TAILQ_REMOVE(&cbinfo.list, save, entry);
                        cbinfo.pri = 0;
                        doabort |= hammer2_bulk_scan(save->chain,
                                                     h2_bulkfree_callback,
                                                     &cbinfo);
                        hammer2_chain_drop(save->chain);
                        kfree(save, M_HAMMER2);
                }
                while (save) {
                        TAILQ_REMOVE(&cbinfo.list, save, entry);
                        hammer2_chain_drop(save->chain);
                        kfree(save, M_HAMMER2);
                        save = TAILQ_FIRST(&cbinfo.list);
                }

                kprintf("bulkfree lastdrop %d %d doabort=%d\n",
                        vchain->refs, vchain->core.chain_count, doabort);
                /*
                 * If complete scan succeeded we can synchronize our
                 * in-memory freemap against live storage.  If an abort
                 * did occur we cannot safely synchronize our partially
                 * filled-out in-memory freemap.
                 */
                if (doabort == 0) {
                        h2_bulkfree_sync(&cbinfo);

                        hammer2_voldata_lock(hmp);
                        hammer2_voldata_modify(hmp);
                        hmp->voldata.allocator_free += cbinfo.adj_free;
                        hammer2_voldata_unlock(hmp);
                }

                /*
                 * Cleanup for next loop.
                 */
                hammer2_trans_done(hmp->spmp);
                if (doabort)
                        break;
                cbinfo.sbase = cbinfo.sstop;
                cbinfo.adj_free = 0;
        }
        hammer2_chain_bulkdrop(vchain);
        kmem_free_swapbacked(&cbinfo.kp);
        kfree(cbinfo.dedup, M_HAMMER2);
        cbinfo.dedup = NULL;

        bfi->sstop = cbinfo.sbase;
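        /*
         * Progress figure: incr is computed in units of 0.01% (sstop divided
         * by 1/10000th of the volume size) and clamped to 10000 (100.00%)
         * for the statistics printout below.
         */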
        incr = bfi->sstop / (hmp->voldata.volu_size / 10000);
        if (incr > 10000)
                incr = 10000;

        kprintf("bulkfree pass statistics (%d.%02d%% storage processed):\n",
                (int)incr / 100,
                (int)incr % 100);
        kprintf("    transition->free   %ld\n", cbinfo.count_10_00);
        kprintf("    transition->staged %ld\n", cbinfo.count_11_10);
        kprintf("    ERR(00)->allocated %ld\n", cbinfo.count_00_11);
        kprintf("    ERR(01)->allocated %ld\n", cbinfo.count_01_11);
        kprintf("    staged->allocated  %ld\n", cbinfo.count_10_11);
        kprintf("    ~2MB segs cleaned  %ld\n", cbinfo.count_l0cleans);
        kprintf("    linear adjusts     %ld\n", cbinfo.count_linadjusts);
        kprintf("    dedup factor       %ld\n", cbinfo.count_dedup_factor);

        lockmgr(&hmp->bulklk, LK_RELEASE);
        /* hammer2_vfs_sync(mp, MNT_WAIT); sync needed */

        return doabort;
}
/*
 * Initialize the in-memory bmap array for the current scan window.  Ranges
 * below allocator_beg, beyond volu_size, or below the reserved area at the
 * start of each freemap zone are pre-marked fully allocated so they are
 * never considered for freeing.
 */
static void
cbinfo_bmap_init(hammer2_bulkfree_info_t *cbinfo, size_t size)
{
        hammer2_bmap_data_t *bmap = cbinfo->bmap;
        hammer2_key_t key = cbinfo->sbase;
        hammer2_key_t lokey;
        hammer2_key_t hikey;

        lokey = (cbinfo->hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
                ~HAMMER2_SEGMASK64;
        hikey = cbinfo->hmp->voldata.volu_size & ~HAMMER2_SEGMASK64;

        bzero(bmap, size);
        while (size) {
                if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
                            HAMMER2_ZONE_SEG64) {
                        lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
                                HAMMER2_ZONE_SEG64;
                }
                if (key < lokey || key >= hikey) {
                        memset(bmap->bitmapq, -1,
                               sizeof(bmap->bitmapq));
                        bmap->avail = 0;
                        bmap->linear = HAMMER2_SEGSIZE;
                } else {
                        bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
                }
                size -= sizeof(*bmap);
                key += HAMMER2_FREEMAP_LEVEL0_SIZE;
                ++bmap;
        }
}
static int
h2_bulkfree_callback(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref)
{
        hammer2_bmap_data_t *bmap;
        hammer2_off_t data_off;
        uint16_t class;
        size_t bytes;
        int radix;
        int error;

        /*
         * Check for signal and allow yield to userland during scan
         */
        if (hammer2_signal_check(&cbinfo->save_time))
                return HAMMER2_BULK_ABORT;
        if (bref->type == HAMMER2_BREF_TYPE_INODE) {
                ++cbinfo->count_inodes_scanned;
                if ((cbinfo->count_inodes_scanned & 65535) == 0)
                        kprintf(" inodes %6ld bytes %9ld\n",
                                cbinfo->count_inodes_scanned,
                                cbinfo->bytes_scanned);
        }

        /*
         * Calculate the data offset and determine if it is within
         * the current freemap range being gathered.
         */
        error = 0;
        data_off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
        if (data_off < cbinfo->sbase || data_off >= cbinfo->sstop)
                return 0;
        if (data_off < cbinfo->hmp->voldata.allocator_beg)
                return 0;
        if (data_off >= cbinfo->hmp->voldata.volu_size)
                return 0;
        /*
         * Calculate the information needed to generate the in-memory
         * freemap record.
         *
         * Hammer2 does not allow allocations to cross the L1 (2GB) boundary,
         * it's a problem if it does.  (Or L0 (2MB) for that matter).
         */
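        /*
         * Note: the low bits of bref->data_off encode the allocation radix
         * (size = 1 << radix) while the remaining bits are the byte offset;
         * e.g. a radix of 16 describes a 64KB allocation.
         */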
        radix = (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
        KKASSERT(radix != 0);
        bytes = (size_t)1 << radix;
        class = (bref->type << 8) | hammer2_devblkradix(radix);

        if (data_off + bytes >= cbinfo->sstop) {
                kprintf("hammer2_bulkfree_scan: illegal 2GB boundary "
                        "%016jx %016jx/%d\n",
                        (intmax_t)bref->data_off,
                        (intmax_t)bref->key,
                        bref->keybits);
                bytes = cbinfo->sstop - data_off;       /* XXX */
        }

        /*
         * Convert to a storage offset relative to the beginning of the
         * storage range we are collecting.  Then lookup the level0 bmap entry.
         */
        data_off -= cbinfo->sbase;
        bmap = cbinfo->bmap + (data_off >> HAMMER2_FREEMAP_LEVEL0_RADIX);

        /*
         * Convert data_off to a bmap-relative value (~2MB storage range).
         * Adjust linear, class, and avail.
         *
         * Hammer2 does not allow allocations to cross the L0 (2MB) boundary.
         */
        data_off &= HAMMER2_FREEMAP_LEVEL0_MASK;
        if (data_off + bytes > HAMMER2_FREEMAP_LEVEL0_SIZE) {
                kprintf("hammer2_bulkfree_scan: illegal 2MB boundary "
                        "%016jx %016jx/%d\n",
                        (intmax_t)bref->data_off,
                        (intmax_t)bref->key,
                        bref->keybits);
                bytes = HAMMER2_FREEMAP_LEVEL0_SIZE - data_off;
        }

        if (bmap->class == 0) {
                bmap->class = class;
                bmap->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
        }
        if (bmap->class != class) {
                kprintf("hammer2_bulkfree_scan: illegal mixed class "
                        "%016jx %016jx/%d (%04x vs %04x)\n",
                        (intmax_t)bref->data_off,
                        (intmax_t)bref->key,
                        bref->keybits,
                        class, bmap->class);
        }
        if (bmap->linear < (int32_t)data_off + (int32_t)bytes)
                bmap->linear = (int32_t)data_off + (int32_t)bytes;
        /*
         * Adjust the hammer2_bitmap_t bitmap[HAMMER2_BMAP_ELEMENTS].
         * 64-bit entries, 2 bits per entry, to code 11.
         *
         * NOTE: The allocation can be smaller than HAMMER2_FREEMAP_BLOCK_SIZE.
         */
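        /*
         * Worked example (assuming 16KB freemap blocks and 64-bit bitmapq
         * elements, i.e. 32 two-bit pairs per element): an allocation at an
         * L0-relative offset of 9 blocks lands in element 0 (bindex 0) with
         * bmask = 3 << 18, marking that single 16KB block as allocated (11).
         */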
        while (bytes > 0) {
                int bindex;
                hammer2_bitmap_t bmask;

                bindex = (int)data_off >> (HAMMER2_FREEMAP_BLOCK_RADIX +
                                           HAMMER2_BMAP_INDEX_RADIX);
                bmask = (hammer2_bitmap_t)3 <<
                        ((((int)data_off & HAMMER2_BMAP_INDEX_MASK) >>
                         HAMMER2_FREEMAP_BLOCK_RADIX) << 1);

                /*
                 * NOTE! The (avail) calculation is bitmap-granular.  Multiple
                 *       sub-granular records can wind up at the same bitmap
                 *       position.
                 */
                if ((bmap->bitmapq[bindex] & bmask) == 0) {
                        if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE) {
                                bmap->avail -= HAMMER2_FREEMAP_BLOCK_SIZE;
                        } else {
                                bmap->avail -= bytes;
                        }
                        bmap->bitmapq[bindex] |= bmask;
                }
                data_off += HAMMER2_FREEMAP_BLOCK_SIZE;
                if (bytes < HAMMER2_FREEMAP_BLOCK_SIZE)
                        bytes = 0;
                else
                        bytes -= HAMMER2_FREEMAP_BLOCK_SIZE;
        }
        return error;
}
/*
 * Synchronize the in-memory bitmap with the live freemap.  This is not a
 * direct copy.  Instead the bitmaps must be compared:
 *
 *      In-memory       Live-freemap
 *         00             11 -> 10      (do nothing if live modified)
 *                        10 -> 00      (do nothing if live modified)
 *         11             10 -> 11      handles race against live
 *                        ** -> 11      nominally warn of corruption
 */
static void
h2_bulkfree_sync(hammer2_bulkfree_info_t *cbinfo)
{
        hammer2_off_t data_off;
        hammer2_key_t key;
        hammer2_key_t key_dummy;
        hammer2_bmap_data_t *bmap;
        hammer2_bmap_data_t *live;
        hammer2_chain_t *live_parent;
        hammer2_chain_t *live_chain;
        int cache_index = -1;
        int bmapindex;

        kprintf("hammer2_bulkfree - range ");

        if (cbinfo->sbase < cbinfo->hmp->voldata.allocator_beg)
                kprintf("%016jx-",
                        (intmax_t)cbinfo->hmp->voldata.allocator_beg);
        else
                kprintf("%016jx-",
                        (intmax_t)cbinfo->sbase);

        if (cbinfo->sstop > cbinfo->hmp->voldata.volu_size)
                kprintf("%016jx\n",
                        (intmax_t)cbinfo->hmp->voldata.volu_size);
        else
                kprintf("%016jx\n",
                        (intmax_t)cbinfo->sstop);

        data_off = cbinfo->sbase;
        bmap = cbinfo->bmap;

        live_parent = &cbinfo->hmp->fchain;
        hammer2_chain_ref(live_parent);
        hammer2_chain_lock(live_parent, HAMMER2_RESOLVE_ALWAYS);
        live_chain = NULL;
        /*
         * Iterate each hammer2_bmap_data_t line (128 bytes) managing
         * 4MB of storage.
         */
        while (data_off < cbinfo->sstop) {
                /*
                 * The freemap is not used below allocator_beg or beyond
                 * volu_size.
                 */
                int nofree;

                if (data_off < cbinfo->hmp->voldata.allocator_beg)
                        goto next;
                if (data_off >= cbinfo->hmp->voldata.volu_size)
                        goto next;

                /*
                 * Locate the freemap leaf on the live filesystem
                 */
                key = (data_off & ~HAMMER2_FREEMAP_LEVEL1_MASK);
                nofree = 0;

                if (live_chain == NULL || live_chain->bref.key != key) {
                        if (live_chain) {
                                hammer2_chain_unlock(live_chain);
                                hammer2_chain_drop(live_chain);
                        }
                        live_chain = hammer2_chain_lookup(
                                            &live_parent,
                                            &key_dummy,
                                            key,
                                            key + HAMMER2_FREEMAP_LEVEL1_MASK,
                                            &cache_index,
                                            HAMMER2_LOOKUP_ALWAYS);
#if 0
                        /*
                         * If recent allocations were made we avoid races by
                         * not staging or freeing any blocks.  We can still
                         * remark blocks as fully allocated.
                         */
                        if (live_chain) {
                                if (hammer2_debug & 1) {
                                        kprintf("live_chain %016jx\n",
                                                (intmax_t)key);
                                }
                                if (live_chain->bref.mirror_tid >
                                    cbinfo->saved_mirror_tid) {
                                        kprintf("hammer2_bulkfree: "
                                                "avoid %016jx\n",
                                                data_off);
                                        nofree = 1;
                                } else {
                                        nofree = 0;
                                }
                        }
#endif
                }
                if (live_chain == NULL) {
                        /*
                         * XXX if we implement a full recovery mode we need
                         * to create/recreate missing freemap chains if our
                         * bmap has any allocated blocks.
                         */
                        if (bmap->class &&
                            bmap->avail != HAMMER2_FREEMAP_LEVEL0_SIZE) {
                                kprintf("hammer2_bulkfree: cannot locate "
                                        "live leaf for allocated data "
                                        "near %016jx\n",
                                        (intmax_t)data_off);
                        }
                        goto next;
                }
                if (live_chain->error) {
                        kprintf("hammer2_bulkfree: error %s looking up "
                                "live leaf for allocated data near %016jx\n",
                                hammer2_error_str(live_chain->error),
                                (intmax_t)data_off);
                        hammer2_chain_unlock(live_chain);
                        hammer2_chain_drop(live_chain);
                        live_chain = NULL;
                        goto next;
                }

                bmapindex = (data_off & HAMMER2_FREEMAP_LEVEL1_MASK) >>
                            HAMMER2_FREEMAP_LEVEL0_RADIX;
                live = &live_chain->data->bmdata[bmapindex];

                /*
                 * TODO - we could shortcut this by testing that both
                 * live->class and bmap->class are 0, and both avails are
                 * set to HAMMER2_FREEMAP_LEVEL0_SIZE (4MB).
                 */
                if (bcmp(live->bitmapq, bmap->bitmapq,
                         sizeof(bmap->bitmapq)) == 0) {
                        goto next;
                }
                if (hammer2_debug & 1) {
                        kprintf("live %016jx %04d.%04x (avail=%d)\n",
                                data_off, bmapindex, live->class, live->avail);
                }

                hammer2_chain_modify(live_chain, cbinfo->mtid, 0, 0);
                live = &live_chain->data->bmdata[bmapindex];

                h2_bulkfree_sync_adjust(cbinfo, data_off, live, bmap, nofree);
next:
                data_off += HAMMER2_FREEMAP_LEVEL0_SIZE;
                ++bmap;
        }
        if (live_chain) {
                hammer2_chain_unlock(live_chain);
                hammer2_chain_drop(live_chain);
        }
        if (live_parent) {
                hammer2_chain_unlock(live_parent);
                hammer2_chain_drop(live_parent);
        }
}
/*
 * When bulkfree is finally able to free a block it must make sure that
 * the INVALOK bit in any cached DIO is cleared prior to the block being
 * reused.
 */
static
void
fixup_dio(hammer2_dev_t *hmp, hammer2_off_t data_off, int bindex, int scount)
{
        data_off += (scount >> 1) * HAMMER2_FREEMAP_BLOCK_SIZE;
        data_off += bindex *
                (HAMMER2_FREEMAP_BLOCK_SIZE * HAMMER2_BMAP_BLOCKS_PER_ELEMENT);
        hammer2_io_resetinval(hmp, data_off);
}
/*
 * Merge the bulkfree bitmap against the existing bitmap.
 *
 * If nofree is non-zero the merge will only mark free blocks as allocated
 * and will refuse to free any blocks.
 */
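/*
 * Illustration only (not compiled): the per-pair state merge performed by
 * h2_bulkfree_sync_adjust() below, written as a standalone helper over a
 * single 2-bit pair.  Codes: 00 = free, 10 = staged, 11 = allocated; 01 is
 * not expected.  This sketch ignores the avail/linear accounting.
 */
#if 0
static hammer2_bitmap_t
bulkfree_merge_pair(hammer2_bitmap_t mpair, hammer2_bitmap_t lpair, int nofree)
{
        if (mpair == 0) {
                /* in-memory free: demote live 11->10, and 10->00 unless nofree */
                if (lpair == 3)
                        return 2;
                if (lpair == 2)
                        return nofree ? 2 : 0;
                return lpair;           /* 00 stays, 01 warns and stays */
        }
        if (mpair == 3)
                return 3;               /* in-memory allocated: force live to 11 */
        return lpair;                   /* other in-memory codes are not merged */
}
#endif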
static
void
h2_bulkfree_sync_adjust(hammer2_bulkfree_info_t *cbinfo,
                        hammer2_off_t data_off, hammer2_bmap_data_t *live,
                        hammer2_bmap_data_t *bmap, int nofree)
{
        int bindex;
        int scount;
        hammer2_bitmap_t lmask;
        hammer2_bitmap_t mmask;
        for (bindex = 0; bindex < HAMMER2_BMAP_ELEMENTS; ++bindex) {
                lmask = live->bitmapq[bindex];  /* live */
                mmask = bmap->bitmapq[bindex];  /* snapshotted bulkfree */
                if (lmask == mmask)
                        continue;

                for (scount = 0;
                     scount < HAMMER2_BMAP_BITS_PER_ELEMENT;
                     scount += 2) {
                        if ((mmask & 3) == 0) {
                                /*
                                 * in-memory 00         live 11 -> 10
                                 *                      live 10 -> 00
                                 *
                                 * Storage might be marked allocated or
                                 * staged and must be remarked staged or
                                 * free.
                                 */
                                switch (lmask & 3) {
                                case 0: /* 00 */
                                        break;
                                case 1: /* 01 */
                                        kprintf("hammer2_bulkfree: cannot "
                                                "transition m=00/l=01\n");
                                        break;
                                case 2: /* 10 -> 00 */
                                        if (nofree)
                                                break;
                                        live->bitmapq[bindex] &=
                                            ~((hammer2_bitmap_t)2 << scount);
                                        live->avail +=
                                                HAMMER2_FREEMAP_BLOCK_SIZE;
                                        if (live->avail >
                                            HAMMER2_FREEMAP_LEVEL0_SIZE) {
                                                live->avail =
                                                    HAMMER2_FREEMAP_LEVEL0_SIZE;
                                        }
                                        cbinfo->adj_free +=
                                                HAMMER2_FREEMAP_BLOCK_SIZE;
                                        ++cbinfo->count_10_00;
                                        break;
                                case 3: /* 11 -> 10 */
                                        live->bitmapq[bindex] &=
                                            ~((hammer2_bitmap_t)1 << scount);
                                        ++cbinfo->count_11_10;
                                        fixup_dio(cbinfo->hmp, data_off,
                                                  bindex, scount);
                                        break;
                                }
                        } else if ((mmask & 3) == 3) {
                                /*
                                 * in-memory 11         live 10 -> 11
                                 *                      live ** -> 11
                                 *
                                 * Storage might be incorrectly marked free
                                 * or staged and must be remarked fully
                                 * allocated.
                                 */
                                switch (lmask & 3) {
                                case 0: /* 00 */
                                        ++cbinfo->count_00_11;
                                        cbinfo->adj_free -=
                                                HAMMER2_FREEMAP_BLOCK_SIZE;
                                        live->avail -=
                                                HAMMER2_FREEMAP_BLOCK_SIZE;
                                        if ((int32_t)live->avail < 0)
                                                live->avail = 0;
                                        break;
                                case 1: /* 01 */
                                        ++cbinfo->count_01_11;
                                        break;
                                case 2: /* 10 -> 11 */
                                        ++cbinfo->count_10_11;
                                        break;
                                case 3: /* 11 */
                                        break;
                                }
                                live->bitmapq[bindex] |=
                                        ((hammer2_bitmap_t)3 << scount);
                        }
                        mmask >>= 2;
                        lmask >>= 2;
                }
        }
        /*
         * Determine if the live bitmap is completely free and reset its
         * fields if so.  Otherwise check to see if we can reduce the linear
         * offset.
         */
        for (bindex = HAMMER2_BMAP_ELEMENTS - 1; bindex >= 0; --bindex) {
                if (live->bitmapq[bindex] != 0)
                        break;
        }
        if (nofree) {
                /* do nothing */
        } else if (bindex < 0) {
                live->avail = HAMMER2_FREEMAP_LEVEL0_SIZE;
                live->class = 0;
                live->linear = 0;
                ++cbinfo->count_l0cleans;
        } else if (bindex < 7) {
                ++bindex;
                if (live->linear > bindex * HAMMER2_FREEMAP_BLOCK_SIZE) {
                        live->linear = bindex * HAMMER2_FREEMAP_BLOCK_SIZE;
                        ++cbinfo->count_linadjusts;
                }

                /*
                 * XXX this fine-grained measure still has some issues.
                 */
                if (live->linear < bindex * HAMMER2_FREEMAP_BLOCK_SIZE) {
                        live->linear = bindex * HAMMER2_FREEMAP_BLOCK_SIZE;
                        ++cbinfo->count_linadjusts;
                }
        } else {
                live->linear = HAMMER2_SEGSIZE;
        }

#if 0
        if (bmap->class) {
                kprintf("%016jx %04d.%04x (avail=%7d) "
                        "%08x %08x %08x %08x %08x %08x %08x %08x\n",
                        (intmax_t)data_off,
                        (int)((data_off &
                               HAMMER2_FREEMAP_LEVEL1_MASK) >>
                              HAMMER2_FREEMAP_LEVEL0_RADIX),
                        bmap->class,
                        bmap->avail,
                        bmap->bitmap[0], bmap->bitmap[1],
                        bmap->bitmap[2], bmap->bitmap[3],
                        bmap->bitmap[4], bmap->bitmap[5],
                        bmap->bitmap[6], bmap->bitmap[7]);
        }
#endif
}
/*
 * BULKFREE DEDUP HEURISTIC
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *          All fields must be loaded into locals and validated.
 */
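/*
 * The heuristic table behaves like an 8-way set-associative cache: the crc
 * of bref->data_off selects an 8-entry bucket.  A hit bumps the entry's
 * ticks (priority) and tells the caller to skip the bref; a miss evicts the
 * entry with the lowest ticks.
 */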
static
int
h2_bulkfree_test(hammer2_bulkfree_info_t *cbinfo, hammer2_blockref_t *bref,
                 int pri)
{
        hammer2_dedup_t *dedup;
        int best;
        int n;
        int i;

        n = hammer2_icrc32(&bref->data_off, sizeof(bref->data_off));
        dedup = cbinfo->dedup + (n & (HAMMER2_DEDUP_HEUR_MASK & ~7));

        for (i = best = 0; i < 8; ++i) {
                if (dedup[i].data_off == bref->data_off) {
                        if (dedup[i].ticks < pri)
                                dedup[i].ticks = pri;
                        if (pri == 1)
                                cbinfo->count_dedup_factor += dedup[i].ticks;
                        return 1;
                }
                if (dedup[i].ticks < dedup[best].ticks)
                        best = i;
        }
        dedup[best].data_off = bref->data_off;
        dedup[best].ticks = pri;

        return 0;
}