/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "crypto/hash.h"

/* HBitmaps provide an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split into groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time in most current architectures.
 *
 * When setting or clearing a range of m bits on all levels, the work to
 * perform is O(m + m/W + m/W^2 + ...), which is O(m) like on a regular
 * bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
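
/* Illustrative sketch (not part of the original file; the function below is
 * hypothetical): the index split described above, for the 64-bit case where
 * BITS_PER_LEVEL == 6.  Moving up one level shifts the index right; moving
 * back down shifts left and adds the bit position found within the word. */
#if 0
static void hbitmap_index_split_example(uint64_t index)
{
    uint64_t word = index >> BITS_PER_LEVEL;     /* word in this level */
    unsigned bit = index & (BITS_PER_LONG - 1);  /* bit within that word */

    /* Recombining the two recovers the original index. */
    assert(((word << BITS_PER_LEVEL) + bit) == index);
}
#endif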

struct HBitmap {
    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * actually represents a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};
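
/* A minimal usage sketch (illustration, not part of the original file; the
 * function below is hypothetical): the size-16, granularity-1 example from
 * the comment above, written against the public API. */
#if 0
static void hbitmap_granularity_example(void)
{
    HBitmap *hb = hbitmap_alloc(16, 1);   /* 16 elements, 2 elements per bit */

    hbitmap_set(hb, 0, 9);                /* marks groups 0-4 */
    hbitmap_reset(hb, 1, 3);              /* clears groups 0-1 */
    assert(hbitmap_get(hb, 4));           /* element 4 lives in group 2 */
    assert(hbitmap_count(hb) == 6);       /* 3 set groups * 2 elements each */
    hbitmap_free(hb);
}
#endif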

/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

int64_t hbitmap_iter_next(HBitmapIter *hbi, bool advance)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    if (advance) {
        /* The next call will resume work from the next bit.  */
        hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    } else {
        hbi->cur[HBITMAP_LEVELS - 1] = cur;
    }
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}

void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}
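
/* A minimal iteration sketch (illustration, not part of the original file;
 * the function below is hypothetical): visit every dirty group starting at
 * item `first`.  hbitmap_iter_next() returns item numbers already scaled
 * back up by the granularity, and -1 at the end; passing advance=true
 * consumes each bit so the next call moves forward. */
#if 0
static void hbitmap_iterate_example(const HBitmap *hb, uint64_t first)
{
    HBitmapIter hbi;
    int64_t item;

    hbitmap_iter_init(&hbi, hb, first);
    while ((item = hbitmap_iter_next(&hbi, true)) != -1) {
        printf("dirty item: %" PRId64 "\n", item);
    }
}
#endif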

int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
{
    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
    unsigned long cur = last_lev[pos];
    unsigned start_bit_offset =
        (start >> hb->granularity) & (BITS_PER_LONG - 1);
    int64_t res;

    cur |= (1UL << start_bit_offset) - 1;
    assert((start >> hb->granularity) < hb->size);

    if (cur == (unsigned long)-1) {
        do {
            pos++;
        } while (pos < sz && last_lev[pos] == (unsigned long)-1);

        if (pos >= sz) {
            return -1;
        }

        cur = last_lev[pos];
    }

    /* ctol() counts the trailing one bits, i.e. the index of the first
     * zero bit in the word.  */
    res = (pos << BITS_PER_LEVEL) + ctol(cur);
    if (res >= hb->size) {
        return -1;
    }

    res = res << hb->granularity;
    if (res < start) {
        assert(((start - res) >> hb->granularity) == 0);
        return start;
    }

    return res;
}
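
/* Worked example (illustration, not part of the original file): with
 * granularity 0 and a last-level word ending in ...0111, ctol() counts the
 * three trailing ones, so hbitmap_next_zero(hb, 0) returns 3, the first
 * clear bit.  With granularity 1 the same word would yield item 6 (3 << 1). */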

bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}

/* Count the number of set bits between start and last, inclusive, not
 * accounting for the granularity.  Also an example of how to use
 * hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}

/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}
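
/* Worked example (illustration, not part of the original file): the mask
 * built above covers bits start..last inclusive within one word.  For
 * start=2, last=5:
 *
 *    2UL << 5          = 01000000
 *    1UL << 2          = 00000100
 *    mask (difference) = 00111100    (bits 2, 3, 4 and 5)
 */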

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}
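
/* Worked example (illustration, not part of the original file): with
 * granularity 0, if bits 4-7 are already set and hbitmap_set(hb, 6, 4) is
 * called, then n = 4 but hb_count_between(hb, 6, 9) = 2, so hb->count grows
 * by exactly the two newly set bits (8 and 9). */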

/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}

bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_align() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_align() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}

bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}

uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
     * hosts. */
    return UINT64_C(64) << hb->granularity;
}
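
/* Worked example (illustration, not part of the original file): with
 * granularity 2 the alignment is 64 << 2 = 256, i.e. every serialized
 * 64-bit word covers 256 items, so chunk offsets and sizes must be
 * multiples of 256 items (except for the final chunk). */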

/* Start should be aligned to serialization granularity, chunk size should be
 * aligned to serialization granularity too, except for last chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_align(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}

uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}

void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}

void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0xff, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* restore levels starting from penultimate to zero level, assuming
     * that the last level is ok */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    bitmap->count = hb_count_between(bitmap, 0, bitmap->size - 1);
}
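
/* A minimal round-trip sketch (illustration, not part of the original file;
 * `src`, `dst` and `nb_items` are hypothetical): both bitmaps must have the
 * same size and granularity, and `nb_items` must either be aligned to
 * hbitmap_serialization_align(src) or reach the end of the bitmap. */
#if 0
static void hbitmap_roundtrip_example(const HBitmap *src, HBitmap *dst,
                                      uint64_t nb_items)
{
    uint64_t len = hbitmap_serialization_size(src, 0, nb_items);
    uint8_t *buf = g_malloc(len);

    hbitmap_serialize_part(src, buf, 0, nb_items);
    /* finish=true rebuilds the upper levels and the dirty count. */
    hbitmap_deserialize_part(dst, buf, 0, nb_items, true);
    g_free(buf);
}
#endif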

void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
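
/* Worked example (illustration, not part of the original file): on a 64-bit
 * host, hbitmap_alloc(1 << 20, 0) sizes the bottom levels at 16384, 256, 4
 * and 1 words; every higher level stays at the 1-word minimum, and the
 * sentinel bit lands in the single level-0 word. */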

void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants. This helps keep the bitcount consistent, and will prevent
     * us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long));
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}

bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
{
    return (a->size == b->size) && (a->granularity == b->granularity);
}

/**
 * Given HBitmaps A and B, let R := A (BITOR) B.
 * Bitmaps A and B will not be modified, except when result is an alias
 * of one of them.
 *
 * @return true if the merge was successful,
 *         false if it was not attempted.
 */
bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
{
    int i;
    uint64_t j;

    if (!hbitmap_can_merge(a, b) || !hbitmap_can_merge(a, result)) {
        return false;
    }
    assert(hbitmap_can_merge(b, result));

    if (hbitmap_count(b) == 0) {
        return true;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
        }
    }

    /* Recompute the dirty count */
    result->count = hb_count_between(result, 0, result->size - 1);

    return true;
}
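
/* Usage sketch (illustration, not part of the original file; the function
 * below is hypothetical): `result` may alias `a`, which ORs `b` into `a` in
 * place; a false return means the bitmaps' sizes or granularities differ
 * and nothing was merged. */
#if 0
static void hbitmap_merge_example(HBitmap *a, const HBitmap *b)
{
    if (!hbitmap_merge(a, b, a)) {
        /* incompatible shapes: sizes or granularities differ */
    }
}
#endif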

HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
{
    assert(!(chunk_size & (chunk_size - 1)));
    assert(!hb->meta);
    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
                             hb->granularity + ctz32(chunk_size));
    return hb->meta;
}
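
/* Usage sketch (illustration, not part of the original file; the function
 * below is hypothetical): a meta bitmap created with chunk_size 8 flags
 * which 8-bit chunks of the main bitmap have changed.  Assuming `hb` starts
 * out empty, setting one bit dirties the corresponding meta chunk. */
#if 0
static void hbitmap_meta_example(HBitmap *hb)
{
    HBitmap *meta = hbitmap_create_meta(hb, 8);  /* chunk_size: power of 2 */

    hbitmap_set(hb, 0, 1);
    assert(hbitmap_get(meta, 0));                /* chunk 0 is now dirty */
    hbitmap_free_meta(hb);
}
#endif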

void hbitmap_free_meta(HBitmap *hb)
{
    assert(hb->meta);
    hbitmap_free(hb->meta);
    hb->meta = NULL;
}

char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
{
    size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
    char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
    char *hash = NULL;
    qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);

    return hash;
}