/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "crypto/hash.h"
/* HBitmap provides an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split into groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time on most current architectures.
 *
 * Setting or clearing a range of m bits on all levels, the work to perform
 * is O(m + m/W + m/W^2 + ...), which is O(m) like on a regular bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
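/* Worked example (illustrative, with numbers chosen here for concreteness):
 * with BITS_PER_LONG == 64, the bit index 1000003 splits as
 *
 *   last level:     word 1000003 >> 6 == 15625, bit 1000003 & 63 == 3
 *   2nd-last level: word 15625 >> 6 == 244,     bit 15625 & 63 == 9
 *
 * so moving one level up is a single 6-bit right shift of the index.
 */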
struct HBitmap {
    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * will actually represent a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=6)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};
/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}
int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    /* The next call will resume work from the next bit.  */
    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}
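/* Illustrative usage sketch (process() is a hypothetical caller-provided
 * function, not part of this API): walking every set item in order.
 *
 *     HBitmapIter hbi;
 *     int64_t item;
 *
 *     hbitmap_iter_init(&hbi, hb, 0);
 *     while ((item = hbitmap_iter_next(&hbi)) >= 0) {
 *         process(item);
 *     }
 *
 * hbitmap_iter_next() returns -1 once no set bit remains at or after the
 * current position.
 */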
void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}
int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
{
    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
    unsigned long cur = last_lev[pos];
    unsigned start_bit_offset =
        (start >> hb->granularity) & (BITS_PER_LONG - 1);
    int64_t res;

    cur |= (1UL << start_bit_offset) - 1;
    assert((start >> hb->granularity) < hb->size);

    if (cur == (unsigned long)-1) {
        do {
            pos++;
        } while (pos < sz && last_lev[pos] == (unsigned long)-1);

        if (pos >= sz) {
            return -1;
        }

        cur = last_lev[pos];
    }

    res = (pos << BITS_PER_LEVEL) + ctol(cur);
    if (res >= hb->size) {
        return -1;
    }

    res = res << hb->granularity;
    if (res < start) {
        assert(((start - res) >> hb->granularity) == 0);
        res = start;
    }

    return res;
}
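/* Worked example (illustrative): with granularity 0 and items 0, 1 and 5
 * set, the last-level word is 0b100011.  Starting from start == 0, no low
 * bits are masked in, ctol (count trailing ones) gives 2, so
 * hbitmap_next_zero(hb, 0) returns 2: the first clear item at or after
 * start.
 */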
bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}
/* Count the number of set bits between start and end, not accounting for
 * the granularity.  Also an example of how to use hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}
/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long changed;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    changed = *elem & mask;
    *elem |= mask;
    return changed != mask;
}
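/* Worked example (illustrative): for start == 3 and last == 5 within one
 * word, mask == (2UL << 5) - (1UL << 3) == 0x40 - 0x08 == 0x38, i.e.
 * exactly bits 3, 4 and 5.
 */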
/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}
void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}
/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}
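/* Worked example (illustrative): resetting bits 3-5 of a word holding 0x38
 * yields mask == 0x38; *elem is nonzero and (*elem & ~mask) == 0, so
 * blanked is true and the summary bit in the level above may be cleared.
 */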
/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}
void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}
void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}
bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_align() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_align() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}
bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}
uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and
     * 32 bit hosts. */
    return UINT64_C(64) << hb->granularity;
}
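/* Example (illustrative): with hb->granularity == 16, the alignment is
 * 64 << 16 == 4194304, so serialized chunks must start (and, except for the
 * final chunk, end) at multiples of 4194304 items.
 */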
/* Start should be aligned to serialization granularity, chunk size should be
 * aligned to serialization granularity too, except for last chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_align(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}
uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}
void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}
void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}
void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}
void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0xff, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}
void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* restore levels starting from penultimate to zero level, assuming
     * that the last level is ok */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
}
void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}
HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
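/* Illustrative usage sketch (the granularity value is assumed, not
 * prescribed): track a 1 GiB device in byte items with one bit per 64 KiB
 * cluster.
 *
 *     HBitmap *map = hbitmap_alloc(1ULL << 30, 16); // 2^30 items, 2^16 each
 *     hbitmap_set(map, 0x12345, 1);                 // dirty a single byte
 *     // hbitmap_get(map, 0x12345) is now true
 *     hbitmap_free(map);
 */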
void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants. This helps keep the bitcount consistent, and will prevent
     * us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long));
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}
/**
 * Given HBitmaps A and B, let A := A (BITOR) B.
 * Bitmap B will not be modified.
 *
 * @return true if the merge was successful,
 *         false if it was not attempted.
 */
bool hbitmap_merge(HBitmap *a, const HBitmap *b)
{
    int i;
    uint64_t j;

    if ((a->size != b->size) || (a->granularity != b->granularity)) {
        return false;
    }

    if (hbitmap_count(b) == 0) {
        return true;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            a->levels[i][j] |= b->levels[i][j];
        }
    }

    /* Recompute the cached population count, which the OR above invalidates. */
    a->count = hb_count_between(a, 0, a->size - 1);

    return true;
}
*hbitmap_create_meta(HBitmap
*hb
, int chunk_size
)
757 assert(!(chunk_size
& (chunk_size
- 1)));
759 hb
->meta
= hbitmap_alloc(hb
->size
<< hb
->granularity
,
760 hb
->granularity
+ ctz32(chunk_size
));
764 void hbitmap_free_meta(HBitmap
*hb
)
767 hbitmap_free(hb
->meta
);
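/* Example (illustrative): calling hbitmap_create_meta(hb, 8) gives
 * ctz32(8) == 3, so each bit of the meta bitmap covers 8 last-level bits of
 * this bitmap, i.e. the meta bitmap has granularity hb->granularity + 3.
 */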
char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
{
    size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
    char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
    char *hash = NULL;
    qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);

    return hash;
}