// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
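/*
 * Overview: an sbitmap spreads its bits over an array of words sized by the
 * shift so that concurrent allocators touch different cachelines. A minimal
 * usage sketch (illustrative only; error handling elided; sbitmap_clear_bit()
 * and sbitmap_free() are declared in <linux/sbitmap.h>):
 *
 *	struct sbitmap sb;
 *
 *	if (!sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE)) {
 *		int nr = sbitmap_get(&sb, 0, false);
 *
 *		if (nr >= 0)
 *			sbitmap_clear_bit(&sb, nr);
 *		sbitmap_free(&sb);
 *	}
 */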
/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&sb->map[index].cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}
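/*
 * Initialize @sb on @node. Passing a negative @shift picks the word size
 * automatically: for example (an illustrative calculation), with
 * BITS_PER_LONG == 64 and depth == 16, shift starts at ilog2(64) == 6 and is
 * decremented until (4U << shift) <= depth, ending at shift == 2, i.e. four
 * words of 4 bits each rather than one sparsely used 64-bit word.
 */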
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word
		 * so we spread over a few cachelines, at least. If less than
		 * 4 bits, just forget about it, it's not going to work
		 * optimally anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
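/*
 * Resize @sb in place after flushing any deferred clears. Note that this
 * does not reallocate the map: the caller is expected not to grow the depth
 * beyond what the sbitmap was initialized with.
 */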
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
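/*
 * Find and atomically claim a zero bit in @word starting from @hint,
 * optionally wrapping back to bit 0 when @wrap is true. Returns the bit
 * number on success or -1 if the word is exhausted.
 */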
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}
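/*
 * Try one word of the map, flushing that word's deferred clears and
 * retrying until either a bit is claimed or no clears remain.
 */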
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}
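/*
 * Allocate a free bit anywhere in @sb, starting the search at @alloc_hint.
 * In round-robin mode the search continues within the hinted word;
 * otherwise the hint only selects the starting word.
 */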
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
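/*
 * Like sbitmap_get(), but each word is limited to
 * min(word depth, @shallow_depth) usable bits, which bounds how much of
 * the map a single caller class can consume.
 */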
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
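/*
 * Returns true if any bit is set and not already covered by a pending
 * deferred clear.
 */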
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}
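/* Dump the counters above in a debugfs-friendly key=value format. */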
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);
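/*
 * Helper for sbitmap_bitmap_show() below, which renders the raw words as a
 * hexdump-style view: an offset column every 16 bytes and a space every
 * other byte.
 */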
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
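/*
 * Worked example for the math above (illustrative numbers): with shift == 6
 * (64 bits per word), depth == 256 and min_shallow_depth == 4, each word
 * contributes at most 4 usable bits, so the limited depth is
 * (256 >> 6) * 4 + min(256 & 63, 4) = 16, and with the default of
 * SBQ_WAIT_QUEUES == 8 queues, wake_batch = clamp(16 / 8, 1, 8) = 2.
 */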
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
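/*
 * Fast-path allocation: seed the search with this CPU's cached hint, and
 * refresh the hint depending on whether it was actually used.
 */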
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
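/*
 * Shallow-depth variant of __sbitmap_queue_get(). Callers must keep
 * @shallow_depth at or above sbq->min_shallow_depth (set via
 * sbitmap_queue_min_shallow_depth()), which the WARN_ON_ONCE below checks,
 * so that the wake batch stays correct.
 */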
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
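/*
 * Pick the next wait queue to wake, scanning from wake_index and caching
 * the first active one found. Returns NULL if no waiters are active.
 */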
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}
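/*
 * Returns true if the caller should retry on another wait queue, i.e. when
 * this caller lost the atomic_cmpxchg() race that resets the wait count.
 */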
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as a request
	 * in blk-mq) by this bit to avoid racing with re-allocation; its
	 * pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
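/*
 * The sbq_wait helpers below wrap the plain wait-queue API so that
 * sbq->ws_active tracks how many waiters are registered, letting
 * sbq_wake_ptr() skip the scan entirely when nobody is waiting.
 */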
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
	}
	add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);