fs/ext4/mballoc.c
1 /*
2 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3 * Written by Alex Tomas <alex@clusterfs.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 * mballoc.c contains the multiblock allocation routines
24 #include "mballoc.h"
26 * MUSTDO:
27 * - test ext4_ext_search_left() and ext4_ext_search_right()
28 * - search for metadata in few groups
30 * TODO v4:
31 * - normalization should take into account whether file is still open
32 * - discard preallocations if no free space left (policy?)
33 * - don't normalize tails
34 * - quota
35 * - reservation for superuser
37 * TODO v3:
38 * - bitmap read-ahead (proposed by Oleg Drokin aka green)
39 * - track min/max extents in each group for better group selection
40 * - mb_mark_used() may allocate chunk right after splitting buddy
41 * - tree of groups sorted by number of free blocks
42 * - error handling
46 * An allocation request involves a request for multiple blocks
47 * near the specified goal block.
49 * During the initialization phase of the allocator we decide to use
50 * group preallocation or inode preallocation depending on the size
51 * of the file. The size of the file could be the resulting file size we
52 * would have after allocation, or the current file size, whichever is
53 * larger. If the size is less than sbi->s_mb_stream_request we select
54 * group preallocation. The default value of s_mb_stream_request is
55 * 16 blocks. This can also be tuned via
56 * /proc/fs/ext4/<partition>/stream_req. The value is represented in
57 * terms of number of blocks.
59 * The main motivation for having small files use group preallocation is
60 * to ensure that small files are placed close together on the disk.
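 *
 * In code, this decision reduces to roughly the following (an
 * illustrative sketch of the description above, not verbatim kernel
 * code; the real logic is in ext4_mb_regular_allocator below):
 *
 *	size = max(size after this allocation, current i_size in blocks);
 *	if (size < sbi->s_mb_stream_request &&
 *	    (ac->ac_flags & EXT4_MB_HINT_DATA))
 *		/* small file: use the per-CPU locality group prealloc */
 *	else
 *		/* large file: use the per-inode prealloc */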
62 * In the first stage the allocator looks at the inode prealloc list,
63 * ext4_inode_info->i_prealloc_list, which contains the prealloc spaces
64 * for this particular inode. An inode prealloc space is represented as:
66 * pa_lstart -> the logical start block for this prealloc space
67 * pa_pstart -> the physical start block for this prealloc space
68 * pa_len -> length of this prealloc space
69 * pa_free -> free space available in this prealloc space
71 * The inode preallocation space is used looking at the _logical_ start
72 * block. Only if the logical file block falls within the range of a
73 * prealloc space do we consume that prealloc space. This makes sure
74 * that we have contiguous physical blocks representing the file blocks.
76 * The important thing to note about the inode prealloc space is that
77 * we don't modify the values associated with it except
78 * pa_free.
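 *
 * For example, whether a logical file block 'lblk' can be served from
 * a given inode prealloc space reduces to a simple range check
 * (illustrative sketch; the real struct ext4_prealloc_space lives in
 * mballoc.h and carries additional locking and list fields):
 *
 *	if (pa->pa_lstart <= lblk &&
 *	    lblk < pa->pa_lstart + pa->pa_len &&
 *	    pa->pa_free > 0)
 *		/* consume blocks from this prealloc space */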
80 * If we are not able to find blocks in the inode prealloc space, and we
81 * have the group allocation flag set, then we look at the locality group
82 * prealloc space. These are per-CPU prealloc lists represented as
84 * ext4_sb_info.s_locality_groups[smp_processor_id()]
86 * The reason for having a per-CPU locality group is to reduce the
87 * contention between CPUs. It is possible to get scheduled at this point.
89 * The locality group prealloc space is used by looking at whether we
90 * have enough free space (pa_free) within the prealloc space.
92 * If we can't allocate blocks via the inode prealloc and/or the locality
93 * group prealloc then we look at the buddy cache. The buddy cache is
94 * represented by ext4_sb_info.s_buddy_cache (struct inode) whose file
95 * offsets get mapped to the buddy and bitmap information of the
96 * different groups. The buddy information is attached to the buddy
97 * cache inode so that we can access it through the page cache. The
98 * information for each group is loaded via ext4_mb_load_buddy and
99 * involves the block bitmap and the buddy information, stored in the
100 * inode as:
102 * { page }
103 * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
106 * one block each for the bitmap and the buddy information. So for each
107 * group we take up 2 blocks. A page can contain blocks_per_page
108 * (PAGE_CACHE_SIZE / blocksize) blocks, so it can hold information for
109 * groups_per_page groups, which is blocks_per_page/2.
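 *
 * The group-to-page mapping is thus plain arithmetic (sketch; the same
 * math appears in ext4_mb_load_buddy below):
 *
 *	block = group * 2;                 /* 2 blocks per group      */
 *	pnum  = block / blocks_per_page;   /* page holding the block  */
 *	poff  = block % blocks_per_page;   /* offset within the page  */
 *	/* block 'block' holds the bitmap, 'block + 1' the buddy */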
111 * The buddy cache inode is not stored on disk. The inode is thrown
112 * away when the filesystem is unmounted.
114 * We look for count number of blocks in the buddy cache. If we are able
115 * to locate that many free blocks we return with additional information
116 * about the rest of the contiguous physical blocks available.
118 * Before allocating blocks via the buddy cache we normalize the request
119 * blocks. This ensures we ask for more blocks than we need. The extra
120 * blocks that we get after allocation are added to the respective
121 * prealloc list. In case of inode preallocation we follow a set of
122 * heuristics based on file size. This can be found in
123 * ext4_mb_normalize_request. If we are doing group preallocation we try
124 * to normalize the request to sbi->s_mb_group_prealloc. The default
125 * value of s_mb_group_prealloc is 512 blocks. This can be tuned via
126 * /proc/fs/ext4/<partition>/group_prealloc. The value is represented in
127 * terms of number of blocks. If we have mounted the file system with the
128 * -o stripe=<value> option the group prealloc request is normalized to
129 * the stripe value (sbi->s_stripe).
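 *
 * So the normalized length for a group preallocation is, roughly
 * (illustrative sketch):
 *
 *	if (sbi->s_stripe)
 *		ac->ac_g_ex.fe_len = sbi->s_stripe;
 *	else
 *		ac->ac_g_ex.fe_len = sbi->s_mb_group_prealloc;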
131 * The regular allocator (using the buddy cache) supports a few tunables.
133 * /proc/fs/ext4/<partition>/min_to_scan
134 * /proc/fs/ext4/<partition>/max_to_scan
135 * /proc/fs/ext4/<partition>/order2_req
137 * The regular allocator uses a buddy scan only if the request length is
138 * a power of 2 blocks and the order of allocation is >=
139 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
140 * /proc/fs/ext4/<partition>/order2_req. If the request length is equal
141 * to the stripe size (sbi->s_stripe), we try to search for contiguous
142 * blocks in stripe size units. This should result in better allocation
143 * on RAID setups. If not, we search in the specific group using the
144 * bitmap for best extents. The tunables min_to_scan and max_to_scan
145 * control the behaviour here. min_to_scan indicates how long mballoc
146 * __must__ look for a best extent and max_to_scan indicates how long
147 * mballoc __can__ look for a best extent among the found extents.
148 * Searching for blocks starts with the group specified as the goal value
149 * in the allocation context via ac_g_ex. Each group is first checked
150 * for whether it can be used for allocation. ext4_mb_good_group
151 * explains how the groups are checked.
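 *
 * The power-of-2 eligibility check looks roughly as follows (sketch;
 * the real code is in ext4_mb_regular_allocator below):
 *
 *	i = fls(ac->ac_g_ex.fe_len);
 *	ac->ac_2order = 0;
 *	if (i >= sbi->s_mb_order2_reqs &&
 *	    (ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
 *		ac->ac_2order = i - 1;	/* request is exactly 2^(i-1) */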
153 * Both prealloc spaces get populated as above. So for the first
154 * request we will hit the buddy cache, which will result in this
155 * prealloc space getting filled. The prealloc space is then later
156 * used for subsequent requests.
160 * mballoc operates on the following data:
161 * - on-disk bitmap
162 * - in-core buddy (actually includes buddy and bitmap)
163 * - preallocation descriptors (PAs)
165 * there are two types of preallocations:
166 * - inode
167 * assigned to a specific inode and can be used for this inode only.
168 * it describes part of the inode's space preallocated to specific
169 * physical blocks. any block from that preallocation can be used
170 * independently. the descriptor just tracks the number of blocks left
171 * unused. so, before taking some block from the descriptor, one must
172 * make sure the corresponding logical block isn't allocated yet. this
173 * also means that freeing any block within the descriptor's range
174 * must discard all preallocated blocks.
175 * - locality group
176 * assigned to a specific locality group which does not translate to a
177 * permanent set of inodes: an inode can join and leave a group. space
178 * from this type of preallocation can be used for any inode. thus
179 * it's consumed from the beginning to the end.
181 * relation between them can be expressed as:
182 * in-core buddy = on-disk bitmap + preallocation descriptors
184 * this means the blocks mballoc considers used are:
185 * - allocated blocks (persistent)
186 * - preallocated blocks (non-persistent)
188 * consistency in mballoc world means that at any time a block is either
189 * free or used in ALL structures. notice: "any time" should not be read
190 * literally -- time is discrete and delimited by locks.
192 * to keep it simple, we don't use block numbers, instead we count number of
193 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
195 * all operations can be expressed as:
196 * - init buddy:                 buddy = on-disk + PAs
197 * - new PA:                     buddy += N; PA = N
198 * - use inode PA:               on-disk += N; PA -= N
199 * - discard inode PA:           buddy -= on-disk - PA; PA = 0
200 * - use locality group PA:      on-disk += N; PA -= N
201 * - discard locality group PA:  buddy -= PA; PA = 0
202 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
203 * is used in real operation because we can't know actual used
204 * bits from PA, only from on-disk bitmap
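 *
 * A worked example of the arithmetic above: suppose a group has 100
 * blocks marked used on disk and a new PA of N = 16 blocks is created.
 * The buddy then shows 116 blocks used (buddy += 16, PA = 16). After
 * 10 blocks are allocated from the PA, on-disk shows 110 used and
 * PA = 6; discarding the PA at that point returns the remaining
 * 6 blocks to the buddy, so buddy and on-disk agree at 110.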
206 * if we follow this strict logic, then all operations above should be atomic.
207 * given some of them can block, we'd have to use something like semaphores
208 * killing performance on high-end SMP hardware. let's try to relax it using
209 * the following knowledge:
210 * 1) if buddy is referenced, it's already initialized
211 * 2) while block is used in buddy and the buddy is referenced,
212 * nobody can re-allocate that block
213 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
214 * a bit set and a PA claims the same block, it's OK. IOW, one can set a
215 * bit in the on-disk bitmap if the buddy has the same bit set and/or a
216 * PA covers the corresponding block.
218 * so, now we're building a concurrency table:
219 * - init buddy vs.
220 * - new PA
221 * blocks for PA are allocated in the buddy, buddy must be referenced
222 * until PA is linked to allocation group to avoid concurrent buddy init
223 * - use inode PA
224 * we need to make sure that either on-disk bitmap or PA has uptodate data
225 * given (3) we care that PA-=N operation doesn't interfere with init
226 * - discard inode PA
227 * the simplest way would be to have buddy initialized by the discard
228 * - use locality group PA
229 * again PA-=N must be serialized with init
230 * - discard locality group PA
231 * the simplest way would be to have buddy initialized by the discard
232 * - new PA vs.
233 * - use inode PA
234 * i_data_sem serializes them
235 * - discard inode PA
236 * discard process must wait until PA isn't used by another process
237 * - use locality group PA
238 * some mutex should serialize them
239 * - discard locality group PA
240 * discard process must wait until PA isn't used by another process
241 * - use inode PA
242 * - use inode PA
243 * i_data_sem or another mutex should serialize them
244 * - discard inode PA
245 * discard process must wait until PA isn't used by another process
246 * - use locality group PA
247 * nothing wrong here -- they're different PAs covering different blocks
248 * - discard locality group PA
249 * discard process must wait until PA isn't used by another process
251 * now we're ready to draw a few conclusions:
252 * - while a PA is referenced, no discard of it is possible
253 * - a PA is referenced until its blocks are marked in the on-disk bitmap
254 * - a PA changes only after the on-disk bitmap does
255 * - discard must not compete with init. either init is done before
256 * any discard or they're serialized somehow
257 * - buddy init as sum of on-disk bitmap and PAs is done atomically
259 * a special case is when we've used a PA to emptiness. no need to modify
260 * the buddy in this case, but we should care about concurrent init.
265 * Logic in a few words:
267 * - allocation:
268 * load group
269 * find blocks
270 * mark bits in on-disk bitmap
271 * release group
273 * - use preallocation:
274 * find proper PA (per-inode or group)
275 * load group
276 * mark bits in on-disk bitmap
277 * release group
278 * release PA
280 * - free:
281 * load group
282 * mark bits in on-disk bitmap
283 * release group
285 * - discard preallocations in group:
286 * mark PAs deleted
287 * move them onto local list
288 * load on-disk bitmap
289 * load group
290 * remove PA from object (inode or locality group)
291 * mark free blocks in-core
293 * - discard inode's preallocations:
297 * Locking rules
299 * Locks:
300 * - bitlock on a group (group)
301 * - object (inode/locality) (object)
302 * - per-pa lock (pa)
304 * Paths:
305 * - new pa
306 * object
307 * group
309 * - find and use pa:
310 * pa
312 * - release consumed pa:
313 * pa
314 * group
315 * object
317 * - generate in-core bitmap:
318 * group
319 * pa
321 * - discard all for given object (inode, locality group):
322 * object
323 * pa
324 * group
326 * - discard all for given group:
327 * group
328 * pa
329 * group
330 * object
334 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
336 #if BITS_PER_LONG == 64
337 *bit += ((unsigned long) addr & 7UL) << 3;
338 addr = (void *) ((unsigned long) addr & ~7UL);
339 #elif BITS_PER_LONG == 32
340 *bit += ((unsigned long) addr & 3UL) << 3;
341 addr = (void *) ((unsigned long) addr & ~3UL);
342 #else
343 #error "how many bits you are?!"
344 #endif
345 return addr;
348 static inline int mb_test_bit(int bit, void *addr)
351 * ext4_test_bit on architectures like powerpc
352 * needs an unsigned long aligned address
354 addr = mb_correct_addr_and_bit(&bit, addr);
355 return ext4_test_bit(bit, addr);
358 static inline void mb_set_bit(int bit, void *addr)
360 addr = mb_correct_addr_and_bit(&bit, addr);
361 ext4_set_bit(bit, addr);
364 static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
366 addr = mb_correct_addr_and_bit(&bit, addr);
367 ext4_set_bit_atomic(lock, bit, addr);
370 static inline void mb_clear_bit(int bit, void *addr)
372 addr = mb_correct_addr_and_bit(&bit, addr);
373 ext4_clear_bit(bit, addr);
376 static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
378 addr = mb_correct_addr_and_bit(&bit, addr);
379 ext4_clear_bit_atomic(lock, bit, addr);
382 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
384 int fix = 0, ret, tmpmax;
385 addr = mb_correct_addr_and_bit(&fix, addr);
386 tmpmax = max + fix;
387 start += fix;
389 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
390 if (ret > max)
391 return max;
392 return ret;
395 static inline int mb_find_next_bit(void *addr, int max, int start)
397 int fix = 0, ret, tmpmax;
398 addr = mb_correct_addr_and_bit(&fix, addr);
399 tmpmax = max + fix;
400 start += fix;
402 ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
403 if (ret > max)
404 return max;
405 return ret;
408 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
410 char *bb;
412 BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
413 BUG_ON(max == NULL);
415 if (order > e4b->bd_blkbits + 1) {
416 *max = 0;
417 return NULL;
420 /* at order 0 we see each particular block */
421 *max = 1 << (e4b->bd_blkbits + 3);
422 if (order == 0)
423 return EXT4_MB_BITMAP(e4b);
425 bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
426 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
428 return bb;
431 #ifdef DOUBLE_CHECK
432 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
433 int first, int count)
435 int i;
436 struct super_block *sb = e4b->bd_sb;
438 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
439 return;
440 BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
441 for (i = 0; i < count; i++) {
442 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
443 ext4_fsblk_t blocknr;
444 blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
445 blocknr += first + i;
446 blocknr +=
447 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
449 ext4_error(sb, __func__, "double-free of inode"
450 " %lu's block %llu(bit %u in group %u)",
451 inode ? inode->i_ino : 0, blocknr,
452 first + i, e4b->bd_group);
454 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
458 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
460 int i;
462 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
463 return;
464 BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
465 for (i = 0; i < count; i++) {
466 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
467 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
471 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
473 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
474 unsigned char *b1, *b2;
475 int i;
476 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
477 b2 = (unsigned char *) bitmap;
478 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
479 if (b1[i] != b2[i]) {
480 printk(KERN_ERR "corruption in group %u "
481 "at byte %u(%u): %x in copy != %x "
482 "on disk/prealloc\n",
483 e4b->bd_group, i, i * 8, b1[i], b2[i]);
484 BUG();
490 #else
491 static inline void mb_free_blocks_double(struct inode *inode,
492 struct ext4_buddy *e4b, int first, int count)
494 return;
496 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
497 int first, int count)
499 return;
501 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
503 return;
505 #endif
507 #ifdef AGGRESSIVE_CHECK
509 #define MB_CHECK_ASSERT(assert) \
510 do { \
511 if (!(assert)) { \
512 printk(KERN_EMERG \
513 "Assertion failure in %s() at %s:%d: \"%s\"\n", \
514 function, file, line, # assert); \
515 BUG(); \
517 } while (0)
519 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
520 const char *function, int line)
522 struct super_block *sb = e4b->bd_sb;
523 int order = e4b->bd_blkbits + 1;
524 int max;
525 int max2;
526 int i;
527 int j;
528 int k;
529 int count;
530 struct ext4_group_info *grp;
531 int fragments = 0;
532 int fstart;
533 struct list_head *cur;
534 void *buddy;
535 void *buddy2;
538 static int mb_check_counter;
539 if (mb_check_counter++ % 100 != 0)
540 return 0;
543 while (order > 1) {
544 buddy = mb_find_buddy(e4b, order, &max);
545 MB_CHECK_ASSERT(buddy);
546 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
547 MB_CHECK_ASSERT(buddy2);
548 MB_CHECK_ASSERT(buddy != buddy2);
549 MB_CHECK_ASSERT(max * 2 == max2);
551 count = 0;
552 for (i = 0; i < max; i++) {
554 if (mb_test_bit(i, buddy)) {
555 /* only single bit in buddy2 may be 1 */
556 if (!mb_test_bit(i << 1, buddy2)) {
557 MB_CHECK_ASSERT(
558 mb_test_bit((i<<1)+1, buddy2));
559 } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
560 MB_CHECK_ASSERT(
561 mb_test_bit(i << 1, buddy2));
563 continue;
566 /* both bits in buddy2 must be 0 */
567 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
568 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
570 for (j = 0; j < (1 << order); j++) {
571 k = (i * (1 << order)) + j;
572 MB_CHECK_ASSERT(
573 !mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
575 count++;
577 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
578 order--;
581 fstart = -1;
582 buddy = mb_find_buddy(e4b, 0, &max);
583 for (i = 0; i < max; i++) {
584 if (!mb_test_bit(i, buddy)) {
585 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
586 if (fstart == -1) {
587 fragments++;
588 fstart = i;
590 continue;
592 fstart = -1;
593 /* check used bits only */
594 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
595 buddy2 = mb_find_buddy(e4b, j, &max2);
596 k = i >> j;
597 MB_CHECK_ASSERT(k < max2);
598 MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
601 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
602 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
604 grp = ext4_get_group_info(sb, e4b->bd_group);
605 buddy = mb_find_buddy(e4b, 0, &max);
606 list_for_each(cur, &grp->bb_prealloc_list) {
607 ext4_group_t groupnr;
608 struct ext4_prealloc_space *pa;
609 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
610 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
611 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
612 for (i = 0; i < pa->pa_len; i++)
613 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
615 return 0;
617 #undef MB_CHECK_ASSERT
618 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
619 __FILE__, __func__, __LINE__)
620 #else
621 #define mb_check_buddy(e4b)
622 #endif
624 /* FIXME!! need more doc */
625 static void ext4_mb_mark_free_simple(struct super_block *sb,
626 void *buddy, unsigned first, int len,
627 struct ext4_group_info *grp)
629 struct ext4_sb_info *sbi = EXT4_SB(sb);
630 unsigned short min;
631 unsigned short max;
632 unsigned short chunk;
633 unsigned short border;
635 BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));
637 border = 2 << sb->s_blocksize_bits;
639 while (len > 0) {
640 /* find how many blocks can be covered since this position */
641 max = ffs(first | border) - 1;
643 /* find how many blocks of power 2 we need to mark */
644 min = fls(len) - 1;
646 if (max < min)
647 min = max;
648 chunk = 1 << min;
650 /* mark multiblock chunks only */
651 grp->bb_counters[min]++;
652 if (min > 0)
653 mb_clear_bit(first >> min,
654 buddy + sbi->s_mb_offsets[min]);
656 len -= chunk;
657 first += chunk;
661 static void ext4_mb_generate_buddy(struct super_block *sb,
662 void *buddy, void *bitmap, ext4_group_t group)
664 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
665 unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
666 unsigned short i = 0;
667 unsigned short first;
668 unsigned short len;
669 unsigned free = 0;
670 unsigned fragments = 0;
671 unsigned long long period = get_cycles();
673 /* initialize buddy from bitmap which is aggregation
674 * of on-disk bitmap and preallocations */
675 i = mb_find_next_zero_bit(bitmap, max, 0);
676 grp->bb_first_free = i;
677 while (i < max) {
678 fragments++;
679 first = i;
680 i = mb_find_next_bit(bitmap, max, i);
681 len = i - first;
682 free += len;
683 if (len > 1)
684 ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
685 else
686 grp->bb_counters[0]++;
687 if (i < max)
688 i = mb_find_next_zero_bit(bitmap, max, i);
690 grp->bb_fragments = fragments;
692 if (free != grp->bb_free) {
693 ext4_error(sb, __func__,
694 "EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
695 group, free, grp->bb_free);
697 * If we intend to continue, we consider the group descriptor
698 * corrupt and update bb_free using the bitmap value.
700 grp->bb_free = free;
703 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
705 period = get_cycles() - period;
706 spin_lock(&EXT4_SB(sb)->s_bal_lock);
707 EXT4_SB(sb)->s_mb_buddies_generated++;
708 EXT4_SB(sb)->s_mb_generation_time += period;
709 spin_unlock(&EXT4_SB(sb)->s_bal_lock);
712 /* The buddy information is attached to the buddy cache inode
713 * for convenience. The information for each group
714 * is loaded via ext4_mb_load_buddy. It involves the
715 * block bitmap and the buddy information, which are
716 * stored in the inode as:
718 * { page }
719 * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
722 * one block each for bitmap and buddy information.
723 * So for each group we take up 2 blocks. A page can
724 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
725 * So it can have information regarding groups_per_page which
726 * is blocks_per_page/2
729 static int ext4_mb_init_cache(struct page *page, char *incore)
731 int blocksize;
732 int blocks_per_page;
733 int groups_per_page;
734 int err = 0;
735 int i;
736 ext4_group_t first_group;
737 int first_block;
738 struct super_block *sb;
739 struct buffer_head *bhs;
740 struct buffer_head **bh;
741 struct inode *inode;
742 char *data;
743 char *bitmap;
745 mb_debug("init page %lu\n", page->index);
747 inode = page->mapping->host;
748 sb = inode->i_sb;
749 blocksize = 1 << inode->i_blkbits;
750 blocks_per_page = PAGE_CACHE_SIZE / blocksize;
752 groups_per_page = blocks_per_page >> 1;
753 if (groups_per_page == 0)
754 groups_per_page = 1;
756 /* allocate buffer_heads to read bitmaps */
757 if (groups_per_page > 1) {
758 err = -ENOMEM;
759 i = sizeof(struct buffer_head *) * groups_per_page;
760 bh = kzalloc(i, GFP_NOFS);
761 if (bh == NULL)
762 goto out;
763 } else
764 bh = &bhs;
766 first_group = page->index * blocks_per_page / 2;
768 /* read all groups the page covers into the cache */
769 for (i = 0; i < groups_per_page; i++) {
770 struct ext4_group_desc *desc;
772 if (first_group + i >= EXT4_SB(sb)->s_groups_count)
773 break;
775 err = -EIO;
776 desc = ext4_get_group_desc(sb, first_group + i, NULL);
777 if (desc == NULL)
778 goto out;
780 err = -ENOMEM;
781 bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
782 if (bh[i] == NULL)
783 goto out;
785 if (buffer_uptodate(bh[i]) &&
786 !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
787 continue;
789 lock_buffer(bh[i]);
790 spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
791 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
792 ext4_init_block_bitmap(sb, bh[i],
793 first_group + i, desc);
794 set_buffer_uptodate(bh[i]);
795 unlock_buffer(bh[i]);
796 spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
797 continue;
799 spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
800 get_bh(bh[i]);
801 bh[i]->b_end_io = end_buffer_read_sync;
802 submit_bh(READ, bh[i]);
803 mb_debug("read bitmap for group %u\n", first_group + i);
806 /* wait for I/O completion */
807 for (i = 0; i < groups_per_page && bh[i]; i++)
808 wait_on_buffer(bh[i]);
810 err = -EIO;
811 for (i = 0; i < groups_per_page && bh[i]; i++)
812 if (!buffer_uptodate(bh[i]))
813 goto out;
815 err = 0;
816 first_block = page->index * blocks_per_page;
817 for (i = 0; i < blocks_per_page; i++) {
818 int group;
819 struct ext4_group_info *grinfo;
821 group = (first_block + i) >> 1;
822 if (group >= EXT4_SB(sb)->s_groups_count)
823 break;
826 * data carries information about this
827 * particular group in the format specified
828 * above
831 data = page_address(page) + (i * blocksize);
832 bitmap = bh[group - first_group]->b_data;
835 * We place the buddy block and bitmap block
836 * close together
838 if ((first_block + i) & 1) {
839 /* this is block of buddy */
840 BUG_ON(incore == NULL);
841 mb_debug("put buddy for group %u in page %lu/%x\n",
842 group, page->index, i * blocksize);
843 memset(data, 0xff, blocksize);
844 grinfo = ext4_get_group_info(sb, group);
845 grinfo->bb_fragments = 0;
846 memset(grinfo->bb_counters, 0,
847 sizeof(unsigned short)*(sb->s_blocksize_bits+2));
849 * incore got set to the group block bitmap below
851 ext4_mb_generate_buddy(sb, data, incore, group);
852 incore = NULL;
853 } else {
854 /* this is block of bitmap */
855 BUG_ON(incore != NULL);
856 mb_debug("put bitmap for group %u in page %lu/%x\n",
857 group, page->index, i * blocksize);
859 /* see comments in ext4_mb_put_pa() */
860 ext4_lock_group(sb, group);
861 memcpy(data, bitmap, blocksize);
863 /* mark all preallocated blks used in in-core bitmap */
864 ext4_mb_generate_from_pa(sb, data, group);
865 ext4_unlock_group(sb, group);
867 /* set incore so that the buddy information can be
868 * generated using this
870 incore = data;
873 SetPageUptodate(page);
875 out:
876 if (bh) {
877 for (i = 0; i < groups_per_page && bh[i]; i++)
878 brelse(bh[i]);
879 if (bh != &bhs)
880 kfree(bh);
882 return err;
885 static noinline_for_stack int
886 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
887 struct ext4_buddy *e4b)
889 struct ext4_sb_info *sbi = EXT4_SB(sb);
890 struct inode *inode = sbi->s_buddy_cache;
891 int blocks_per_page;
892 int block;
893 int pnum;
894 int poff;
895 struct page *page;
896 int ret;
898 mb_debug("load group %u\n", group);
900 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
902 e4b->bd_blkbits = sb->s_blocksize_bits;
903 e4b->bd_info = ext4_get_group_info(sb, group);
904 e4b->bd_sb = sb;
905 e4b->bd_group = group;
906 e4b->bd_buddy_page = NULL;
907 e4b->bd_bitmap_page = NULL;
910 * the buddy cache inode stores the block bitmap
911 * and buddy information in consecutive blocks.
912 * So for each group we need two blocks.
914 block = group * 2;
915 pnum = block / blocks_per_page;
916 poff = block % blocks_per_page;
918 /* we could use find_or_create_page(), but it locks the page,
919 * which we'd like to avoid in the fast path ... */
920 page = find_get_page(inode->i_mapping, pnum);
921 if (page == NULL || !PageUptodate(page)) {
922 if (page)
923 page_cache_release(page);
924 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
925 if (page) {
926 BUG_ON(page->mapping != inode->i_mapping);
927 if (!PageUptodate(page)) {
928 ret = ext4_mb_init_cache(page, NULL);
929 if (ret) {
930 unlock_page(page);
931 goto err;
933 mb_cmp_bitmaps(e4b, page_address(page) +
934 (poff * sb->s_blocksize));
936 unlock_page(page);
939 if (page == NULL || !PageUptodate(page)) {
940 ret = -EIO;
941 goto err;
943 e4b->bd_bitmap_page = page;
944 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
945 mark_page_accessed(page);
947 block++;
948 pnum = block / blocks_per_page;
949 poff = block % blocks_per_page;
951 page = find_get_page(inode->i_mapping, pnum);
952 if (page == NULL || !PageUptodate(page)) {
953 if (page)
954 page_cache_release(page);
955 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
956 if (page) {
957 BUG_ON(page->mapping != inode->i_mapping);
958 if (!PageUptodate(page)) {
959 ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
960 if (ret) {
961 unlock_page(page);
962 goto err;
965 unlock_page(page);
968 if (page == NULL || !PageUptodate(page)) {
969 ret = -EIO;
970 goto err;
972 e4b->bd_buddy_page = page;
973 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
974 mark_page_accessed(page);
976 BUG_ON(e4b->bd_bitmap_page == NULL);
977 BUG_ON(e4b->bd_buddy_page == NULL);
979 return 0;
981 err:
982 if (e4b->bd_bitmap_page)
983 page_cache_release(e4b->bd_bitmap_page);
984 if (e4b->bd_buddy_page)
985 page_cache_release(e4b->bd_buddy_page);
986 e4b->bd_buddy = NULL;
987 e4b->bd_bitmap = NULL;
988 return ret;
991 static void ext4_mb_release_desc(struct ext4_buddy *e4b)
993 if (e4b->bd_bitmap_page)
994 page_cache_release(e4b->bd_bitmap_page);
995 if (e4b->bd_buddy_page)
996 page_cache_release(e4b->bd_buddy_page);
1000 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1002 int order = 1;
1003 void *bb;
1005 BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
1006 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1008 bb = EXT4_MB_BUDDY(e4b);
1009 while (order <= e4b->bd_blkbits + 1) {
1010 block = block >> 1;
1011 if (!mb_test_bit(block, bb)) {
1012 /* this block is part of buddy of order 'order' */
1013 return order;
1015 bb += 1 << (e4b->bd_blkbits - order);
1016 order++;
1018 return 0;
1021 static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
1023 __u32 *addr;
1025 len = cur + len;
1026 while (cur < len) {
1027 if ((cur & 31) == 0 && (len - cur) >= 32) {
1028 /* fast path: clear whole word at once */
1029 addr = bm + (cur >> 3);
1030 *addr = 0;
1031 cur += 32;
1032 continue;
1034 mb_clear_bit_atomic(lock, cur, bm);
1035 cur++;
1039 static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
1041 __u32 *addr;
1043 len = cur + len;
1044 while (cur < len) {
1045 if ((cur & 31) == 0 && (len - cur) >= 32) {
1046 /* fast path: set whole word at once */
1047 addr = bm + (cur >> 3);
1048 *addr = 0xffffffff;
1049 cur += 32;
1050 continue;
1052 mb_set_bit_atomic(lock, cur, bm);
1053 cur++;
1057 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1058 int first, int count)
1060 int block = 0;
1061 int max = 0;
1062 int order;
1063 void *buddy;
1064 void *buddy2;
1065 struct super_block *sb = e4b->bd_sb;
1067 BUG_ON(first + count > (sb->s_blocksize << 3));
1068 BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
1069 mb_check_buddy(e4b);
1070 mb_free_blocks_double(inode, e4b, first, count);
1072 e4b->bd_info->bb_free += count;
1073 if (first < e4b->bd_info->bb_first_free)
1074 e4b->bd_info->bb_first_free = first;
1076 /* let's maintain fragments counter */
1077 if (first != 0)
1078 block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
1079 if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
1080 max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
1081 if (block && max)
1082 e4b->bd_info->bb_fragments--;
1083 else if (!block && !max)
1084 e4b->bd_info->bb_fragments++;
1086 /* let's maintain buddy itself */
1087 while (count-- > 0) {
1088 block = first++;
1089 order = 0;
1091 if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
1092 ext4_fsblk_t blocknr;
1093 blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
1094 blocknr += block;
1095 blocknr +=
1096 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
1097 ext4_unlock_group(sb, e4b->bd_group);
1098 ext4_error(sb, __func__, "double-free of inode"
1099 " %lu's block %llu(bit %u in group %u)",
1100 inode ? inode->i_ino : 0, blocknr, block,
1101 e4b->bd_group);
1102 ext4_lock_group(sb, e4b->bd_group);
1104 mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
1105 e4b->bd_info->bb_counters[order]++;
1107 /* start of the buddy */
1108 buddy = mb_find_buddy(e4b, order, &max);
1110 do {
1111 block &= ~1UL;
1112 if (mb_test_bit(block, buddy) ||
1113 mb_test_bit(block + 1, buddy))
1114 break;
1116 /* both the buddies are free, try to coalesce them */
1117 buddy2 = mb_find_buddy(e4b, order + 1, &max);
1119 if (!buddy2)
1120 break;
1122 if (order > 0) {
1123 /* for special purposes, we don't set
1124 * free bits in bitmap */
1125 mb_set_bit(block, buddy);
1126 mb_set_bit(block + 1, buddy);
1128 e4b->bd_info->bb_counters[order]--;
1129 e4b->bd_info->bb_counters[order]--;
1131 block = block >> 1;
1132 order++;
1133 e4b->bd_info->bb_counters[order]++;
1135 mb_clear_bit(block, buddy2);
1136 buddy = buddy2;
1137 } while (1);
1139 mb_check_buddy(e4b);
1142 static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
1143 int needed, struct ext4_free_extent *ex)
1145 int next = block;
1146 int max;
1147 int ord;
1148 void *buddy;
1150 BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
1151 BUG_ON(ex == NULL);
1153 buddy = mb_find_buddy(e4b, order, &max);
1154 BUG_ON(buddy == NULL);
1155 BUG_ON(block >= max);
1156 if (mb_test_bit(block, buddy)) {
1157 ex->fe_len = 0;
1158 ex->fe_start = 0;
1159 ex->fe_group = 0;
1160 return 0;
1163 /* FIXME: drop order completely? */
1164 if (likely(order == 0)) {
1165 /* find actual order */
1166 order = mb_find_order_for_block(e4b, block);
1167 block = block >> order;
1170 ex->fe_len = 1 << order;
1171 ex->fe_start = block << order;
1172 ex->fe_group = e4b->bd_group;
1174 /* calc difference from given start */
1175 next = next - ex->fe_start;
1176 ex->fe_len -= next;
1177 ex->fe_start += next;
1179 while (needed > ex->fe_len &&
1180 (buddy = mb_find_buddy(e4b, order, &max))) {
1182 if (block + 1 >= max)
1183 break;
1185 next = (block + 1) * (1 << order);
1186 if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
1187 break;
1189 ord = mb_find_order_for_block(e4b, next);
1191 order = ord;
1192 block = next >> order;
1193 ex->fe_len += 1 << order;
1196 BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1197 return ex->fe_len;
1200 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1202 int ord;
1203 int mlen = 0;
1204 int max = 0;
1205 int cur;
1206 int start = ex->fe_start;
1207 int len = ex->fe_len;
1208 unsigned ret = 0;
1209 int len0 = len;
1210 void *buddy;
1212 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1213 BUG_ON(e4b->bd_group != ex->fe_group);
1214 BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
1215 mb_check_buddy(e4b);
1216 mb_mark_used_double(e4b, start, len);
1218 e4b->bd_info->bb_free -= len;
1219 if (e4b->bd_info->bb_first_free == start)
1220 e4b->bd_info->bb_first_free += len;
1222 /* let's maintain fragments counter */
1223 if (start != 0)
1224 mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
1225 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1226 max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
1227 if (mlen && max)
1228 e4b->bd_info->bb_fragments++;
1229 else if (!mlen && !max)
1230 e4b->bd_info->bb_fragments--;
1232 /* let's maintain buddy itself */
1233 while (len) {
1234 ord = mb_find_order_for_block(e4b, start);
1236 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1237 /* the whole chunk may be allocated at once! */
1238 mlen = 1 << ord;
1239 buddy = mb_find_buddy(e4b, ord, &max);
1240 BUG_ON((start >> ord) >= max);
1241 mb_set_bit(start >> ord, buddy);
1242 e4b->bd_info->bb_counters[ord]--;
1243 start += mlen;
1244 len -= mlen;
1245 BUG_ON(len < 0);
1246 continue;
1249 /* store for history */
1250 if (ret == 0)
1251 ret = len | (ord << 16);
1253 /* we have to split large buddy */
1254 BUG_ON(ord <= 0);
1255 buddy = mb_find_buddy(e4b, ord, &max);
1256 mb_set_bit(start >> ord, buddy);
1257 e4b->bd_info->bb_counters[ord]--;
1259 ord--;
1260 cur = (start >> ord) & ~1U;
1261 buddy = mb_find_buddy(e4b, ord, &max);
1262 mb_clear_bit(cur, buddy);
1263 mb_clear_bit(cur + 1, buddy);
1264 e4b->bd_info->bb_counters[ord]++;
1265 e4b->bd_info->bb_counters[ord]++;
1268 mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
1269 EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
1270 mb_check_buddy(e4b);
1272 return ret;
1276 * Must be called under group lock!
1278 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1279 struct ext4_buddy *e4b)
1281 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1282 int ret;
1284 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1285 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1287 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1288 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1289 ret = mb_mark_used(e4b, &ac->ac_b_ex);
1291 /* preallocation can change ac_b_ex, thus we store actually
1292 * allocated blocks for history */
1293 ac->ac_f_ex = ac->ac_b_ex;
1295 ac->ac_status = AC_STATUS_FOUND;
1296 ac->ac_tail = ret & 0xffff;
1297 ac->ac_buddy = ret >> 16;
1299 /* XXXXXXX: SUCH A HORRIBLE **CK */
1300 /*FIXME!! Why ? */
1301 ac->ac_bitmap_page = e4b->bd_bitmap_page;
1302 get_page(ac->ac_bitmap_page);
1303 ac->ac_buddy_page = e4b->bd_buddy_page;
1304 get_page(ac->ac_buddy_page);
1306 /* store last allocated for subsequent stream allocation */
1307 if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
1308 spin_lock(&sbi->s_md_lock);
1309 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1310 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1311 spin_unlock(&sbi->s_md_lock);
1316 * regular allocator, for general purposes allocation
1319 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1320 struct ext4_buddy *e4b,
1321 int finish_group)
1323 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1324 struct ext4_free_extent *bex = &ac->ac_b_ex;
1325 struct ext4_free_extent *gex = &ac->ac_g_ex;
1326 struct ext4_free_extent ex;
1327 int max;
1329 if (ac->ac_status == AC_STATUS_FOUND)
1330 return;
1332 * We don't want to scan for a whole year
1334 if (ac->ac_found > sbi->s_mb_max_to_scan &&
1335 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1336 ac->ac_status = AC_STATUS_BREAK;
1337 return;
1341 * Haven't found a good chunk so far, let's continue
1343 if (bex->fe_len < gex->fe_len)
1344 return;
1346 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1347 && bex->fe_group == e4b->bd_group) {
1348 /* recheck chunk's availability - we don't know
1349 * when it was found (within this lock-unlock
1350 * period or not) */
1351 max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1352 if (max >= gex->fe_len) {
1353 ext4_mb_use_best_found(ac, e4b);
1354 return;
1360 * The routine checks whether the found extent is good enough. If it is,
1361 * the extent gets marked used and a flag is set in the context
1362 * to stop scanning. Otherwise, the extent is compared with the
1363 * previously found extent and if the new one is better, it's stored
1364 * in the context. Later, the best found extent will be used if
1365 * mballoc can't find a good enough extent.
1367 * FIXME: real allocation policy is to be designed yet!
1369 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1370 struct ext4_free_extent *ex,
1371 struct ext4_buddy *e4b)
1373 struct ext4_free_extent *bex = &ac->ac_b_ex;
1374 struct ext4_free_extent *gex = &ac->ac_g_ex;
1376 BUG_ON(ex->fe_len <= 0);
1377 BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1378 BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1379 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1381 ac->ac_found++;
1384 * The special case - take what you catch first
1386 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1387 *bex = *ex;
1388 ext4_mb_use_best_found(ac, e4b);
1389 return;
1393 * Let's check whether the chunk is good enough
1395 if (ex->fe_len == gex->fe_len) {
1396 *bex = *ex;
1397 ext4_mb_use_best_found(ac, e4b);
1398 return;
1402 * If this is the first found extent, just store it in the context
1404 if (bex->fe_len == 0) {
1405 *bex = *ex;
1406 return;
1410 * If the newly found extent is better, store it in the context
1412 if (bex->fe_len < gex->fe_len) {
1413 /* if the request isn't satisfied, any found extent
1414 * larger than previous best one is better */
1415 if (ex->fe_len > bex->fe_len)
1416 *bex = *ex;
1417 } else if (ex->fe_len > gex->fe_len) {
1418 /* if the request is satisfied, then we try to find
1419 * an extent that still satisfies the request, but is
1420 * smaller than the previous one */
1421 if (ex->fe_len < bex->fe_len)
1422 *bex = *ex;
1425 ext4_mb_check_limits(ac, e4b, 0);
1428 static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1429 struct ext4_buddy *e4b)
1431 struct ext4_free_extent ex = ac->ac_b_ex;
1432 ext4_group_t group = ex.fe_group;
1433 int max;
1434 int err;
1436 BUG_ON(ex.fe_len <= 0);
1437 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1438 if (err)
1439 return err;
1441 ext4_lock_group(ac->ac_sb, group);
1442 max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1444 if (max > 0) {
1445 ac->ac_b_ex = ex;
1446 ext4_mb_use_best_found(ac, e4b);
1449 ext4_unlock_group(ac->ac_sb, group);
1450 ext4_mb_release_desc(e4b);
1452 return 0;
1455 static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1456 struct ext4_buddy *e4b)
1458 ext4_group_t group = ac->ac_g_ex.fe_group;
1459 int max;
1460 int err;
1461 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1462 struct ext4_super_block *es = sbi->s_es;
1463 struct ext4_free_extent ex;
1465 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1466 return 0;
1468 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1469 if (err)
1470 return err;
1472 ext4_lock_group(ac->ac_sb, group);
1473 max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1474 ac->ac_g_ex.fe_len, &ex);
1476 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1477 ext4_fsblk_t start;
1479 start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
1480 ex.fe_start + le32_to_cpu(es->s_first_data_block);
1481 /* use do_div to get remainder (would be 64-bit modulo) */
1482 if (do_div(start, sbi->s_stripe) == 0) {
1483 ac->ac_found++;
1484 ac->ac_b_ex = ex;
1485 ext4_mb_use_best_found(ac, e4b);
1487 } else if (max >= ac->ac_g_ex.fe_len) {
1488 BUG_ON(ex.fe_len <= 0);
1489 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1490 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1491 ac->ac_found++;
1492 ac->ac_b_ex = ex;
1493 ext4_mb_use_best_found(ac, e4b);
1494 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1495 /* Sometimes, the caller may want to merge even a small
1496 * number of blocks into an existing extent */
1497 BUG_ON(ex.fe_len <= 0);
1498 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1499 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1500 ac->ac_found++;
1501 ac->ac_b_ex = ex;
1502 ext4_mb_use_best_found(ac, e4b);
1504 ext4_unlock_group(ac->ac_sb, group);
1505 ext4_mb_release_desc(e4b);
1507 return 0;
1511 * The routine scans buddy structures (not the bitmap!) from the given
1512 * order up to the max order and tries to find a big enough chunk to satisfy the request
1514 static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1515 struct ext4_buddy *e4b)
1517 struct super_block *sb = ac->ac_sb;
1518 struct ext4_group_info *grp = e4b->bd_info;
1519 void *buddy;
1520 int i;
1521 int k;
1522 int max;
1524 BUG_ON(ac->ac_2order <= 0);
1525 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1526 if (grp->bb_counters[i] == 0)
1527 continue;
1529 buddy = mb_find_buddy(e4b, i, &max);
1530 BUG_ON(buddy == NULL);
1532 k = mb_find_next_zero_bit(buddy, max, 0);
1533 BUG_ON(k >= max);
1535 ac->ac_found++;
1537 ac->ac_b_ex.fe_len = 1 << i;
1538 ac->ac_b_ex.fe_start = k << i;
1539 ac->ac_b_ex.fe_group = e4b->bd_group;
1541 ext4_mb_use_best_found(ac, e4b);
1543 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1545 if (EXT4_SB(sb)->s_mb_stats)
1546 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1548 break;
1553 * The routine scans the group and measures all found extents.
1554 * In order to optimize scanning, the caller must pass the number of
1555 * free blocks in the group, so the routine can know the upper limit.
1557 static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1558 struct ext4_buddy *e4b)
1560 struct super_block *sb = ac->ac_sb;
1561 void *bitmap = EXT4_MB_BITMAP(e4b);
1562 struct ext4_free_extent ex;
1563 int i;
1564 int free;
1566 free = e4b->bd_info->bb_free;
1567 BUG_ON(free <= 0);
1569 i = e4b->bd_info->bb_first_free;
1571 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1572 i = mb_find_next_zero_bit(bitmap,
1573 EXT4_BLOCKS_PER_GROUP(sb), i);
1574 if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1576 * If we have a corrupt bitmap, we won't find any
1577 * free blocks even though group info says we
1578 * have free blocks
1580 ext4_error(sb, __func__, "%d free blocks as per "
1581 "group info. But bitmap says 0",
1582 free);
1583 break;
1586 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1587 BUG_ON(ex.fe_len <= 0);
1588 if (free < ex.fe_len) {
1589 ext4_error(sb, __func__, "%d free blocks as per "
1590 "group info. But got %d blocks",
1591 free, ex.fe_len);
1593 * The number of free blocks differs. This mostly
1594 * indicates that the bitmap is corrupt. So exit
1595 * without claiming the space.
1597 break;
1600 ext4_mb_measure_extent(ac, &ex, e4b);
1602 i += ex.fe_len;
1603 free -= ex.fe_len;
1606 ext4_mb_check_limits(ac, e4b, 1);
1610 * This is a special case for storage like raid5:
1611 * we try to find stripe-aligned chunks for stripe-size requests
1612 * XXX should do so at least for multiples of stripe size as well
1614 static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1615 struct ext4_buddy *e4b)
1617 struct super_block *sb = ac->ac_sb;
1618 struct ext4_sb_info *sbi = EXT4_SB(sb);
1619 void *bitmap = EXT4_MB_BITMAP(e4b);
1620 struct ext4_free_extent ex;
1621 ext4_fsblk_t first_group_block;
1622 ext4_fsblk_t a;
1623 ext4_grpblk_t i;
1624 int max;
1626 BUG_ON(sbi->s_stripe == 0);
1628 /* find first stripe-aligned block in group */
1629 first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
1630 + le32_to_cpu(sbi->s_es->s_first_data_block);
1631 a = first_group_block + sbi->s_stripe - 1;
1632 do_div(a, sbi->s_stripe);
1633 i = (a * sbi->s_stripe) - first_group_block;
1635 while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1636 if (!mb_test_bit(i, bitmap)) {
1637 max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1638 if (max >= sbi->s_stripe) {
1639 ac->ac_found++;
1640 ac->ac_b_ex = ex;
1641 ext4_mb_use_best_found(ac, e4b);
1642 break;
1645 i += sbi->s_stripe;
1649 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1650 ext4_group_t group, int cr)
1652 unsigned free, fragments;
1653 unsigned i, bits;
1654 struct ext4_group_desc *desc;
1655 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1657 BUG_ON(cr < 0 || cr >= 4);
1658 BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
1660 free = grp->bb_free;
1661 fragments = grp->bb_fragments;
1662 if (free == 0)
1663 return 0;
1664 if (fragments == 0)
1665 return 0;
1667 switch (cr) {
1668 case 0:
1669 BUG_ON(ac->ac_2order == 0);
1670 /* If this group is uninitialized, skip it initially */
1671 desc = ext4_get_group_desc(ac->ac_sb, group, NULL);
1672 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1673 return 0;
1675 bits = ac->ac_sb->s_blocksize_bits + 1;
1676 for (i = ac->ac_2order; i <= bits; i++)
1677 if (grp->bb_counters[i] > 0)
1678 return 1;
1679 break;
1680 case 1:
1681 if ((free / fragments) >= ac->ac_g_ex.fe_len)
1682 return 1;
1683 break;
1684 case 2:
1685 if (free >= ac->ac_g_ex.fe_len)
1686 return 1;
1687 break;
1688 case 3:
1689 return 1;
1690 default:
1691 BUG();
1694 return 0;
1697 static noinline_for_stack int
1698 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1700 ext4_group_t group;
1701 ext4_group_t i;
1702 int cr;
1703 int err = 0;
1704 int bsbits;
1705 struct ext4_sb_info *sbi;
1706 struct super_block *sb;
1707 struct ext4_buddy e4b;
1708 loff_t size, isize;
1710 sb = ac->ac_sb;
1711 sbi = EXT4_SB(sb);
1712 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1714 /* first, try the goal */
1715 err = ext4_mb_find_by_goal(ac, &e4b);
1716 if (err || ac->ac_status == AC_STATUS_FOUND)
1717 goto out;
1719 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1720 goto out;
1723 * ac->ac_2order is set only if the fe_len is a power of 2;
1724 * if ac_2order is set we also set the criteria to 0 so that we
1725 * try exact allocation using the buddy.
1727 i = fls(ac->ac_g_ex.fe_len);
1728 ac->ac_2order = 0;
1730 * We search using buddy data only if the order of the request
1731 * is greater than or equal to sbi->s_mb_order2_reqs.
1732 * You can tune it via /proc/fs/ext4/<partition>/order2_req
1734 if (i >= sbi->s_mb_order2_reqs) {
1736 * This should tell us whether fe_len is exactly a power of 2
1738 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1739 ac->ac_2order = i - 1;
1742 bsbits = ac->ac_sb->s_blocksize_bits;
1743 /* if stream allocation is enabled, use global goal */
1744 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
1745 isize = i_size_read(ac->ac_inode) >> bsbits;
1746 if (size < isize)
1747 size = isize;
1749 if (size < sbi->s_mb_stream_request &&
1750 (ac->ac_flags & EXT4_MB_HINT_DATA)) {
1751 /* TBD: may be hot point */
1752 spin_lock(&sbi->s_md_lock);
1753 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1754 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1755 spin_unlock(&sbi->s_md_lock);
1757 /* Let's just scan groups to find more or less suitable blocks */
1758 cr = ac->ac_2order ? 0 : 1;
1760 * cr == 0 try to get exact allocation,
1761 * cr == 3 try to get anything
1763 repeat:
1764 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1765 ac->ac_criteria = cr;
1767 * searching for the right group starts
1768 * from the goal value specified
1770 group = ac->ac_g_ex.fe_group;
1772 for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
1773 struct ext4_group_info *grp;
1774 struct ext4_group_desc *desc;
1776 if (group == EXT4_SB(sb)->s_groups_count)
1777 group = 0;
1779 /* quick check to skip empty groups */
1780 grp = ext4_get_group_info(ac->ac_sb, group);
1781 if (grp->bb_free == 0)
1782 continue;
1785 * if the group is already initialized, we check whether it is
1786 * a good group; if not, we don't load the buddy
1788 if (EXT4_MB_GRP_NEED_INIT(grp)) {
1790 * we need full data about the group
1791 * to make a good selection
1793 err = ext4_mb_load_buddy(sb, group, &e4b);
1794 if (err)
1795 goto out;
1796 ext4_mb_release_desc(&e4b);
1800 * If the particular group doesn't satisfy our
1801 * criteria we continue with the next group
1803 if (!ext4_mb_good_group(ac, group, cr))
1804 continue;
1806 err = ext4_mb_load_buddy(sb, group, &e4b);
1807 if (err)
1808 goto out;
1810 ext4_lock_group(sb, group);
1811 if (!ext4_mb_good_group(ac, group, cr)) {
1812 /* someone did allocation from this group */
1813 ext4_unlock_group(sb, group);
1814 ext4_mb_release_desc(&e4b);
1815 continue;
1818 ac->ac_groups_scanned++;
1819 desc = ext4_get_group_desc(sb, group, NULL);
1820 if (cr == 0 || (desc->bg_flags &
1821 cpu_to_le16(EXT4_BG_BLOCK_UNINIT) &&
1822 ac->ac_2order != 0))
1823 ext4_mb_simple_scan_group(ac, &e4b);
1824 else if (cr == 1 &&
1825 ac->ac_g_ex.fe_len == sbi->s_stripe)
1826 ext4_mb_scan_aligned(ac, &e4b);
1827 else
1828 ext4_mb_complex_scan_group(ac, &e4b);
1830 ext4_unlock_group(sb, group);
1831 ext4_mb_release_desc(&e4b);
1833 if (ac->ac_status != AC_STATUS_CONTINUE)
1834 break;
1838 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
1839 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1841 * We've been searching too long. Let's try to allocate
1842 * the best chunk we've found so far
1845 ext4_mb_try_best_found(ac, &e4b);
1846 if (ac->ac_status != AC_STATUS_FOUND) {
1848 * Someone more lucky has already allocated it.
1849 * The only thing we can do is just take the first
1850 * found block(s)
1851 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
1853 ac->ac_b_ex.fe_group = 0;
1854 ac->ac_b_ex.fe_start = 0;
1855 ac->ac_b_ex.fe_len = 0;
1856 ac->ac_status = AC_STATUS_CONTINUE;
1857 ac->ac_flags |= EXT4_MB_HINT_FIRST;
1858 cr = 3;
1859 atomic_inc(&sbi->s_mb_lost_chunks);
1860 goto repeat;
1863 out:
1864 return err;
1867 #ifdef EXT4_MB_HISTORY
1868 struct ext4_mb_proc_session {
1869 struct ext4_mb_history *history;
1870 struct super_block *sb;
1871 int start;
1872 int max;
1875 static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
1876 struct ext4_mb_history *hs,
1877 int first)
1879 if (hs == s->history + s->max)
1880 hs = s->history;
1881 if (!first && hs == s->history + s->start)
1882 return NULL;
1883 while (hs->orig.fe_len == 0) {
1884 hs++;
1885 if (hs == s->history + s->max)
1886 hs = s->history;
1887 if (hs == s->history + s->start)
1888 return NULL;
1890 return hs;
1893 static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
1895 struct ext4_mb_proc_session *s = seq->private;
1896 struct ext4_mb_history *hs;
1897 int l = *pos;
1899 if (l == 0)
1900 return SEQ_START_TOKEN;
1901 hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
1902 if (!hs)
1903 return NULL;
1904 while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
1905 return hs;
1908 static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
1909 loff_t *pos)
1911 struct ext4_mb_proc_session *s = seq->private;
1912 struct ext4_mb_history *hs = v;
1914 ++*pos;
1915 if (v == SEQ_START_TOKEN)
1916 return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
1917 else
1918 return ext4_mb_history_skip_empty(s, ++hs, 0);
1921 static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
1923 char buf[25], buf2[25], buf3[25], *fmt;
1924 struct ext4_mb_history *hs = v;
1926 if (v == SEQ_START_TOKEN) {
1927 seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
1928 "%-5s %-2s %-5s %-5s %-5s %-6s\n",
1929 "pid", "inode", "original", "goal", "result", "found",
1930 "grps", "cr", "flags", "merge", "tail", "broken");
1931 return 0;
1934 if (hs->op == EXT4_MB_HISTORY_ALLOC) {
1935 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
1936 "%-5u %-5s %-5u %-6u\n";
1937 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
1938 hs->result.fe_start, hs->result.fe_len,
1939 hs->result.fe_logical);
1940 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
1941 hs->orig.fe_start, hs->orig.fe_len,
1942 hs->orig.fe_logical);
1943 sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
1944 hs->goal.fe_start, hs->goal.fe_len,
1945 hs->goal.fe_logical);
1946 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
1947 hs->found, hs->groups, hs->cr, hs->flags,
1948 hs->merged ? "M" : "", hs->tail,
1949 hs->buddy ? 1 << hs->buddy : 0);
1950 } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
1951 fmt = "%-5u %-8u %-23s %-23s %-23s\n";
1952 sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
1953 hs->result.fe_start, hs->result.fe_len,
1954 hs->result.fe_logical);
1955 sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
1956 hs->orig.fe_start, hs->orig.fe_len,
1957 hs->orig.fe_logical);
1958 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
1959 } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
1960 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
1961 hs->result.fe_start, hs->result.fe_len);
1962 seq_printf(seq, "%-5u %-8u %-23s discard\n",
1963 hs->pid, hs->ino, buf2);
1964 } else if (hs->op == EXT4_MB_HISTORY_FREE) {
1965 sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
1966 hs->result.fe_start, hs->result.fe_len);
1967 seq_printf(seq, "%-5u %-8u %-23s free\n",
1968 hs->pid, hs->ino, buf2);
1970 return 0;
1973 static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
1977 static struct seq_operations ext4_mb_seq_history_ops = {
1978 .start = ext4_mb_seq_history_start,
1979 .next = ext4_mb_seq_history_next,
1980 .stop = ext4_mb_seq_history_stop,
1981 .show = ext4_mb_seq_history_show,
1984 static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
1986 struct super_block *sb = PDE(inode)->data;
1987 struct ext4_sb_info *sbi = EXT4_SB(sb);
1988 struct ext4_mb_proc_session *s;
1989 int rc;
1990 int size;
1992 if (unlikely(sbi->s_mb_history == NULL))
1993 return -ENOMEM;
1994 s = kmalloc(sizeof(*s), GFP_KERNEL);
1995 if (s == NULL)
1996 return -ENOMEM;
1997 s->sb = sb;
1998 size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
1999 s->history = kmalloc(size, GFP_KERNEL);
2000 if (s->history == NULL) {
2001 kfree(s);
2002 return -ENOMEM;
2005 spin_lock(&sbi->s_mb_history_lock);
2006 memcpy(s->history, sbi->s_mb_history, size);
2007 s->max = sbi->s_mb_history_max;
2008 s->start = sbi->s_mb_history_cur % s->max;
2009 spin_unlock(&sbi->s_mb_history_lock);
2011 rc = seq_open(file, &ext4_mb_seq_history_ops);
2012 if (rc == 0) {
2013 struct seq_file *m = (struct seq_file *)file->private_data;
2014 m->private = s;
2015 } else {
2016 kfree(s->history);
2017 kfree(s);
2019 return rc;
2023 static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2025 struct seq_file *seq = (struct seq_file *)file->private_data;
2026 struct ext4_mb_proc_session *s = seq->private;
2027 kfree(s->history);
2028 kfree(s);
2029 return seq_release(inode, file);
2032 static ssize_t ext4_mb_seq_history_write(struct file *file,
2033 const char __user *buffer,
2034 size_t count, loff_t *ppos)
2036 struct seq_file *seq = (struct seq_file *)file->private_data;
2037 struct ext4_mb_proc_session *s = seq->private;
2038 struct super_block *sb = s->sb;
2039 char str[32];
2040 int value;
2042 if (count >= sizeof(str)) {
2043 printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2044 "mb_history", (int)sizeof(str));
2045 return -EOVERFLOW;
2048 if (copy_from_user(str, buffer, count))
2049 return -EFAULT;
2051 value = simple_strtol(str, NULL, 0);
2052 if (value < 0)
2053 return -ERANGE;
2054 EXT4_SB(sb)->s_mb_history_filter = value;
2056 return count;
2059 static struct file_operations ext4_mb_seq_history_fops = {
2060 .owner = THIS_MODULE,
2061 .open = ext4_mb_seq_history_open,
2062 .read = seq_read,
2063 .write = ext4_mb_seq_history_write,
2064 .llseek = seq_lseek,
2065 .release = ext4_mb_seq_history_release,
2068 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2070 struct super_block *sb = seq->private;
2071 struct ext4_sb_info *sbi = EXT4_SB(sb);
2072 ext4_group_t group;
2074 if (*pos < 0 || *pos >= sbi->s_groups_count)
2075 return NULL;
2077 group = *pos + 1;
2078 return (void *) ((unsigned long) group);
2081 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2083 struct super_block *sb = seq->private;
2084 struct ext4_sb_info *sbi = EXT4_SB(sb);
2085 ext4_group_t group;
2087 ++*pos;
2088 if (*pos < 0 || *pos >= sbi->s_groups_count)
2089 return NULL;
2090 group = *pos + 1;
2091 return (void *) ((unsigned long) group);
2094 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2096 struct super_block *sb = seq->private;
2097 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2098 int i;
2099 int err;
2100 struct ext4_buddy e4b;
2101 struct sg {
2102 struct ext4_group_info info;
2103 unsigned short counters[16];
2104 } sg;
2106 group--;
2107 if (group == 0)
2108 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2109 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2110 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2111 "group", "free", "frags", "first",
2112 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2113 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2115 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2116 sizeof(struct ext4_group_info);
2117 err = ext4_mb_load_buddy(sb, group, &e4b);
2118 if (err) {
2119 seq_printf(seq, "#%-5u: I/O error\n", group);
2120 return 0;
2122 ext4_lock_group(sb, group);
2123 memcpy(&sg, ext4_get_group_info(sb, group), i);
2124 ext4_unlock_group(sb, group);
2125 ext4_mb_release_desc(&e4b);
2127 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2128 sg.info.bb_fragments, sg.info.bb_first_free);
2129 for (i = 0; i <= 13; i++)
2130 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2131 sg.info.bb_counters[i] : 0);
2132 seq_printf(seq, " ]\n");
2134 return 0;
2137 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2141 static struct seq_operations ext4_mb_seq_groups_ops = {
2142 .start = ext4_mb_seq_groups_start,
2143 .next = ext4_mb_seq_groups_next,
2144 .stop = ext4_mb_seq_groups_stop,
2145 .show = ext4_mb_seq_groups_show,
2148 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2150 struct super_block *sb = PDE(inode)->data;
2151 int rc;
2153 rc = seq_open(file, &ext4_mb_seq_groups_ops);
2154 if (rc == 0) {
2155 struct seq_file *m = (struct seq_file *)file->private_data;
2156 m->private = sb;
2158 return rc;
2162 static struct file_operations ext4_mb_seq_groups_fops = {
2163 .owner = THIS_MODULE,
2164 .open = ext4_mb_seq_groups_open,
2165 .read = seq_read,
2166 .llseek = seq_lseek,
2167 .release = seq_release,
2170 static void ext4_mb_history_release(struct super_block *sb)
2172 struct ext4_sb_info *sbi = EXT4_SB(sb);
2174 if (sbi->s_proc != NULL) {
2175 remove_proc_entry("mb_groups", sbi->s_proc);
2176 remove_proc_entry("mb_history", sbi->s_proc);
2178 kfree(sbi->s_mb_history);
2181 static void ext4_mb_history_init(struct super_block *sb)
2183 struct ext4_sb_info *sbi = EXT4_SB(sb);
2184 int i;
2186 if (sbi->s_proc != NULL) {
2187 proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
2188 &ext4_mb_seq_history_fops, sb);
2189 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2190 &ext4_mb_seq_groups_fops, sb);
2193 sbi->s_mb_history_max = 1000;
2194 sbi->s_mb_history_cur = 0;
2195 spin_lock_init(&sbi->s_mb_history_lock);
2196 i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
2197 sbi->s_mb_history = kzalloc(i, GFP_KERNEL);
2198 /* if we can't allocate history, then we simply won't use it */
2201 static noinline_for_stack void
2202 ext4_mb_store_history(struct ext4_allocation_context *ac)
2204 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2205 struct ext4_mb_history h;
2207 if (unlikely(sbi->s_mb_history == NULL))
2208 return;
2210 if (!(ac->ac_op & sbi->s_mb_history_filter))
2211 return;
2213 h.op = ac->ac_op;
2214 h.pid = current->pid;
2215 h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2216 h.orig = ac->ac_o_ex;
2217 h.result = ac->ac_b_ex;
2218 h.flags = ac->ac_flags;
2219 h.found = ac->ac_found;
2220 h.groups = ac->ac_groups_scanned;
2221 h.cr = ac->ac_criteria;
2222 h.tail = ac->ac_tail;
2223 h.buddy = ac->ac_buddy;
2224 h.merged = 0;
2225 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2226 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2227 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2228 h.merged = 1;
2229 h.goal = ac->ac_g_ex;
2230 h.result = ac->ac_f_ex;
2233 spin_lock(&sbi->s_mb_history_lock);
2234 memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2235 if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2236 sbi->s_mb_history_cur = 0;
2237 spin_unlock(&sbi->s_mb_history_lock);
2240 #else
2241 #define ext4_mb_history_release(sb)
2242 #define ext4_mb_history_init(sb)
2243 #endif
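/*
 * Illustrative sketch (not part of the original source): the mb_history
 * log above is a fixed-size ring buffer protected by a spinlock; writes
 * wrap at s_mb_history_max and readers snapshot the whole array under
 * the lock. A minimal userspace model of the wrap-around arithmetic,
 * using hypothetical names:
 */
#define DEMO_HISTORY_MAX 1000

struct demo_history {
	unsigned int entries[DEMO_HISTORY_MAX];
	unsigned int cur;	/* next slot to overwrite */
};

static void demo_history_store(struct demo_history *h, unsigned int val)
{
	h->entries[h->cur] = val;
	if (++h->cur >= DEMO_HISTORY_MAX)
		h->cur = 0;	/* wrapped: oldest entry now sits at cur */
}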
2246 /* Create and initialize ext4_group_info data for the given group. */
2247 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2248 struct ext4_group_desc *desc)
2250 int i, len;
2251 int metalen = 0;
2252 struct ext4_sb_info *sbi = EXT4_SB(sb);
2253 struct ext4_group_info **meta_group_info;
2256 * First check if this group is the first group of a descriptor block.
2257 * If so, we have to allocate a new table of pointers
2258 * to ext4_group_info structures
2260 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2261 metalen = sizeof(*meta_group_info) <<
2262 EXT4_DESC_PER_BLOCK_BITS(sb);
2263 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2264 if (meta_group_info == NULL) {
2265 printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2266 "buddy group\n");
2267 goto exit_meta_group_info;
2269 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2270 meta_group_info;
2274 * calculate the needed size. If the bb_counters size changes,
2275 * don't forget about ext4_mb_generate_buddy()
2277 len = offsetof(typeof(**meta_group_info),
2278 bb_counters[sb->s_blocksize_bits + 2]);
2280 meta_group_info =
2281 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2282 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2284 meta_group_info[i] = kzalloc(len, GFP_KERNEL);
2285 if (meta_group_info[i] == NULL) {
2286 printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2287 goto exit_group_info;
2289 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2290 &(meta_group_info[i]->bb_state));
2293 * initialize bb_free to be able to skip
2294 * empty groups without initialization
2296 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2297 meta_group_info[i]->bb_free =
2298 ext4_free_blocks_after_init(sb, group, desc);
2299 } else {
2300 meta_group_info[i]->bb_free =
2301 le16_to_cpu(desc->bg_free_blocks_count);
2304 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2305 meta_group_info[i]->bb_free_root.rb_node = NULL;
2307 #ifdef DOUBLE_CHECK
2309 struct buffer_head *bh;
2310 meta_group_info[i]->bb_bitmap =
2311 kmalloc(sb->s_blocksize, GFP_KERNEL);
2312 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2313 bh = ext4_read_block_bitmap(sb, group);
2314 BUG_ON(bh == NULL);
2315 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2316 sb->s_blocksize);
2317 put_bh(bh);
2319 #endif
2321 return 0;
2323 exit_group_info:
2324 /* If a meta_group_info table has been allocated, release it now */
2325 if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
2326 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2327 exit_meta_group_info:
2328 return -ENOMEM;
2329 } /* ext4_mb_add_groupinfo */
2332 * Add a group to the existing groups.
2333 * This function is used for online resize
2335 int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
2336 struct ext4_group_desc *desc)
2338 struct ext4_sb_info *sbi = EXT4_SB(sb);
2339 struct inode *inode = sbi->s_buddy_cache;
2340 int blocks_per_page;
2341 int block;
2342 int pnum;
2343 struct page *page;
2344 int err;
2346 /* Add group based on group descriptor*/
2347 err = ext4_mb_add_groupinfo(sb, group, desc);
2348 if (err)
2349 return err;
2352 * Cache pages containing dynamic mb_alloc data (the buddy and bitmap
2353 * data) are marked not up to date so that they will be re-initialized
2354 * during the next call to ext4_mb_load_buddy
2357 /* Set buddy page as not up to date */
2358 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
2359 block = group * 2;
2360 pnum = block / blocks_per_page;
2361 page = find_get_page(inode->i_mapping, pnum);
2362 if (page != NULL) {
2363 ClearPageUptodate(page);
2364 page_cache_release(page);
2367 /* Set bitmap page as not up to date */
2368 block++;
2369 pnum = block / blocks_per_page;
2370 page = find_get_page(inode->i_mapping, pnum);
2371 if (page != NULL) {
2372 ClearPageUptodate(page);
2373 page_cache_release(page);
2376 return 0;
2380 * Update an existing group.
2381 * This function is used for online resize
2383 void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add)
2385 grp->bb_free += add;
2388 static int ext4_mb_init_backend(struct super_block *sb)
2390 ext4_group_t i;
2391 int metalen;
2392 struct ext4_sb_info *sbi = EXT4_SB(sb);
2393 struct ext4_super_block *es = sbi->s_es;
2394 int num_meta_group_infos;
2395 int num_meta_group_infos_max;
2396 int array_size;
2397 struct ext4_group_info **meta_group_info;
2398 struct ext4_group_desc *desc;
2400 /* This is the number of blocks used by GDT */
2401 num_meta_group_infos = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) -
2402 1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2405 * This is the total number of blocks used by GDT including
2406 * the number of reserved blocks for GDT.
2407 * The s_group_info array is allocated with this value
2408 * to allow a clean online resize without a complex
2409 * manipulation of pointers.
2410 * The drawback is the unused memory when no resize
2411 * occurs, but it's very low in terms of pages
2412 * (see comments below)
2413 * Need to handle this properly when META_BG resizing is allowed
2415 num_meta_group_infos_max = num_meta_group_infos +
2416 le16_to_cpu(es->s_reserved_gdt_blocks);
2419 * array_size is the size of s_group_info array. We round it
2420 * to the next power of two because this approximation is done
2421 * internally by kmalloc so we can have some more memory
2422 * for free here (e.g. may be used for META_BG resize).
2424 array_size = 1;
2425 while (array_size < sizeof(*sbi->s_group_info) *
2426 num_meta_group_infos_max)
2427 array_size = array_size << 1;
2428 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2429 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2430 * So a two level scheme suffices for now. */
2431 sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
2432 if (sbi->s_group_info == NULL) {
2433 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2434 return -ENOMEM;
2436 sbi->s_buddy_cache = new_inode(sb);
2437 if (sbi->s_buddy_cache == NULL) {
2438 printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2439 goto err_freesgi;
2441 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2443 metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb);
2444 for (i = 0; i < num_meta_group_infos; i++) {
2445 if ((i + 1) == num_meta_group_infos)
2446 metalen = sizeof(*meta_group_info) *
2447 (sbi->s_groups_count -
2448 (i << EXT4_DESC_PER_BLOCK_BITS(sb)));
2449 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2450 if (meta_group_info == NULL) {
2451 printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2452 "buddy group\n");
2453 goto err_freemeta;
2455 sbi->s_group_info[i] = meta_group_info;
2458 for (i = 0; i < sbi->s_groups_count; i++) {
2459 desc = ext4_get_group_desc(sb, i, NULL);
2460 if (desc == NULL) {
2461 printk(KERN_ERR
2462 "EXT4-fs: can't read descriptor %u\n", i);
2463 goto err_freebuddy;
2465 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2466 goto err_freebuddy;
2469 return 0;
2471 err_freebuddy:
2472 while (i-- > 0)
2473 kfree(ext4_get_group_info(sb, i));
2474 i = num_meta_group_infos;
2475 err_freemeta:
2476 while (i-- > 0)
2477 kfree(sbi->s_group_info[i]);
2478 iput(sbi->s_buddy_cache);
2479 err_freesgi:
2480 kfree(sbi->s_group_info);
2481 return -ENOMEM;
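/*
 * Illustrative sketch (not part of the original source): the array_size
 * rounding in ext4_mb_init_backend() above is a plain "round up to the
 * next power of two"; since kmalloc rounds up internally anyway, asking
 * for the rounded size makes the slack usable for META_BG resize.
 */
static unsigned long demo_next_pow2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;	/* e.g. demo_next_pow2(3000) == 4096 */
	return p;
}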
2484 int ext4_mb_init(struct super_block *sb, int needs_recovery)
2486 struct ext4_sb_info *sbi = EXT4_SB(sb);
2487 unsigned i, j;
2488 unsigned offset;
2489 unsigned max;
2490 int ret;
2492 i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2494 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2495 if (sbi->s_mb_offsets == NULL) {
2496 return -ENOMEM;
2499 i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
2500 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2501 if (sbi->s_mb_maxs == NULL) {
2502 kfree(sbi->s_mb_offsets);
2503 return -ENOMEM;
2506 /* order 0 is regular bitmap */
2507 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2508 sbi->s_mb_offsets[0] = 0;
2510 i = 1;
2511 offset = 0;
2512 max = sb->s_blocksize << 2;
2513 do {
2514 sbi->s_mb_offsets[i] = offset;
2515 sbi->s_mb_maxs[i] = max;
2516 offset += 1 << (sb->s_blocksize_bits - i);
2517 max = max >> 1;
2518 i++;
2519 } while (i <= sb->s_blocksize_bits + 1);
2521 /* init file for buddy data */
2522 ret = ext4_mb_init_backend(sb);
2523 if (ret != 0) {
2524 kfree(sbi->s_mb_offsets);
2525 kfree(sbi->s_mb_maxs);
2526 return ret;
2529 spin_lock_init(&sbi->s_md_lock);
2530 spin_lock_init(&sbi->s_bal_lock);
2532 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2533 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2534 sbi->s_mb_stats = MB_DEFAULT_STATS;
2535 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2536 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2537 sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2538 sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2540 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2541 if (sbi->s_locality_groups == NULL) {
2542 kfree(sbi->s_mb_offsets);
2543 kfree(sbi->s_mb_maxs);
2544 return -ENOMEM;
2546 for_each_possible_cpu(i) {
2547 struct ext4_locality_group *lg;
2548 lg = per_cpu_ptr(sbi->s_locality_groups, i);
2549 mutex_init(&lg->lg_mutex);
2550 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2551 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2552 spin_lock_init(&lg->lg_prealloc_lock);
2555 ext4_mb_init_per_dev_proc(sb);
2556 ext4_mb_history_init(sb);
2558 if (sbi->s_journal)
2559 sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2561 printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2562 return 0;
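/*
 * Illustrative sketch (not part of the original source): how the
 * s_mb_offsets/s_mb_maxs arrays initialized above unfold for a 4KB
 * block size (s_blocksize_bits == 12). Order 0 is the regular block
 * bitmap; orders 1..bits+1 are packed one after another in the buddy
 * block, each order's bitmap half the size of the previous one.
 */
static void demo_buddy_layout(void)
{
	unsigned int bits = 12;			/* 4KB blocks */
	unsigned int offsets[16], maxs[16];
	unsigned int i = 1, offset = 0;
	unsigned int max = (1U << bits) << 2;	/* order-1 bit count */

	offsets[0] = 0;				/* order 0: block bitmap */
	maxs[0] = (1U << bits) << 3;		/* 32768 blocks per bitmap */
	do {
		offsets[i] = offset;		/* byte offset in buddy block */
		maxs[i] = max;			/* bits at this order */
		if (i < bits + 1)		/* final increment is unused */
			offset += 1U << (bits - i);
		max >>= 1;
		i++;
	} while (i <= bits + 1);
	/* e.g. offsets[2] == 2048, maxs[13] == 4 */
	(void)offsets; (void)maxs;
}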
2565 /* needs to be called with the ext4 group lock held (ext4_lock_group) */
2566 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2568 struct ext4_prealloc_space *pa;
2569 struct list_head *cur, *tmp;
2570 int count = 0;
2572 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2573 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2574 list_del(&pa->pa_group_list);
2575 count++;
2576 kmem_cache_free(ext4_pspace_cachep, pa);
2578 if (count)
2579 mb_debug("mballoc: %u PAs left\n", count);
2583 int ext4_mb_release(struct super_block *sb)
2585 ext4_group_t i;
2586 int num_meta_group_infos;
2587 struct ext4_group_info *grinfo;
2588 struct ext4_sb_info *sbi = EXT4_SB(sb);
2590 if (sbi->s_group_info) {
2591 for (i = 0; i < sbi->s_groups_count; i++) {
2592 grinfo = ext4_get_group_info(sb, i);
2593 #ifdef DOUBLE_CHECK
2594 kfree(grinfo->bb_bitmap);
2595 #endif
2596 ext4_lock_group(sb, i);
2597 ext4_mb_cleanup_pa(grinfo);
2598 ext4_unlock_group(sb, i);
2599 kfree(grinfo);
2601 num_meta_group_infos = (sbi->s_groups_count +
2602 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2603 EXT4_DESC_PER_BLOCK_BITS(sb);
2604 for (i = 0; i < num_meta_group_infos; i++)
2605 kfree(sbi->s_group_info[i]);
2606 kfree(sbi->s_group_info);
2608 kfree(sbi->s_mb_offsets);
2609 kfree(sbi->s_mb_maxs);
2610 if (sbi->s_buddy_cache)
2611 iput(sbi->s_buddy_cache);
2612 if (sbi->s_mb_stats) {
2613 printk(KERN_INFO
2614 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2615 atomic_read(&sbi->s_bal_allocated),
2616 atomic_read(&sbi->s_bal_reqs),
2617 atomic_read(&sbi->s_bal_success));
2618 printk(KERN_INFO
2619 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2620 "%u 2^N hits, %u breaks, %u lost\n",
2621 atomic_read(&sbi->s_bal_ex_scanned),
2622 atomic_read(&sbi->s_bal_goals),
2623 atomic_read(&sbi->s_bal_2orders),
2624 atomic_read(&sbi->s_bal_breaks),
2625 atomic_read(&sbi->s_mb_lost_chunks));
2626 printk(KERN_INFO
2627 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2628 sbi->s_mb_buddies_generated++,
2629 sbi->s_mb_generation_time);
2630 printk(KERN_INFO
2631 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2632 atomic_read(&sbi->s_mb_preallocated),
2633 atomic_read(&sbi->s_mb_discarded));
2636 free_percpu(sbi->s_locality_groups);
2637 ext4_mb_history_release(sb);
2638 ext4_mb_destroy_per_dev_proc(sb);
2640 return 0;
2644 * This function is called by the jbd2 layer once the commit has finished,
2645 * so we know we can free the blocks that were released with that commit.
2647 static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2649 struct super_block *sb = journal->j_private;
2650 struct ext4_buddy e4b;
2651 struct ext4_group_info *db;
2652 int err, count = 0, count2 = 0;
2653 struct ext4_free_data *entry;
2654 ext4_fsblk_t discard_block;
2655 struct list_head *l, *ltmp;
2657 list_for_each_safe(l, ltmp, &txn->t_private_list) {
2658 entry = list_entry(l, struct ext4_free_data, list);
2660 mb_debug("gonna free %u blocks in group %u (0x%p):",
2661 entry->count, entry->group, entry);
2663 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2664 /* we expect to find existing buddy because it's pinned */
2665 BUG_ON(err != 0);
2667 db = e4b.bd_info;
2668 /* there are blocks to put in buddy to make them really free */
2669 count += entry->count;
2670 count2++;
2671 ext4_lock_group(sb, entry->group);
2672 /* Take it out of per group rb tree */
2673 rb_erase(&entry->node, &(db->bb_free_root));
2674 mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
2676 if (!db->bb_free_root.rb_node) {
2677 /* No more items in the per group rb tree
2678 * balance refcounts from ext4_mb_free_metadata()
2680 page_cache_release(e4b.bd_buddy_page);
2681 page_cache_release(e4b.bd_bitmap_page);
2683 ext4_unlock_group(sb, entry->group);
2684 discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
2685 + entry->start_blk
2686 + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
2687 trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u", sb->s_id,
2688 (unsigned long long) discard_block, entry->count);
2689 sb_issue_discard(sb, discard_block, entry->count);
2691 kmem_cache_free(ext4_free_ext_cachep, entry);
2692 ext4_mb_release_desc(&e4b);
2695 mb_debug("freed %u blocks in %u structures\n", count, count2);
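/*
 * Illustrative sketch (not part of the original source): the
 * discard_block computation above maps a (group, offset-in-group) pair
 * to an absolute filesystem block number. A hypothetical standalone
 * version of that arithmetic:
 */
static unsigned long long demo_group_to_fsblk(unsigned int group,
					      unsigned int offset,
					      unsigned int blocks_per_group,
					      unsigned int first_data_block)
{
	/* first_data_block comes from the superblock (s_first_data_block) */
	return (unsigned long long)group * blocks_per_group +
	       offset + first_data_block;
}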
2698 #define EXT4_MB_STATS_NAME "stats"
2699 #define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan"
2700 #define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan"
2701 #define EXT4_MB_ORDER2_REQ "order2_req"
2702 #define EXT4_MB_STREAM_REQ "stream_req"
2703 #define EXT4_MB_GROUP_PREALLOC "group_prealloc"
2705 static int ext4_mb_init_per_dev_proc(struct super_block *sb)
2707 #ifdef CONFIG_PROC_FS
2708 mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
2709 struct ext4_sb_info *sbi = EXT4_SB(sb);
2710 struct proc_dir_entry *proc;
2712 if (sbi->s_proc == NULL)
2713 return -EINVAL;
2715 EXT4_PROC_HANDLER(EXT4_MB_STATS_NAME, mb_stats);
2716 EXT4_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, mb_max_to_scan);
2717 EXT4_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, mb_min_to_scan);
2718 EXT4_PROC_HANDLER(EXT4_MB_ORDER2_REQ, mb_order2_reqs);
2719 EXT4_PROC_HANDLER(EXT4_MB_STREAM_REQ, mb_stream_request);
2720 EXT4_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, mb_group_prealloc);
2721 return 0;
2723 err_out:
2724 remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
2725 remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
2726 remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
2727 remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
2728 remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
2729 remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
2730 return -ENOMEM;
2731 #else
2732 return 0;
2733 #endif
2736 static int ext4_mb_destroy_per_dev_proc(struct super_block *sb)
2738 #ifdef CONFIG_PROC_FS
2739 struct ext4_sb_info *sbi = EXT4_SB(sb);
2741 if (sbi->s_proc == NULL)
2742 return -EINVAL;
2744 remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
2745 remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
2746 remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
2747 remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
2748 remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
2749 remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
2750 #endif
2751 return 0;
2754 int __init init_ext4_mballoc(void)
2756 ext4_pspace_cachep =
2757 kmem_cache_create("ext4_prealloc_space",
2758 sizeof(struct ext4_prealloc_space),
2759 0, SLAB_RECLAIM_ACCOUNT, NULL);
2760 if (ext4_pspace_cachep == NULL)
2761 return -ENOMEM;
2763 ext4_ac_cachep =
2764 kmem_cache_create("ext4_alloc_context",
2765 sizeof(struct ext4_allocation_context),
2766 0, SLAB_RECLAIM_ACCOUNT, NULL);
2767 if (ext4_ac_cachep == NULL) {
2768 kmem_cache_destroy(ext4_pspace_cachep);
2769 return -ENOMEM;
2772 ext4_free_ext_cachep =
2773 kmem_cache_create("ext4_free_block_extents",
2774 sizeof(struct ext4_free_data),
2775 0, SLAB_RECLAIM_ACCOUNT, NULL);
2776 if (ext4_free_ext_cachep == NULL) {
2777 kmem_cache_destroy(ext4_pspace_cachep);
2778 kmem_cache_destroy(ext4_ac_cachep);
2779 return -ENOMEM;
2781 return 0;
2784 void exit_ext4_mballoc(void)
2786 /* XXX: synchronize_rcu(); */
2787 kmem_cache_destroy(ext4_pspace_cachep);
2788 kmem_cache_destroy(ext4_ac_cachep);
2789 kmem_cache_destroy(ext4_free_ext_cachep);
2794 * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
2795 * Returns 0 on success, or an error code
2797 static noinline_for_stack int
2798 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2799 handle_t *handle, unsigned int reserv_blks)
2801 struct buffer_head *bitmap_bh = NULL;
2802 struct ext4_super_block *es;
2803 struct ext4_group_desc *gdp;
2804 struct buffer_head *gdp_bh;
2805 struct ext4_sb_info *sbi;
2806 struct super_block *sb;
2807 ext4_fsblk_t block;
2808 int err, len;
2810 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2811 BUG_ON(ac->ac_b_ex.fe_len <= 0);
2813 sb = ac->ac_sb;
2814 sbi = EXT4_SB(sb);
2815 es = sbi->s_es;
2818 err = -EIO;
2819 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2820 if (!bitmap_bh)
2821 goto out_err;
2823 err = ext4_journal_get_write_access(handle, bitmap_bh);
2824 if (err)
2825 goto out_err;
2827 err = -EIO;
2828 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2829 if (!gdp)
2830 goto out_err;
2832 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
2833 gdp->bg_free_blocks_count);
2835 err = ext4_journal_get_write_access(handle, gdp_bh);
2836 if (err)
2837 goto out_err;
2839 block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
2840 + ac->ac_b_ex.fe_start
2841 + le32_to_cpu(es->s_first_data_block);
2843 len = ac->ac_b_ex.fe_len;
2844 if (in_range(ext4_block_bitmap(sb, gdp), block, len) ||
2845 in_range(ext4_inode_bitmap(sb, gdp), block, len) ||
2846 in_range(block, ext4_inode_table(sb, gdp),
2847 EXT4_SB(sb)->s_itb_per_group) ||
2848 in_range(block + len - 1, ext4_inode_table(sb, gdp),
2849 EXT4_SB(sb)->s_itb_per_group)) {
2850 ext4_error(sb, __func__,
2851 "Allocating block in system zone - block = %llu",
2852 block);
2853 /* The file system is mounted not to panic on error.
2854 * Fix the bitmap and repeat the block allocation;
2855 * we leak some of the blocks here.
2857 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
2858 bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2859 ac->ac_b_ex.fe_len);
2860 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2861 if (!err)
2862 err = -EAGAIN;
2863 goto out_err;
2865 #ifdef AGGRESSIVE_CHECK
2867 int i;
2868 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2869 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2870 bitmap_bh->b_data));
2873 #endif
2874 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
2875 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
2877 spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
2878 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2879 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2880 gdp->bg_free_blocks_count =
2881 cpu_to_le16(ext4_free_blocks_after_init(sb,
2882 ac->ac_b_ex.fe_group,
2883 gdp));
2885 le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
2886 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
2887 spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
2888 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
2890 * Now reduce the dirty block count also. Should not go negative
2892 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2893 /* release all the reserved blocks if non delalloc */
2894 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
2895 else
2896 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
2897 ac->ac_b_ex.fe_len);
2899 if (sbi->s_log_groups_per_flex) {
2900 ext4_group_t flex_group = ext4_flex_group(sbi,
2901 ac->ac_b_ex.fe_group);
2902 spin_lock(sb_bgl_lock(sbi, flex_group));
2903 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
2904 spin_unlock(sb_bgl_lock(sbi, flex_group));
2907 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
2908 if (err)
2909 goto out_err;
2910 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
2912 out_err:
2913 sb->s_dirt = 1;
2914 brelse(bitmap_bh);
2915 return err;
2919 * here we normalize the request for the locality group
2920 * Group requests are normalized to the stripe size if one was set via the
2921 * mount option. If not, we set it to s_mb_group_prealloc, which can be
2922 * configured via /proc/fs/ext4/<partition>/group_prealloc
2924 * XXX: should we try to preallocate more than the group has now?
2926 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2928 struct super_block *sb = ac->ac_sb;
2929 struct ext4_locality_group *lg = ac->ac_lg;
2931 BUG_ON(lg == NULL);
2932 if (EXT4_SB(sb)->s_stripe)
2933 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
2934 else
2935 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2936 mb_debug("#%u: goal %u blocks for locality group\n",
2937 current->pid, ac->ac_g_ex.fe_len);
2941 * Normalization means making the request better in terms of
2942 * size and alignment
2944 static noinline_for_stack void
2945 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2946 struct ext4_allocation_request *ar)
2948 int bsbits, max;
2949 ext4_lblk_t end;
2950 loff_t size, orig_size, start_off;
2951 ext4_lblk_t start, orig_start;
2952 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2953 struct ext4_prealloc_space *pa;
2955 /* normalize only data requests; metadata requests
2956 do not need preallocation */
2957 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2958 return;
2960 /* sometimes the caller may want exact blocks */
2961 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2962 return;
2964 /* caller may indicate that preallocation isn't
2965 * required (it's a tail, for example) */
2966 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2967 return;
2969 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2970 ext4_mb_normalize_group_request(ac);
2971 return ;
2974 bsbits = ac->ac_sb->s_blocksize_bits;
2976 /* first, let's learn the actual file size
2977 * assuming the current request is allocated */
2978 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2979 size = size << bsbits;
2980 if (size < i_size_read(ac->ac_inode))
2981 size = i_size_read(ac->ac_inode);
2983 /* max size of free chunks */
2984 max = 2 << bsbits;
2986 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
2987 (req <= (size) || max <= (chunk_size))
2989 /* first, try to predict filesize */
2990 /* XXX: should this table be tunable? */
2991 start_off = 0;
2992 if (size <= 16 * 1024) {
2993 size = 16 * 1024;
2994 } else if (size <= 32 * 1024) {
2995 size = 32 * 1024;
2996 } else if (size <= 64 * 1024) {
2997 size = 64 * 1024;
2998 } else if (size <= 128 * 1024) {
2999 size = 128 * 1024;
3000 } else if (size <= 256 * 1024) {
3001 size = 256 * 1024;
3002 } else if (size <= 512 * 1024) {
3003 size = 512 * 1024;
3004 } else if (size <= 1024 * 1024) {
3005 size = 1024 * 1024;
3006 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3007 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3008 (21 - bsbits)) << 21;
3009 size = 2 * 1024 * 1024;
3010 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3011 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3012 (22 - bsbits)) << 22;
3013 size = 4 * 1024 * 1024;
3014 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3015 (8<<20)>>bsbits, max, 8 * 1024)) {
3016 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3017 (23 - bsbits)) << 23;
3018 size = 8 * 1024 * 1024;
3019 } else {
3020 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
3021 size = ac->ac_o_ex.fe_len << bsbits;
3023 orig_size = size = size >> bsbits;
3024 orig_start = start = start_off >> bsbits;
3026 /* don't cover already allocated blocks in selected range */
3027 if (ar->pleft && start <= ar->lleft) {
3028 size -= ar->lleft + 1 - start;
3029 start = ar->lleft + 1;
3031 if (ar->pright && start + size - 1 >= ar->lright)
3032 size -= start + size - ar->lright;
3034 end = start + size;
3036 /* check we don't cross already preallocated blocks */
3037 rcu_read_lock();
3038 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3039 ext4_lblk_t pa_end;
3041 if (pa->pa_deleted)
3042 continue;
3043 spin_lock(&pa->pa_lock);
3044 if (pa->pa_deleted) {
3045 spin_unlock(&pa->pa_lock);
3046 continue;
3049 pa_end = pa->pa_lstart + pa->pa_len;
3051 /* PA must not overlap original request */
3052 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3053 ac->ac_o_ex.fe_logical < pa->pa_lstart));
3055 /* skip PAs that the normalized request doesn't overlap with */
3056 if (pa->pa_lstart >= end) {
3057 spin_unlock(&pa->pa_lock);
3058 continue;
3060 if (pa_end <= start) {
3061 spin_unlock(&pa->pa_lock);
3062 continue;
3064 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3066 if (pa_end <= ac->ac_o_ex.fe_logical) {
3067 BUG_ON(pa_end < start);
3068 start = pa_end;
3071 if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3072 BUG_ON(pa->pa_lstart > end);
3073 end = pa->pa_lstart;
3075 spin_unlock(&pa->pa_lock);
3077 rcu_read_unlock();
3078 size = end - start;
3080 /* XXX: extra loop to check we really don't overlap preallocations */
3081 rcu_read_lock();
3082 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3083 ext4_lblk_t pa_end;
3084 spin_lock(&pa->pa_lock);
3085 if (pa->pa_deleted == 0) {
3086 pa_end = pa->pa_lstart + pa->pa_len;
3087 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3089 spin_unlock(&pa->pa_lock);
3091 rcu_read_unlock();
3093 if (start + size <= ac->ac_o_ex.fe_logical &&
3094 start > ac->ac_o_ex.fe_logical) {
3095 printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3096 (unsigned long) start, (unsigned long) size,
3097 (unsigned long) ac->ac_o_ex.fe_logical);
3099 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3100 start > ac->ac_o_ex.fe_logical);
3101 BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3103 /* now prepare goal request */
3105 /* XXX: is it better to align blocks with respect to logical
3106 * placement or to satisfy a big request as is */
3107 ac->ac_g_ex.fe_logical = start;
3108 ac->ac_g_ex.fe_len = size;
3110 /* define goal start in order to merge */
3111 if (ar->pright && (ar->lright == (start + size))) {
3112 /* merge to the right */
3113 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3114 &ac->ac_f_ex.fe_group,
3115 &ac->ac_f_ex.fe_start);
3116 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3118 if (ar->pleft && (ar->lleft + 1 == start)) {
3119 /* merge to the left */
3120 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3121 &ac->ac_f_ex.fe_group,
3122 &ac->ac_f_ex.fe_start);
3123 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3126 mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3127 (unsigned) orig_size, (unsigned) start);
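/*
 * Illustrative sketch (not part of the original source): the size table
 * in ext4_mb_normalize_request() above rounds a predicted file size up
 * to a preallocation bucket. A simplified model covering only the small
 * power-of-two buckets (the real code also handles 2/4/8MB buckets with
 * start_off alignment):
 */
static unsigned long demo_predict_size(unsigned long size)
{
	unsigned long bucket = 16 * 1024;	/* smallest bucket: 16KB */

	while (bucket < size && bucket < 1024 * 1024)
		bucket <<= 1;
	return bucket >= size ? bucket : size;	/* large sizes: see table */
}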
3130 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3132 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3134 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3135 atomic_inc(&sbi->s_bal_reqs);
3136 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3137 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3138 atomic_inc(&sbi->s_bal_success);
3139 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3140 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3141 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3142 atomic_inc(&sbi->s_bal_goals);
3143 if (ac->ac_found > sbi->s_mb_max_to_scan)
3144 atomic_inc(&sbi->s_bal_breaks);
3147 ext4_mb_store_history(ac);
3151 * use blocks preallocated to inode
3153 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3154 struct ext4_prealloc_space *pa)
3156 ext4_fsblk_t start;
3157 ext4_fsblk_t end;
3158 int len;
3160 /* found preallocated blocks, use them */
3161 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3162 end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3163 len = end - start;
3164 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3165 &ac->ac_b_ex.fe_start);
3166 ac->ac_b_ex.fe_len = len;
3167 ac->ac_status = AC_STATUS_FOUND;
3168 ac->ac_pa = pa;
3170 BUG_ON(start < pa->pa_pstart);
3171 BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3172 BUG_ON(pa->pa_free < len);
3173 pa->pa_free -= len;
3175 mb_debug("use %llu/%u from inode pa %p\n", start, len, pa);
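/*
 * Illustrative sketch (not part of the original source): how a request
 * is mapped into an inode PA above -- shift by the logical offset into
 * the PA, then clamp the length to what the PA still covers. All names
 * here are hypothetical stand-ins for the pa/ac fields:
 */
static unsigned int demo_use_inode_pa(unsigned long long pa_pstart,
				      unsigned long long pa_lstart,
				      unsigned int pa_len,
				      unsigned long long req_logical,
				      unsigned int req_len,
				      unsigned long long *phys_start)
{
	unsigned long long start = pa_pstart + (req_logical - pa_lstart);
	unsigned long long end = pa_pstart + pa_len;

	if (end > start + req_len)		/* min(pa end, request end) */
		end = start + req_len;
	*phys_start = start;
	return (unsigned int)(end - start);	/* blocks actually taken */
}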
3179 * use blocks preallocated to locality group
3181 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3182 struct ext4_prealloc_space *pa)
3184 unsigned int len = ac->ac_o_ex.fe_len;
3186 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3187 &ac->ac_b_ex.fe_group,
3188 &ac->ac_b_ex.fe_start);
3189 ac->ac_b_ex.fe_len = len;
3190 ac->ac_status = AC_STATUS_FOUND;
3191 ac->ac_pa = pa;
3193 /* we don't correct pa_pstart or pa_len here to avoid a
3194 * possible race when the group is being loaded concurrently;
3195 * instead we correct the pa later, after blocks are marked
3196 * in the on-disk bitmap -- see ext4_mb_release_context().
3197 * Other CPUs are prevented from allocating from this pa by lg_mutex
3199 mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3203 * Return the prealloc space that has the minimal distance
3204 * from the goal block. @cpa is the prealloc
3205 * space with the currently known minimal distance
3206 * from the goal block.
3208 static struct ext4_prealloc_space *
3209 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3210 struct ext4_prealloc_space *pa,
3211 struct ext4_prealloc_space *cpa)
3213 ext4_fsblk_t cur_distance, new_distance;
3215 if (cpa == NULL) {
3216 atomic_inc(&pa->pa_count);
3217 return pa;
3219 cur_distance = abs(goal_block - cpa->pa_pstart);
3220 new_distance = abs(goal_block - pa->pa_pstart);
3222 if (cur_distance < new_distance)
3223 return cpa;
3225 /* drop the previous reference */
3226 atomic_dec(&cpa->pa_count);
3227 atomic_inc(&pa->pa_count);
3228 return pa;
3232 * search goal blocks in preallocated space
3234 static noinline_for_stack int
3235 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3237 int order, i;
3238 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3239 struct ext4_locality_group *lg;
3240 struct ext4_prealloc_space *pa, *cpa = NULL;
3241 ext4_fsblk_t goal_block;
3243 /* only data can be preallocated */
3244 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3245 return 0;
3247 /* first, try per-file preallocation */
3248 rcu_read_lock();
3249 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3251 /* all fields in this condition don't change,
3252 * so we can skip locking for them */
3253 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3254 ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3255 continue;
3257 /* found preallocated blocks, use them */
3258 spin_lock(&pa->pa_lock);
3259 if (pa->pa_deleted == 0 && pa->pa_free) {
3260 atomic_inc(&pa->pa_count);
3261 ext4_mb_use_inode_pa(ac, pa);
3262 spin_unlock(&pa->pa_lock);
3263 ac->ac_criteria = 10;
3264 rcu_read_unlock();
3265 return 1;
3267 spin_unlock(&pa->pa_lock);
3269 rcu_read_unlock();
3271 /* can we use group allocation? */
3272 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3273 return 0;
3275 /* inode may have no locality group for some reason */
3276 lg = ac->ac_lg;
3277 if (lg == NULL)
3278 return 0;
3279 order = fls(ac->ac_o_ex.fe_len) - 1;
3280 if (order > PREALLOC_TB_SIZE - 1)
3281 /* The max size of hash table is PREALLOC_TB_SIZE */
3282 order = PREALLOC_TB_SIZE - 1;
3284 goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
3285 ac->ac_g_ex.fe_start +
3286 le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
3288 * search for the prealloc space with the
3289 * minimal distance from the goal block.
3291 for (i = order; i < PREALLOC_TB_SIZE; i++) {
3292 rcu_read_lock();
3293 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3294 pa_inode_list) {
3295 spin_lock(&pa->pa_lock);
3296 if (pa->pa_deleted == 0 &&
3297 pa->pa_free >= ac->ac_o_ex.fe_len) {
3299 cpa = ext4_mb_check_group_pa(goal_block,
3300 pa, cpa);
3302 spin_unlock(&pa->pa_lock);
3304 rcu_read_unlock();
3306 if (cpa) {
3307 ext4_mb_use_group_pa(ac, cpa);
3308 ac->ac_criteria = 20;
3309 return 1;
3311 return 0;
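/*
 * Illustrative sketch (not part of the original source): locality-group
 * PAs are hashed by size into PREALLOC_TB_SIZE power-of-two buckets, and
 * the scan above starts at the bucket for fls(len) - 1. A hypothetical
 * plain-C version of that bucket selection:
 */
static int demo_pa_bucket(unsigned int len, int table_size)
{
	int order = 0;

	while (len >>= 1)	/* computes fls(len) - 1 for len > 0 */
		order++;
	return order < table_size ? order : table_size - 1;
}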
3315 * the function goes through all preallocations in this group and marks them
3316 * used in the in-core bitmap. The buddy must be generated from this bitmap.
3317 * Needs to be called with the ext4 group lock held (ext4_lock_group)
3319 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3320 ext4_group_t group)
3322 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3323 struct ext4_prealloc_space *pa;
3324 struct list_head *cur;
3325 ext4_group_t groupnr;
3326 ext4_grpblk_t start;
3327 int preallocated = 0;
3328 int count = 0;
3329 int len;
3331 /* all forms of preallocation discard first load the group,
3332 * so the only competing code is preallocation use.
3333 * we don't need any locking here.
3334 * notice that we do NOT ignore preallocations with pa_deleted set;
3335 * otherwise we could leave used blocks available for
3336 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3337 * is dropping the preallocation
3339 list_for_each(cur, &grp->bb_prealloc_list) {
3340 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3341 spin_lock(&pa->pa_lock);
3342 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3343 &groupnr, &start);
3344 len = pa->pa_len;
3345 spin_unlock(&pa->pa_lock);
3346 if (unlikely(len == 0))
3347 continue;
3348 BUG_ON(groupnr != group);
3349 mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3350 bitmap, start, len);
3351 preallocated += len;
3352 count++;
3354 mb_debug("prellocated %u for group %u\n", preallocated, group);
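/*
 * Illustrative sketch (not part of the original source): marking a PA's
 * range as used in a byte-array bitmap, as mb_set_bits() does above for
 * the in-core bitmap the buddy is generated from:
 */
static void demo_set_bits(unsigned char *bitmap, unsigned int start,
			  unsigned int len)
{
	while (len--) {
		bitmap[start >> 3] |= 1U << (start & 7);
		start++;
	}
}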
3357 static void ext4_mb_pa_callback(struct rcu_head *head)
3359 struct ext4_prealloc_space *pa;
3360 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3361 kmem_cache_free(ext4_pspace_cachep, pa);
3365 * drops a reference to preallocated space descriptor
3366 * if this was the last reference and the space is consumed
3368 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3369 struct super_block *sb, struct ext4_prealloc_space *pa)
3371 ext4_group_t grp;
3373 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3374 return;
3376 /* in this short window concurrent discard can set pa_deleted */
3377 spin_lock(&pa->pa_lock);
3378 if (pa->pa_deleted == 1) {
3379 spin_unlock(&pa->pa_lock);
3380 return;
3383 pa->pa_deleted = 1;
3384 spin_unlock(&pa->pa_lock);
3386 /* -1 is to protect from crossing allocation group */
3387 ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
3390 * possible race:
3392 * P1 (buddy init) P2 (regular allocation)
3393 * find block B in PA
3394 * copy on-disk bitmap to buddy
3395 * mark B in on-disk bitmap
3396 * drop PA from group
3397 * mark all PAs in buddy
3399 * thus, P1 initializes buddy with B available. to prevent this
3400 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3401 * against that pair
3403 ext4_lock_group(sb, grp);
3404 list_del(&pa->pa_group_list);
3405 ext4_unlock_group(sb, grp);
3407 spin_lock(pa->pa_obj_lock);
3408 list_del_rcu(&pa->pa_inode_list);
3409 spin_unlock(pa->pa_obj_lock);
3411 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3415 * creates new preallocated space for given inode
3417 static noinline_for_stack int
3418 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3420 struct super_block *sb = ac->ac_sb;
3421 struct ext4_prealloc_space *pa;
3422 struct ext4_group_info *grp;
3423 struct ext4_inode_info *ei;
3425 /* preallocate only when the found space is larger than requested */
3426 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3427 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3428 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3430 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3431 if (pa == NULL)
3432 return -ENOMEM;
3434 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3435 int winl;
3436 int wins;
3437 int win;
3438 int offs;
3440 /* we can't allocate as much as the normalizer wants,
3441 * so the found space must get a proper lstart
3442 * to cover the original request */
3443 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3444 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3446 /* we're limited by the original request in that
3447 * the logical block must be covered anyway;
3448 * winl is the window we can move our chunk within */
3449 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3451 /* also, we should cover the whole original request */
3452 wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3454 /* the smallest one defines the real window */
3455 win = min(winl, wins);
3457 offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3458 if (offs && offs < win)
3459 win = offs;
3461 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3462 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3463 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3466 /* preallocation can change ac_b_ex, thus we store actually
3467 * allocated blocks for history */
3468 ac->ac_f_ex = ac->ac_b_ex;
3470 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3471 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3472 pa->pa_len = ac->ac_b_ex.fe_len;
3473 pa->pa_free = pa->pa_len;
3474 atomic_set(&pa->pa_count, 1);
3475 spin_lock_init(&pa->pa_lock);
3476 pa->pa_deleted = 0;
3477 pa->pa_linear = 0;
3479 mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3480 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3482 ext4_mb_use_inode_pa(ac, pa);
3483 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3485 ei = EXT4_I(ac->ac_inode);
3486 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3488 pa->pa_obj_lock = &ei->i_prealloc_lock;
3489 pa->pa_inode = ac->ac_inode;
3491 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3492 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3493 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3495 spin_lock(pa->pa_obj_lock);
3496 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3497 spin_unlock(pa->pa_obj_lock);
3499 return 0;
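/*
 * Illustrative sketch (not part of the original source): choosing the
 * PA's lstart when the found extent is shorter than the normalized goal,
 * as the winl/wins/offs window logic above does. Parameter names are
 * hypothetical stand-ins for the ac_o_ex/ac_g_ex/ac_b_ex fields:
 */
static unsigned long long demo_pa_lstart(unsigned long long o_logical,
					 unsigned long long g_logical,
					 unsigned int o_len,
					 unsigned int b_len)
{
	unsigned long long winl = o_logical - g_logical;	/* slide room */
	unsigned long long wins = b_len - o_len;		/* size slack */
	unsigned long long win = winl < wins ? winl : wins;
	unsigned long long offs = o_logical % b_len;		/* alignment */

	if (offs && offs < win)
		win = offs;
	return o_logical - win;	/* PA still covers the original block */
}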
3503 * creates new preallocated space for the locality group the inode belongs to
3505 static noinline_for_stack int
3506 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3508 struct super_block *sb = ac->ac_sb;
3509 struct ext4_locality_group *lg;
3510 struct ext4_prealloc_space *pa;
3511 struct ext4_group_info *grp;
3513 /* preallocate only when the found space is larger than requested */
3514 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3515 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3516 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3518 BUG_ON(ext4_pspace_cachep == NULL);
3519 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3520 if (pa == NULL)
3521 return -ENOMEM;
3523 /* preallocation can change ac_b_ex, thus we store actually
3524 * allocated blocks for history */
3525 ac->ac_f_ex = ac->ac_b_ex;
3527 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3528 pa->pa_lstart = pa->pa_pstart;
3529 pa->pa_len = ac->ac_b_ex.fe_len;
3530 pa->pa_free = pa->pa_len;
3531 atomic_set(&pa->pa_count, 1);
3532 spin_lock_init(&pa->pa_lock);
3533 INIT_LIST_HEAD(&pa->pa_inode_list);
3534 pa->pa_deleted = 0;
3535 pa->pa_linear = 1;
3537 mb_debug("new group pa %p: %llu/%u for %u\n", pa,
3538 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3540 ext4_mb_use_group_pa(ac, pa);
3541 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3543 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3544 lg = ac->ac_lg;
3545 BUG_ON(lg == NULL);
3547 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3548 pa->pa_inode = NULL;
3550 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3551 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3552 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3555 * We will later add the new pa to the right bucket
3556 * after updating the pa_free in ext4_mb_release_context
3558 return 0;
3561 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3563 int err;
3565 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3566 err = ext4_mb_new_group_pa(ac);
3567 else
3568 err = ext4_mb_new_inode_pa(ac);
3569 return err;
3573 * finds all unused blocks in the on-disk bitmap and frees them in the
3574 * in-core bitmap and buddy.
3575 * @pa must be unlinked from inode and group lists, so that
3576 * nobody else can find/use it.
3577 * the caller MUST hold group/inode locks.
3578 * TODO: optimize the case when there are no in-core structures yet
3580 static noinline_for_stack int
3581 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3582 struct ext4_prealloc_space *pa,
3583 struct ext4_allocation_context *ac)
3585 struct super_block *sb = e4b->bd_sb;
3586 struct ext4_sb_info *sbi = EXT4_SB(sb);
3587 unsigned int end;
3588 unsigned int next;
3589 ext4_group_t group;
3590 ext4_grpblk_t bit;
3591 sector_t start;
3592 int err = 0;
3593 int free = 0;
3595 BUG_ON(pa->pa_deleted == 0);
3596 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3597 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3598 end = bit + pa->pa_len;
3600 if (ac) {
3601 ac->ac_sb = sb;
3602 ac->ac_inode = pa->pa_inode;
3603 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3606 while (bit < end) {
3607 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3608 if (bit >= end)
3609 break;
3610 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3611 start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
3612 le32_to_cpu(sbi->s_es->s_first_data_block);
3613 mb_debug(" free preallocated %u/%u in group %u\n",
3614 (unsigned) start, (unsigned) next - bit,
3615 (unsigned) group);
3616 free += next - bit;
3618 if (ac) {
3619 ac->ac_b_ex.fe_group = group;
3620 ac->ac_b_ex.fe_start = bit;
3621 ac->ac_b_ex.fe_len = next - bit;
3622 ac->ac_b_ex.fe_logical = 0;
3623 ext4_mb_store_history(ac);
3626 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3627 bit = next + 1;
3629 if (free != pa->pa_free) {
3630 printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
3631 pa, (unsigned long) pa->pa_lstart,
3632 (unsigned long) pa->pa_pstart,
3633 (unsigned long) pa->pa_len);
3634 ext4_error(sb, __func__, "free %u, pa_free %u",
3635 free, pa->pa_free);
3637 * pa is already deleted so we use the value obtained
3638 * from the bitmap and continue.
3641 atomic_add(free, &sbi->s_mb_discarded);
3643 return err;
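/*
 * Illustrative sketch (not part of the original source): the release
 * loop above walks runs of zero bits (blocks never handed out from the
 * PA) between used bits. A simplified plain-C count of such free bits
 * over a byte-array bitmap, without the run-walking optimization:
 */
static unsigned int demo_count_free(const unsigned char *bitmap,
				    unsigned int start, unsigned int end)
{
	unsigned int bit, free = 0;

	for (bit = start; bit < end; bit++)
		if (!(bitmap[bit >> 3] & (1U << (bit & 7))))
			free++;	/* still free: return it to the buddy */
	return free;
}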
3646 static noinline_for_stack int
3647 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3648 struct ext4_prealloc_space *pa,
3649 struct ext4_allocation_context *ac)
3651 struct super_block *sb = e4b->bd_sb;
3652 ext4_group_t group;
3653 ext4_grpblk_t bit;
3655 if (ac)
3656 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3658 BUG_ON(pa->pa_deleted == 0);
3659 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3660 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3661 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3662 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3664 if (ac) {
3665 ac->ac_sb = sb;
3666 ac->ac_inode = NULL;
3667 ac->ac_b_ex.fe_group = group;
3668 ac->ac_b_ex.fe_start = bit;
3669 ac->ac_b_ex.fe_len = pa->pa_len;
3670 ac->ac_b_ex.fe_logical = 0;
3671 ext4_mb_store_history(ac);
3674 return 0;
3678 * releases all preallocations in given group
3680 * first, we need to decide discard policy:
3681 * - when do we discard
3682 * 1) ENOSPC
3683 * - how many do we discard
3684 * 1) how many requested
3686 static noinline_for_stack int
3687 ext4_mb_discard_group_preallocations(struct super_block *sb,
3688 ext4_group_t group, int needed)
3690 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3691 struct buffer_head *bitmap_bh = NULL;
3692 struct ext4_prealloc_space *pa, *tmp;
3693 struct ext4_allocation_context *ac;
3694 struct list_head list;
3695 struct ext4_buddy e4b;
3696 int err;
3697 int busy = 0;
3698 int free = 0;
3700 mb_debug("discard preallocation for group %u\n", group);
3702 if (list_empty(&grp->bb_prealloc_list))
3703 return 0;
3705 bitmap_bh = ext4_read_block_bitmap(sb, group);
3706 if (bitmap_bh == NULL) {
3707 ext4_error(sb, __func__, "Error in reading block "
3708 "bitmap for %u", group);
3709 return 0;
3712 err = ext4_mb_load_buddy(sb, group, &e4b);
3713 if (err) {
3714 ext4_error(sb, __func__, "Error in loading buddy "
3715 "information for %u", group);
3716 put_bh(bitmap_bh);
3717 return 0;
3720 if (needed == 0)
3721 needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3723 INIT_LIST_HEAD(&list);
3724 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3725 repeat:
3726 ext4_lock_group(sb, group);
3727 list_for_each_entry_safe(pa, tmp,
3728 &grp->bb_prealloc_list, pa_group_list) {
3729 spin_lock(&pa->pa_lock);
3730 if (atomic_read(&pa->pa_count)) {
3731 spin_unlock(&pa->pa_lock);
3732 busy = 1;
3733 continue;
3735 if (pa->pa_deleted) {
3736 spin_unlock(&pa->pa_lock);
3737 continue;
3740 /* seems this one can be freed ... */
3741 pa->pa_deleted = 1;
3743 /* we can trust pa_free ... */
3744 free += pa->pa_free;
3746 spin_unlock(&pa->pa_lock);
3748 list_del(&pa->pa_group_list);
3749 list_add(&pa->u.pa_tmp_list, &list);
3752 /* if we still need more blocks and some PAs were used, try again */
3753 if (free < needed && busy) {
3754 busy = 0;
3755 ext4_unlock_group(sb, group);
3757 * Yield the CPU here so that we don't get a soft lockup
3758 * in the non-preempt case.
3760 yield();
3761 goto repeat;
3764 /* found anything to free? */
3765 if (list_empty(&list)) {
3766 BUG_ON(free != 0);
3767 goto out;
3770 /* now free all selected PAs */
3771 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3773 /* remove from object (inode or locality group) */
3774 spin_lock(pa->pa_obj_lock);
3775 list_del_rcu(&pa->pa_inode_list);
3776 spin_unlock(pa->pa_obj_lock);
3778 if (pa->pa_linear)
3779 ext4_mb_release_group_pa(&e4b, pa, ac);
3780 else
3781 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3783 list_del(&pa->u.pa_tmp_list);
3784 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3787 out:
3788 ext4_unlock_group(sb, group);
3789 if (ac)
3790 kmem_cache_free(ext4_ac_cachep, ac);
3791 ext4_mb_release_desc(&e4b);
3792 put_bh(bitmap_bh);
3793 return free;
3797 * releases all unused preallocated blocks for the given inode
3799 * It's important to discard preallocations under i_data_sem
3800 * We don't want another block to be served from the prealloc
3801 * space when we are discarding the inode prealloc space.
3803 * FIXME!! Make sure it is valid at all the call sites
3805 void ext4_discard_preallocations(struct inode *inode)
3807 struct ext4_inode_info *ei = EXT4_I(inode);
3808 struct super_block *sb = inode->i_sb;
3809 struct buffer_head *bitmap_bh = NULL;
3810 struct ext4_prealloc_space *pa, *tmp;
3811 struct ext4_allocation_context *ac;
3812 ext4_group_t group = 0;
3813 struct list_head list;
3814 struct ext4_buddy e4b;
3815 int err;
3817 if (!S_ISREG(inode->i_mode)) {
3818 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3819 return;
3822 mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
3824 INIT_LIST_HEAD(&list);
3826 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3827 repeat:
3828 /* first, collect all pa's in the inode */
3829 spin_lock(&ei->i_prealloc_lock);
3830 while (!list_empty(&ei->i_prealloc_list)) {
3831 pa = list_entry(ei->i_prealloc_list.next,
3832 struct ext4_prealloc_space, pa_inode_list);
3833 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3834 spin_lock(&pa->pa_lock);
3835 if (atomic_read(&pa->pa_count)) {
3836 /* this shouldn't happen often - nobody should
3837 * use preallocation while we're discarding it */
3838 spin_unlock(&pa->pa_lock);
3839 spin_unlock(&ei->i_prealloc_lock);
3840 printk(KERN_ERR "uh-oh! used pa while discarding\n");
3841 WARN_ON(1);
3842 schedule_timeout_uninterruptible(HZ);
3843 goto repeat;
3846 if (pa->pa_deleted == 0) {
3847 pa->pa_deleted = 1;
3848 spin_unlock(&pa->pa_lock);
3849 list_del_rcu(&pa->pa_inode_list);
3850 list_add(&pa->u.pa_tmp_list, &list);
3851 continue;
3854 /* someone is deleting pa right now */
3855 spin_unlock(&pa->pa_lock);
3856 spin_unlock(&ei->i_prealloc_lock);
3858 /* we have to wait here because pa_deleted
3859 * doesn't mean the pa is already unlinked from
3860 * the list. As we might be called from
3861 * ->clear_inode(), the inode will get freed,
3862 * and a concurrent thread which is unlinking
3863 * the pa from the inode's list may access already
3864 * freed memory, bad-bad-bad */
3866 /* XXX: if this happens too often, we can
3867 * add a flag to force wait only in case
3868 * of ->clear_inode(), but not in case of
3869 * regular truncate */
3870 schedule_timeout_uninterruptible(HZ);
3871 goto repeat;
3873 spin_unlock(&ei->i_prealloc_lock);
3875 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3876 BUG_ON(pa->pa_linear != 0);
3877 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3879 err = ext4_mb_load_buddy(sb, group, &e4b);
3880 if (err) {
3881 ext4_error(sb, __func__, "Error in loading buddy "
3882 "information for %u", group);
3883 continue;
3886 bitmap_bh = ext4_read_block_bitmap(sb, group);
3887 if (bitmap_bh == NULL) {
3888 ext4_error(sb, __func__, "Error in reading block "
3889 "bitmap for %u", group);
3890 ext4_mb_release_desc(&e4b);
3891 continue;
3892 }
3894 ext4_lock_group(sb, group);
3895 list_del(&pa->pa_group_list);
3896 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3897 ext4_unlock_group(sb, group);
3899 ext4_mb_release_desc(&e4b);
3900 put_bh(bitmap_bh);
3902 list_del(&pa->u.pa_tmp_list);
3903 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3904 }
3905 if (ac)
3906 kmem_cache_free(ext4_ac_cachep, ac);
3907 }
3909 /*
3910 * finds all preallocated spaces and returns blocks being freed to them
3911 * if a preallocated space becomes fully unused (no block is used from
3912 * the space) then the function frees the space in the buddy
3913 * XXX: at the moment, truncate (which is the only way to free blocks)
3914 * discards all preallocations
3915 */
3916 static void ext4_mb_return_to_preallocation(struct inode *inode,
3917 struct ext4_buddy *e4b,
3918 sector_t block, int count)
3919 {
3920 BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
3921 }
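/*
 * Note: per the XXX above, truncate discards all of an inode's
 * preallocations before any blocks are freed, so i_prealloc_list must
 * be empty by the time this runs; the BUG_ON is currently the entire
 * body, and no blocks are actually returned to a prealloc space yet.
 */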
3922 #ifdef MB_DEBUG
3923 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3924 {
3925 struct super_block *sb = ac->ac_sb;
3926 ext4_group_t i;
3928 printk(KERN_ERR "EXT4-fs: Can't allocate:"
3929 " Allocation context details:\n");
3930 printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
3931 ac->ac_status, ac->ac_flags);
3932 printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
3933 "best %lu/%lu/%lu@%lu cr %d\n",
3934 (unsigned long)ac->ac_o_ex.fe_group,
3935 (unsigned long)ac->ac_o_ex.fe_start,
3936 (unsigned long)ac->ac_o_ex.fe_len,
3937 (unsigned long)ac->ac_o_ex.fe_logical,
3938 (unsigned long)ac->ac_g_ex.fe_group,
3939 (unsigned long)ac->ac_g_ex.fe_start,
3940 (unsigned long)ac->ac_g_ex.fe_len,
3941 (unsigned long)ac->ac_g_ex.fe_logical,
3942 (unsigned long)ac->ac_b_ex.fe_group,
3943 (unsigned long)ac->ac_b_ex.fe_start,
3944 (unsigned long)ac->ac_b_ex.fe_len,
3945 (unsigned long)ac->ac_b_ex.fe_logical,
3946 (int)ac->ac_criteria);
3947 printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
3948 ac->ac_found);
3949 printk(KERN_ERR "EXT4-fs: groups: \n");
3950 for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
3951 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3952 struct ext4_prealloc_space *pa;
3953 ext4_grpblk_t start;
3954 struct list_head *cur;
3955 ext4_lock_group(sb, i);
3956 list_for_each(cur, &grp->bb_prealloc_list) {
3957 pa = list_entry(cur, struct ext4_prealloc_space,
3958 pa_group_list);
3959 spin_lock(&pa->pa_lock);
3960 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3961 NULL, &start);
3962 spin_unlock(&pa->pa_lock);
3963 printk(KERN_ERR "PA:%lu:%d:%u \n", i,
3964 start, pa->pa_len);
3965 }
3966 ext4_unlock_group(sb, i);
3968 if (grp->bb_free == 0)
3969 continue;
3970 printk(KERN_ERR "%lu: %d/%d \n",
3971 i, grp->bb_free, grp->bb_fragments);
3972 }
3973 printk(KERN_ERR "\n");
3974 }
3975 #else
3976 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3977 {
3978 return;
3979 }
3980 #endif
3982 /*
3983 * We use locality group preallocation for small files. The size of the
3984 * file is determined by the current size or the resulting size after
3985 * allocation, whichever is larger
3987 * One can tune this size via /proc/fs/ext4/<partition>/stream_req
3988 */
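/*
 * Illustrative example (numbers assumed, not from a real run): with
 * 4KiB blocks and the default s_mb_stream_request of 16 blocks, an
 * 8-block write at logical block 0 into an empty file gives
 * size = 0 + 8 = 8 and isize = 0, so max(8, 0) = 8 < 16 and the
 * request is served from the per-CPU locality group; a 64-block write
 * would exceed the threshold and use inode preallocation instead.
 */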
3989 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3990 {
3991 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3992 int bsbits = ac->ac_sb->s_blocksize_bits;
3993 loff_t size, isize;
3995 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3996 return;
3998 size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3999 isize = i_size_read(ac->ac_inode) >> bsbits;
4000 size = max(size, isize);
4002 /* don't use group allocation for large files */
4003 if (size >= sbi->s_mb_stream_request)
4004 return;
4006 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4007 return;
4009 BUG_ON(ac->ac_lg != NULL);
4010 /*
4011 * locality group prealloc space is per cpu. The reason for having
4012 * per-cpu locality groups is to reduce contention between block
4013 * requests from multiple CPUs.
4014 */
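/*
 * Sketch of the effect (simplified): with N CPUs there are N
 * independent struct ext4_locality_group instances, so small-file
 * writers running on different CPUs take different lg_mutex locks and
 * consume different preallocation spaces, instead of serializing on
 * one shared lock and one shared space.
 */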
4015 ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
4017 /* we're going to use group allocation */
4018 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4020 /* serialize all allocations in the group */
4021 mutex_lock(&ac->ac_lg->lg_mutex);
4022 }
4024 static noinline_for_stack int
4025 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4026 struct ext4_allocation_request *ar)
4027 {
4028 struct super_block *sb = ar->inode->i_sb;
4029 struct ext4_sb_info *sbi = EXT4_SB(sb);
4030 struct ext4_super_block *es = sbi->s_es;
4031 ext4_group_t group;
4032 unsigned int len;
4033 ext4_fsblk_t goal;
4034 ext4_grpblk_t block;
4036 /* we can't allocate > group size */
4037 len = ar->len;
4039 /* just a dirty hack to filter too big requests */
4040 if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4041 len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
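/*
 * For example (illustrative figures): with 32768 blocks per group, a
 * request for 40000 blocks is clamped to 32758
 * (EXT4_BLOCKS_PER_GROUP(sb) - 10), since a single allocation can
 * never span a group boundary.
 */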
4043 /* start searching from the goal */
4044 goal = ar->goal;
4045 if (goal < le32_to_cpu(es->s_first_data_block) ||
4046 goal >= ext4_blocks_count(es))
4047 goal = le32_to_cpu(es->s_first_data_block);
4048 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4050 /* set up allocation goals */
4051 ac->ac_b_ex.fe_logical = ar->logical;
4052 ac->ac_b_ex.fe_group = 0;
4053 ac->ac_b_ex.fe_start = 0;
4054 ac->ac_b_ex.fe_len = 0;
4055 ac->ac_status = AC_STATUS_CONTINUE;
4056 ac->ac_groups_scanned = 0;
4057 ac->ac_ex_scanned = 0;
4058 ac->ac_found = 0;
4059 ac->ac_sb = sb;
4060 ac->ac_inode = ar->inode;
4061 ac->ac_o_ex.fe_logical = ar->logical;
4062 ac->ac_o_ex.fe_group = group;
4063 ac->ac_o_ex.fe_start = block;
4064 ac->ac_o_ex.fe_len = len;
4065 ac->ac_g_ex.fe_logical = ar->logical;
4066 ac->ac_g_ex.fe_group = group;
4067 ac->ac_g_ex.fe_start = block;
4068 ac->ac_g_ex.fe_len = len;
4069 ac->ac_f_ex.fe_len = 0;
4070 ac->ac_flags = ar->flags;
4071 ac->ac_2order = 0;
4072 ac->ac_criteria = 0;
4073 ac->ac_pa = NULL;
4074 ac->ac_bitmap_page = NULL;
4075 ac->ac_buddy_page = NULL;
4076 ac->ac_lg = NULL;
4078 /* we have to define the context: will we work with a file or a
4079 * locality group. This is a policy, actually */
4080 ext4_mb_group_or_file(ac);
4082 mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4083 "left: %u/%u, right %u/%u to %swritable\n",
4084 (unsigned) ar->len, (unsigned) ar->logical,
4085 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4086 (unsigned) ar->lleft, (unsigned) ar->pleft,
4087 (unsigned) ar->lright, (unsigned) ar->pright,
4088 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4089 return 0;
4091 }
4093 static noinline_for_stack void
4094 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4095 struct ext4_locality_group *lg,
4096 int order, int total_entries)
4097 {
4098 ext4_group_t group = 0;
4099 struct ext4_buddy e4b;
4100 struct list_head discard_list;
4101 struct ext4_prealloc_space *pa, *tmp;
4102 struct ext4_allocation_context *ac;
4104 mb_debug("discard locality group preallocation\n");
4106 INIT_LIST_HEAD(&discard_list);
4107 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4109 spin_lock(&lg->lg_prealloc_lock);
4110 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4111 pa_inode_list) {
4112 spin_lock(&pa->pa_lock);
4113 if (atomic_read(&pa->pa_count)) {
4114 /*
4115 * This is the pa that we just used
4116 * for block allocation. So don't
4117 * free that
4118 */
4119 spin_unlock(&pa->pa_lock);
4120 continue;
4121 }
4122 if (pa->pa_deleted) {
4123 spin_unlock(&pa->pa_lock);
4124 continue;
4125 }
4126 /* only lg prealloc space */
4127 BUG_ON(!pa->pa_linear);
4129 /* seems this one can be freed ... */
4130 pa->pa_deleted = 1;
4131 spin_unlock(&pa->pa_lock);
4133 list_del_rcu(&pa->pa_inode_list);
4134 list_add(&pa->u.pa_tmp_list, &discard_list);
4136 total_entries--;
4137 if (total_entries <= 5) {
4138 /*
4139 * we want to keep only 5 entries
4140 * allowing it to grow to 8. This
4141 * makes sure we don't call discard
4142 * soon for this list.
4143 */
4144 break;
4145 }
4146 }
4147 spin_unlock(&lg->lg_prealloc_lock);
4149 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4151 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4152 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4153 ext4_error(sb, __func__, "Error in loading buddy "
4154 "information for %u", group);
4155 continue;
4156 }
4157 ext4_lock_group(sb, group);
4158 list_del(&pa->pa_group_list);
4159 ext4_mb_release_group_pa(&e4b, pa, ac);
4160 ext4_unlock_group(sb, group);
4162 ext4_mb_release_desc(&e4b);
4163 list_del(&pa->u.pa_tmp_list);
4164 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4165 }
4166 if (ac)
4167 kmem_cache_free(ext4_ac_cachep, ac);
4168 }
4170 /*
4171 * We have incremented pa_count. So it cannot be freed at this
4172 * point. Also we hold lg_mutex. So no parallel allocation is
4173 * possible from this lg. That means pa_free cannot be updated.
4175 * A parallel ext4_mb_discard_group_preallocations is possible,
4176 * which can cause the lg_prealloc_list to be updated.
4177 */
4179 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4180 {
4181 int order, added = 0, lg_prealloc_count = 1;
4182 struct super_block *sb = ac->ac_sb;
4183 struct ext4_locality_group *lg = ac->ac_lg;
4184 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4186 order = fls(pa->pa_free) - 1;
4187 if (order > PREALLOC_TB_SIZE - 1)
4188 /* The max size of hash table is PREALLOC_TB_SIZE */
4189 order = PREALLOC_TB_SIZE - 1;
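/*
 * Worked example (illustrative): a pa with pa_free == 100 has
 * fls(100) == 7, so order == 6 and the pa is hashed into
 * lg_prealloc_list[6], the bucket for spaces with 64-127 free blocks;
 * larger spaces are clamped into the last bucket,
 * lg_prealloc_list[PREALLOC_TB_SIZE - 1].
 */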
4190 /* Add the prealloc space to lg */
4191 rcu_read_lock();
4192 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4193 pa_inode_list) {
4194 spin_lock(&tmp_pa->pa_lock);
4195 if (tmp_pa->pa_deleted) {
4196 spin_unlock(&tmp_pa->pa_lock);
4197 continue;
4198 }
4199 if (!added && pa->pa_free < tmp_pa->pa_free) {
4200 /* Add to the tail of the previous entry */
4201 list_add_tail_rcu(&pa->pa_inode_list,
4202 &tmp_pa->pa_inode_list);
4203 added = 1;
4204 /*
4205 * we want to count the total
4206 * number of entries in the list
4207 */
4208 }
4209 spin_unlock(&tmp_pa->pa_lock);
4210 lg_prealloc_count++;
4211 }
4212 if (!added)
4213 list_add_tail_rcu(&pa->pa_inode_list,
4214 &lg->lg_prealloc_list[order]);
4215 rcu_read_unlock();
4217 /* Now trim the list to no more than 8 elements */
4218 if (lg_prealloc_count > 8) {
4219 ext4_mb_discard_lg_preallocations(sb, lg,
4220 order, lg_prealloc_count);
4221 return;
4222 }
4223 return;
4224 }
4226 /*
4227 * release all resources we used in allocation
4228 */
4229 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4230 {
4231 struct ext4_prealloc_space *pa = ac->ac_pa;
4232 if (pa) {
4233 if (pa->pa_linear) {
4234 /* see comment in ext4_mb_use_group_pa() */
4235 spin_lock(&pa->pa_lock);
4236 pa->pa_pstart += ac->ac_b_ex.fe_len;
4237 pa->pa_lstart += ac->ac_b_ex.fe_len;
4238 pa->pa_free -= ac->ac_b_ex.fe_len;
4239 pa->pa_len -= ac->ac_b_ex.fe_len;
4240 spin_unlock(&pa->pa_lock);
4241 /*
4242 * We want to add the pa to the right bucket.
4243 * Remove it from the list and while adding
4244 * make sure the list to which we are adding
4245 * doesn't grow big.
4246 */
4247 if (likely(pa->pa_free)) {
4248 spin_lock(pa->pa_obj_lock);
4249 list_del_rcu(&pa->pa_inode_list);
4250 spin_unlock(pa->pa_obj_lock);
4251 ext4_mb_add_n_trim(ac);
4252 }
4253 }
4254 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4255 }
4256 if (ac->ac_bitmap_page)
4257 page_cache_release(ac->ac_bitmap_page);
4258 if (ac->ac_buddy_page)
4259 page_cache_release(ac->ac_buddy_page);
4260 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4261 mutex_unlock(&ac->ac_lg->lg_mutex);
4262 ext4_mb_collect_stats(ac);
4263 return 0;
4264 }
4266 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4268 ext4_group_t i;
4269 int ret;
4270 int freed = 0;
4272 for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
4273 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4274 freed += ret;
4275 needed -= ret;
4276 }
4278 return freed;
4279 }
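/*
 * Caller's view (descriptive): ext4_mb_new_blocks() calls this on
 * -ENOSPC with needed set to the original request length; groups are
 * walked in order and the scan stops as soon as enough unused
 * preallocated blocks have been returned to satisfy the request.
 */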
4281 /*
4282 * Main entry point into mballoc to allocate blocks:
4283 * it tries to use preallocation first, then falls back
4284 * to the usual allocation
4285 */
4286 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4287 struct ext4_allocation_request *ar, int *errp)
4288 {
4289 int freed;
4290 struct ext4_allocation_context *ac = NULL;
4291 struct ext4_sb_info *sbi;
4292 struct super_block *sb;
4293 ext4_fsblk_t block = 0;
4294 unsigned int inquota;
4295 unsigned int reserv_blks = 0;
4297 sb = ar->inode->i_sb;
4298 sbi = EXT4_SB(sb);
4300 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
4301 /*
4302 * With delalloc we already reserved the blocks
4303 */
4304 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4305 /* let others free the space */
4306 yield();
4307 ar->len = ar->len >> 1;
4308 }
4309 if (!ar->len) {
4310 *errp = -ENOSPC;
4311 return 0;
4312 }
4313 reserv_blks = ar->len;
4314 }
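/*
 * Illustrative: on a nearly-full filesystem a 256-block request is
 * retried at 128, 64, ..., 1 blocks, yielding the CPU between
 * attempts so other tasks may free space; only when not even one
 * block can be claimed does the allocation fail with -ENOSPC.
 */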
4315 while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4316 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4317 ar->len--;
4318 }
4319 if (ar->len == 0) {
4320 *errp = -EDQUOT;
4321 return 0;
4322 }
4323 inquota = ar->len;
4325 if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4326 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4328 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4329 if (!ac) {
4330 ar->len = 0;
4331 *errp = -ENOMEM;
4332 goto out1;
4333 }
4335 *errp = ext4_mb_initialize_context(ac, ar);
4336 if (*errp) {
4337 ar->len = 0;
4338 goto out2;
4339 }
4341 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4342 if (!ext4_mb_use_preallocated(ac)) {
4343 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4344 ext4_mb_normalize_request(ac, ar);
4345 repeat:
4346 /* allocate space in core */
4347 ext4_mb_regular_allocator(ac);
4349 /* as we've just preallocated more space than
4350 * the user originally requested, we store the allocated
4351 * space in a special descriptor */
4352 if (ac->ac_status == AC_STATUS_FOUND &&
4353 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4354 ext4_mb_new_preallocation(ac);
4355 }
4357 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4358 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
4359 if (*errp == -EAGAIN) {
4360 ac->ac_b_ex.fe_group = 0;
4361 ac->ac_b_ex.fe_start = 0;
4362 ac->ac_b_ex.fe_len = 0;
4363 ac->ac_status = AC_STATUS_CONTINUE;
4364 goto repeat;
4365 } else if (*errp) {
4366 ac->ac_b_ex.fe_len = 0;
4367 ar->len = 0;
4368 ext4_mb_show_ac(ac);
4369 } else {
4370 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4371 ar->len = ac->ac_b_ex.fe_len;
4372 }
4373 } else {
4374 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4375 if (freed)
4376 goto repeat;
4377 *errp = -ENOSPC;
4378 ac->ac_b_ex.fe_len = 0;
4379 ar->len = 0;
4380 ext4_mb_show_ac(ac);
4381 }
4383 ext4_mb_release_context(ac);
4385 out2:
4386 kmem_cache_free(ext4_ac_cachep, ac);
4387 out1:
4388 if (ar->len < inquota)
4389 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4391 return block;
4392 }
4394 /*
4395 * We can merge two free data extents only if the physical blocks
4396 * are contiguous, AND the extents were freed by the same transaction,
4397 * AND the blocks are associated with the same group.
4398 */
4399 static int can_merge(struct ext4_free_data *entry1,
4400 struct ext4_free_data *entry2)
4401 {
4402 if ((entry1->t_tid == entry2->t_tid) &&
4403 (entry1->group == entry2->group) &&
4404 ((entry1->start_blk + entry1->count) == entry2->start_blk))
4405 return 1;
4406 return 0;
4407 }
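/*
 * Worked example (values assumed): entry1 = {t_tid 42, group 5,
 * start_blk 100, count 50} and entry2 = {t_tid 42, group 5,
 * start_blk 150, count 25} can merge, since 100 + 50 == 150 and the
 * tid and group match; had entry2 been freed in a different
 * transaction, the two extents would stay as separate nodes in
 * bb_free_root until their respective commits.
 */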
4409 static noinline_for_stack int
4410 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4411 ext4_group_t group, ext4_grpblk_t block, int count)
4412 {
4413 struct ext4_group_info *db = e4b->bd_info;
4414 struct super_block *sb = e4b->bd_sb;
4415 struct ext4_sb_info *sbi = EXT4_SB(sb);
4416 struct ext4_free_data *entry, *new_entry;
4417 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4418 struct rb_node *parent = NULL, *new_node;
4420 BUG_ON(!ext4_handle_valid(handle));
4421 BUG_ON(e4b->bd_bitmap_page == NULL);
4422 BUG_ON(e4b->bd_buddy_page == NULL);
4424 new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
4425 new_entry->start_blk = block;
4426 new_entry->group = group;
4427 new_entry->count = count;
4428 new_entry->t_tid = handle->h_transaction->t_tid;
4429 new_node = &new_entry->node;
4431 ext4_lock_group(sb, group);
4432 if (!*n) {
4433 /* first free block extent. We need to
4434 * protect the buddy cache from being freed,
4435 * otherwise we'll refresh it from the
4436 * on-disk bitmap and lose not-yet-available
4437 * blocks */
4438 page_cache_get(e4b->bd_buddy_page);
4439 page_cache_get(e4b->bd_bitmap_page);
4440 }
4441 while (*n) {
4442 parent = *n;
4443 entry = rb_entry(parent, struct ext4_free_data, node);
4444 if (block < entry->start_blk)
4445 n = &(*n)->rb_left;
4446 else if (block >= (entry->start_blk + entry->count))
4447 n = &(*n)->rb_right;
4448 else {
4449 ext4_unlock_group(sb, group);
4450 ext4_error(sb, __func__,
4451 "Double free of blocks %d (%d %d)",
4452 block, entry->start_blk, entry->count);
4453 return 0;
4454 }
4455 }
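/*
 * Illustrative: if {start_blk 200, count 50} is already queued and
 * block 230 of the same group is freed again, the walk descends
 * neither left (230 >= 200) nor right (230 < 200 + 50), lands inside
 * the existing node, and reports "Double free" instead of corrupting
 * the tree.
 */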
4457 rb_link_node(new_node, parent, n);
4458 rb_insert_color(new_node, &db->bb_free_root);
4460 /* Now see if the extent can be merged to the left and right */
4461 node = rb_prev(new_node);
4462 if (node) {
4463 entry = rb_entry(node, struct ext4_free_data, node);
4464 if (can_merge(entry, new_entry)) {
4465 new_entry->start_blk = entry->start_blk;
4466 new_entry->count += entry->count;
4467 rb_erase(node, &(db->bb_free_root));
4468 spin_lock(&sbi->s_md_lock);
4469 list_del(&entry->list);
4470 spin_unlock(&sbi->s_md_lock);
4471 kmem_cache_free(ext4_free_ext_cachep, entry);
4472 }
4473 }
4475 node = rb_next(new_node);
4476 if (node) {
4477 entry = rb_entry(node, struct ext4_free_data, node);
4478 if (can_merge(new_entry, entry)) {
4479 new_entry->count += entry->count;
4480 rb_erase(node, &(db->bb_free_root));
4481 spin_lock(&sbi->s_md_lock);
4482 list_del(&entry->list);
4483 spin_unlock(&sbi->s_md_lock);
4484 kmem_cache_free(ext4_free_ext_cachep, entry);
4485 }
4486 }
4487 /* Add the extent to transaction's private list */
4488 spin_lock(&sbi->s_md_lock);
4489 list_add(&new_entry->list, &handle->h_transaction->t_private_list);
4490 spin_unlock(&sbi->s_md_lock);
4491 ext4_unlock_group(sb, group);
4492 return 0;
4493 }
4495 /*
4496 * Main entry point into mballoc to free blocks
4497 */
4498 void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4499 unsigned long block, unsigned long count,
4500 int metadata, unsigned long *freed)
4501 {
4502 struct buffer_head *bitmap_bh = NULL;
4503 struct super_block *sb = inode->i_sb;
4504 struct ext4_allocation_context *ac = NULL;
4505 struct ext4_group_desc *gdp;
4506 struct ext4_super_block *es;
4507 unsigned int overflow;
4508 ext4_grpblk_t bit;
4509 struct buffer_head *gd_bh;
4510 ext4_group_t block_group;
4511 struct ext4_sb_info *sbi;
4512 struct ext4_buddy e4b;
4513 int err = 0;
4514 int ret;
4516 *freed = 0;
4518 sbi = EXT4_SB(sb);
4519 es = EXT4_SB(sb)->s_es;
4520 if (block < le32_to_cpu(es->s_first_data_block) ||
4521 block + count < block ||
4522 block + count > ext4_blocks_count(es)) {
4523 ext4_error(sb, __func__,
4524 "Freeing blocks not in datazone - "
4525 "block = %lu, count = %lu", block, count);
4526 goto error_return;
4527 }
4529 ext4_debug("freeing block %lu\n", block);
4531 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4532 if (ac) {
4533 ac->ac_op = EXT4_MB_HISTORY_FREE;
4534 ac->ac_inode = inode;
4535 ac->ac_sb = sb;
4536 }
4538 do_more:
4539 overflow = 0;
4540 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4542 /*
4543 * Check to see if we are freeing blocks across a group
4544 * boundary.
4545 */
4546 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4547 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4548 count -= overflow;
4549 }
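/*
 * Example (illustrative numbers): with 32768 blocks per group,
 * freeing count == 100 blocks at bit == 32700 gives
 * overflow == 32800 - 32768 == 32; this pass frees 68 blocks in the
 * current group and then loops back to do_more for the remaining 32
 * in the next group.
 */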
4550 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4551 if (!bitmap_bh) {
4552 err = -EIO;
4553 goto error_return;
4554 }
4555 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4556 if (!gdp) {
4557 err = -EIO;
4558 goto error_return;
4559 }
4561 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4562 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4563 in_range(block, ext4_inode_table(sb, gdp),
4564 EXT4_SB(sb)->s_itb_per_group) ||
4565 in_range(block + count - 1, ext4_inode_table(sb, gdp),
4566 EXT4_SB(sb)->s_itb_per_group)) {
4568 ext4_error(sb, __func__,
4569 "Freeing blocks in system zone - "
4570 "Block = %lu, count = %lu", block, count);
4571 /* err = 0. ext4_std_error should be a no op */
4572 goto error_return;
4573 }
4575 BUFFER_TRACE(bitmap_bh, "getting write access");
4576 err = ext4_journal_get_write_access(handle, bitmap_bh);
4577 if (err)
4578 goto error_return;
4580 /*
4581 * We are about to modify some metadata. Call the journal APIs
4582 * to unshare ->b_data if a currently-committing transaction is
4583 * using it
4584 */
4585 BUFFER_TRACE(gd_bh, "get_write_access");
4586 err = ext4_journal_get_write_access(handle, gd_bh);
4587 if (err)
4588 goto error_return;
4590 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4591 if (err)
4592 goto error_return;
4594 #ifdef AGGRESSIVE_CHECK
4595 {
4596 int i;
4597 for (i = 0; i < count; i++)
4598 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4599 }
4600 #endif
4601 mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4602 bit, count);
4604 /* We dirtied the bitmap block */
4605 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4606 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4608 if (ac) {
4609 ac->ac_b_ex.fe_group = block_group;
4610 ac->ac_b_ex.fe_start = bit;
4611 ac->ac_b_ex.fe_len = count;
4612 ext4_mb_store_history(ac);
4613 }
4615 if (metadata && ext4_handle_valid(handle)) {
4616 /* blocks being freed are metadata. These blocks shouldn't
4617 * be used until this transaction is committed */
4618 ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
4619 } else {
4620 ext4_lock_group(sb, block_group);
4621 mb_free_blocks(inode, &e4b, bit, count);
4622 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4623 ext4_unlock_group(sb, block_group);
4624 }
4626 spin_lock(sb_bgl_lock(sbi, block_group));
4627 le16_add_cpu(&gdp->bg_free_blocks_count, count);
4628 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4629 spin_unlock(sb_bgl_lock(sbi, block_group));
4630 percpu_counter_add(&sbi->s_freeblocks_counter, count);
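/*
 * Note (descriptive): the freed count is reflected in three layers
 * that must stay consistent: the on-disk group descriptor
 * (bg_free_blocks_count, re-checksummed above), the in-memory per-cpu
 * s_freeblocks_counter, and, with flex_bg enabled, the flex group
 * counter updated just below.
 */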
4632 if (sbi->s_log_groups_per_flex) {
4633 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4634 spin_lock(sb_bgl_lock(sbi, flex_group));
4635 sbi->s_flex_groups[flex_group].free_blocks += count;
4636 spin_unlock(sb_bgl_lock(sbi, flex_group));
4637 }
4639 ext4_mb_release_desc(&e4b);
4641 *freed += count;
4643 /* And the group descriptor block */
4644 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4645 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4646 if (!err)
4647 err = ret;
4649 if (overflow && !err) {
4650 block += count;
4651 count = overflow;
4652 put_bh(bitmap_bh);
4653 goto do_more;
4654 }
4655 sb->s_dirt = 1;
4656 error_return:
4657 brelse(bitmap_bh);
4658 ext4_std_error(sb, err);
4659 if (ac)
4660 kmem_cache_free(ext4_ac_cachep, ac);
4661 return;
4662 }