/* fs/reiserfs/bitmap.c */
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/* Reiserfs block (de)allocator, bitmap-based. */
#include <linux/time.h>
#include "reiserfs.h"
#include <linux/errno.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#define PREALLOCATION_SIZE 9

/* different reiserfs block allocator options */

#define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits)

#define _ALLOC_concentrating_formatted_nodes 0
#define _ALLOC_displacing_large_files 1
#define _ALLOC_displacing_new_packing_localities 2
#define _ALLOC_old_hashed_relocation 3
#define _ALLOC_new_hashed_relocation 4
#define _ALLOC_skip_busy 5
#define _ALLOC_displace_based_on_dirid 6
#define _ALLOC_hashed_formatted_nodes 7
#define _ALLOC_old_way 8
#define _ALLOC_hundredth_slices 9
#define _ALLOC_dirid_groups 10
#define _ALLOC_oid_groups 11
#define _ALLOC_packing_groups 12

#define concentrating_formatted_nodes(s)	test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s))
#define displacing_large_files(s)		test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s))
#define displacing_new_packing_localities(s)	test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s))
#define SET_OPTION(optname) \
	do { \
		reiserfs_info(s, "block allocator option \"%s\" is set", #optname); \
		set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \
	} while(0)
#define TEST_OPTION(optname, s) \
	test_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s))
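
/*
 * Example (illustrative only): with the default options set by
 * reiserfs_init_alloc_options() below, TEST_OPTION(skip_busy, s) expands to
 * test_bit(_ALLOC_skip_busy, &SB_ALLOC_OPTS(s)) and evaluates to true, while
 * TEST_OPTION(old_way, s) evaluates to false.
 */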
static inline void get_bit_address(struct super_block *s,
				   b_blocknr_t block,
				   unsigned int *bmap_nr,
				   unsigned int *offset)
{
	/* It is in the bitmap block number equal to the block
	 * number divided by the number of bits in a block. */
	*bmap_nr = block >> (s->s_blocksize_bits + 3);
	/* Within that bitmap block it is located at bit offset *offset. */
	*offset = block & ((s->s_blocksize << 3) - 1);
}
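
/*
 * Worked example (assuming a 4 KiB block size, i.e. s_blocksize_bits == 12):
 * each bitmap block covers 4096 * 8 = 32768 blocks, so block 70000 maps to
 * *bmap_nr = 70000 >> 15 = 2 and *offset = 70000 & 32767 = 4464.
 */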
int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
{
	unsigned int bmap, offset;
	unsigned int bmap_count = reiserfs_bmap_count(s);

	if (block == 0 || block >= SB_BLOCK_COUNT(s)) {
		reiserfs_error(s, "vs-4010",
			       "block number is out of range %lu (%u)",
			       block, SB_BLOCK_COUNT(s));
		return 0;
	}

	get_bit_address(s, block, &bmap, &offset);

	/* Old format filesystem? Unlikely, but the bitmaps are all up front so
	 * we need to account for it. */
	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
			      &(REISERFS_SB(s)->s_properties)))) {
		b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
		if (block >= bmap1 &&
		    block <= bmap1 + bmap_count) {
			reiserfs_error(s, "vs-4019", "bitmap block %lu(%u) "
				       "can't be freed or reused",
				       block, bmap_count);
			return 0;
		}
	} else {
		if (offset == 0) {
			reiserfs_error(s, "vs-4020", "bitmap block %lu(%u) "
				       "can't be freed or reused",
				       block, bmap_count);
			return 0;
		}
	}

	if (bmap >= bmap_count) {
		reiserfs_error(s, "vs-4030", "bitmap for requested block "
			       "is out of range: block=%lu, bitmap_nr=%u",
			       block, bmap);
		return 0;
	}

	if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) {
		reiserfs_error(s, "vs-4050", "this is root block (%u), "
			       "it must be busy", SB_ROOT_BLOCK(s));
		return 0;
	}

	return 1;
}
/* searches in journal structures for a given block number (bmap, off). If block
   is found in reiserfs journal it suggests next free block candidate to test. */
static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
				      int off, int *next)
{
	b_blocknr_t tmp;

	if (reiserfs_in_journal(s, bmap, off, 1, &tmp)) {
		if (tmp) {	/* hint supplied */
			*next = tmp;
			PROC_INFO_INC(s, scan_bitmap.in_journal_hint);
		} else {
			(*next) = off + 1;	/* inc offset to avoid looping. */
			PROC_INFO_INC(s, scan_bitmap.in_journal_nohint);
		}
		PROC_INFO_INC(s, scan_bitmap.retry);
		return 1;
	}
	return 0;
}
/* it searches for a window of zero bits with given minimum and maximum lengths
 * in one bitmap block */
static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
			     unsigned int bmap_n, int *beg, int boundary,
			     int min, int max, int unfm)
{
	struct super_block *s = th->t_super;
	struct reiserfs_bitmap_info *bi = &SB_AP_BITMAP(s)[bmap_n];
	struct buffer_head *bh;
	int end, next;
	int org = *beg;

	BUG_ON(!th->t_trans_id);

	RFALSE(bmap_n >= reiserfs_bmap_count(s), "Bitmap %u is out of "
	       "range (0..%u)", bmap_n, reiserfs_bmap_count(s) - 1);
	PROC_INFO_INC(s, scan_bitmap.bmap);
/* this is unclear and lacks comments, explain how journal bitmaps
   work here for the reader.  Convey a sense of the design here. What
   is a window? */
/* - I mean `a window of zero bits' as in description of this function - Zam. */

	if (!bi) {
		reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer "
			       "for bitmap %d", bmap_n);
		return 0;
	}

	bh = reiserfs_read_bitmap_block(s, bmap_n);
	if (bh == NULL)
		return 0;

	while (1) {
	      cont:
		if (bi->free_count < min) {
			brelse(bh);
			return 0;	// No free blocks in this bitmap
		}

		/* search for a first zero bit -- beginning of a window */
		*beg = reiserfs_find_next_zero_le_bit
		    ((unsigned long *)(bh->b_data), boundary, *beg);

		if (*beg + min > boundary) {	/* search for a zero bit fails or the rest of bitmap block
						 * cannot contain a zero window of minimum size */
			brelse(bh);
			return 0;
		}

		if (unfm && is_block_in_journal(s, bmap_n, *beg, beg))
			continue;
		/* first zero bit found; we check next bits */
		for (end = *beg + 1;; end++) {
			if (end >= *beg + max || end >= boundary
			    || reiserfs_test_le_bit(end, bh->b_data)) {
				next = end;
				break;
			}
			/* finding the other end of zero bit window requires looking into journal structures (in
			 * case of searching for free blocks for unformatted nodes) */
			if (unfm && is_block_in_journal(s, bmap_n, end, &next))
				break;
		}

		/* now (*beg) points to beginning of zero bits window,
		 * (end) points to one bit after the window end */
		if (end - *beg >= min) {	/* it seems we have found window of proper size */
			int i;
			reiserfs_prepare_for_journal(s, bh, 1);
			/* try to set all blocks used, checking that they are still free */
			for (i = *beg; i < end; i++) {
				/* It seems that we should not check in journal again. */
				if (reiserfs_test_and_set_le_bit
				    (i, bh->b_data)) {
					/* bit was set by another process
					 * while we slept in prepare_for_journal() */
					PROC_INFO_INC(s, scan_bitmap.stolen);
					if (i >= *beg + min) {	/* we can continue with smaller set of allocated blocks,
								 * if length of this set is more or equal to `min' */
						end = i;
						break;
					}
					/* otherwise we clear all bits that were set ... */
					while (--i >= *beg)
						reiserfs_clear_le_bit
						    (i, bh->b_data);
					reiserfs_restore_prepared_buffer(s, bh);
					*beg = org;
					/* ... and search again in current block from beginning */
					goto cont;
				}
			}
			bi->free_count -= (end - *beg);
			journal_mark_dirty(th, s, bh);
			brelse(bh);

			/* free block count calculation */
			reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
						     1);
			PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
			journal_mark_dirty(th, s, SB_BUFFER_WITH_SB(s));

			return end - (*beg);
		} else {
			*beg = next;
		}
	}
}
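
/*
 * Example of the window semantics above (illustrative): a caller that needs
 * 3 contiguous blocks but would take up to 12 passes min = 3 and max = 12;
 * scan_bitmap_block() then returns the length of the first run of zero bits
 * it manages to claim whose length lies in [3, 12], or 0 if no such run
 * exists before `boundary'.
 */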
static int bmap_hash_id(struct super_block *s, u32 id)
{
	char *hash_in = NULL;
	unsigned long hash;
	unsigned bm;

	if (id <= 2) {
		bm = 1;
	} else {
		hash_in = (char *)(&id);
		hash = keyed_hash(hash_in, 4);
		bm = hash % reiserfs_bmap_count(s);
		if (!bm)
			bm = 1;
	}
	/* this can only be true when SB_BMAP_NR = 1 */
	if (bm >= reiserfs_bmap_count(s))
		bm = 0;
	return bm;
}
/*
 * hashes the id and then returns > 0 if the block group for the
 * corresponding hash is full
 */
static inline int block_group_used(struct super_block *s, u32 id)
{
	int bm = bmap_hash_id(s, id);
	struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm];

	/* If we don't have cached information on this bitmap block, we're
	 * going to have to load it later anyway. Loading it here allows us
	 * to make a better decision. This favors long-term performance gain
	 * with a better on-disk layout vs. a short term gain of skipping the
	 * read and potentially having a bad placement. */
	if (info->free_count == UINT_MAX) {
		struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm);
		brelse(bh);
	}

	if (info->free_count > ((s->s_blocksize << 3) * 60 / 100)) {
		return 0;
	}
	return 1;
}
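
/*
 * Worked example (assuming a 4 KiB block size): a block group spans
 * 4096 * 8 = 32768 blocks, so the 60% test above reports the group as used
 * once its free_count drops to 19660 blocks or fewer; reiserfs_choose_packing()
 * below then keys the new directory's packing on its own object id instead of
 * reusing the parent's dir id.
 */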
/*
 * the packing is returned in disk byte order
 */
__le32 reiserfs_choose_packing(struct inode * dir)
{
	__le32 packing;
	if (TEST_OPTION(packing_groups, dir->i_sb)) {
		u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id);
		/*
		 * some versions of reiserfsck expect packing locality 1 to be
		 * special
		 */
		if (parent_dir == 1 || block_group_used(dir->i_sb, parent_dir))
			packing = INODE_PKEY(dir)->k_objectid;
		else
			packing = INODE_PKEY(dir)->k_dir_id;
	} else
		packing = INODE_PKEY(dir)->k_objectid;
	return packing;
}
/* Tries to find contiguous zero bit window (given size) in given region of
 * bitmap and place new blocks there. Returns number of allocated blocks. */
static int scan_bitmap(struct reiserfs_transaction_handle *th,
		       b_blocknr_t * start, b_blocknr_t finish,
		       int min, int max, int unfm, sector_t file_block)
{
	int nr_allocated = 0;
	struct super_block *s = th->t_super;
	/* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
	 * - Hans, it is not a block number - Zam. */

	unsigned int bm, off;
	unsigned int end_bm, end_off;
	unsigned int off_max = s->s_blocksize << 3;

	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(s, scan_bitmap.call);
	if (SB_FREE_BLOCKS(s) <= 0)
		return 0;	// No point in looking for more free blocks

	get_bit_address(s, *start, &bm, &off);
	get_bit_address(s, finish, &end_bm, &end_off);
	if (bm > reiserfs_bmap_count(s))
		return 0;
	if (end_bm > reiserfs_bmap_count(s))
		end_bm = reiserfs_bmap_count(s);

	/* When the bitmap is more than 10% free, anyone can allocate.
	 * When it's less than 10% free, only files that already use the
	 * bitmap are allowed. Once we pass 80% full, this restriction
	 * is lifted.
	 *
	 * We do this so that files that grow later still have space close to
	 * their original allocation. This improves locality, and presumably
	 * performance as a result.
	 *
	 * This is only an allocation policy and does not make up for getting a
	 * bad hint. Decent hinting must be implemented for this to work well.
	 */
	if (TEST_OPTION(skip_busy, s)
	    && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s) / 20) {
		for (; bm < end_bm; bm++, off = 0) {
			if ((off && (!unfm || (file_block != 0)))
			    || SB_AP_BITMAP(s)[bm].free_count >
			    (s->s_blocksize << 3) / 10)
				nr_allocated =
				    scan_bitmap_block(th, bm, &off, off_max,
						      min, max, unfm);
			if (nr_allocated)
				goto ret;
		}
		/* we know from above that start is a reasonable number */
		get_bit_address(s, *start, &bm, &off);
	}

	for (; bm < end_bm; bm++, off = 0) {
		nr_allocated =
		    scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
		if (nr_allocated)
			goto ret;
	}

	nr_allocated =
	    scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);

      ret:
	*start = bm * off_max + off;
	return nr_allocated;
}
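
/*
 * Concrete reading of the skip_busy gate above (illustrative, 4 KiB blocks):
 * the busy-skipping pass only runs while more than SB_BLOCK_COUNT(s)/20 (5%)
 * of the filesystem is free; within it, a bitmap block is scanned only if it
 * has more than 32768/10 = 3276 free blocks or the search offset into it is
 * already non-zero (and, for unformatted nodes, file_block != 0). If that pass
 * finds nothing, or is skipped, the plain scan considers every bitmap block.
 */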
static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
				 struct inode *inode, b_blocknr_t block,
				 int for_unformatted)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs;
	struct buffer_head *sbh, *bmbh;
	struct reiserfs_bitmap_info *apbi;
	unsigned int nr, offset;

	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(s, free_block);

	rs = SB_DISK_SUPER_BLOCK(s);
	sbh = SB_BUFFER_WITH_SB(s);
	apbi = SB_AP_BITMAP(s);

	get_bit_address(s, block, &nr, &offset);

	if (nr >= reiserfs_bmap_count(s)) {
		reiserfs_error(s, "vs-4075", "block %lu is out of range",
			       block);
		return;
	}

	bmbh = reiserfs_read_bitmap_block(s, nr);
	if (!bmbh)
		return;

	reiserfs_prepare_for_journal(s, bmbh, 1);

	/* clear bit for the given block in bit map */
	if (!reiserfs_test_and_clear_le_bit(offset, bmbh->b_data)) {
		reiserfs_error(s, "vs-4080",
			       "block %lu: bit already cleared", block);
	}
	apbi[nr].free_count++;
	journal_mark_dirty(th, s, bmbh);
	brelse(bmbh);

	reiserfs_prepare_for_journal(s, sbh, 1);
	/* update super block */
	set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);

	journal_mark_dirty(th, s, sbh);
	if (for_unformatted)
		dquot_free_block_nodirty(inode, 1);
}
void reiserfs_free_block(struct reiserfs_transaction_handle *th,
			 struct inode *inode, b_blocknr_t block,
			 int for_unformatted)
{
	struct super_block *s = th->t_super;

	BUG_ON(!th->t_trans_id);

	RFALSE(!s, "vs-4061: trying to free block on nonexistent device");
	if (!is_reusable(s, block, 1))
		return;

	if (block > sb_block_count(REISERFS_SB(s)->s_rs)) {
		reiserfs_error(th->t_super, "bitmap-4072",
			       "Trying to free block outside file system "
			       "boundaries (%lu > %lu)",
			       block, sb_block_count(REISERFS_SB(s)->s_rs));
		return;
	}
	/* mark it before we clear it, just in case */
	journal_mark_freed(th, s, block);
	_reiserfs_free_block(th, inode, block, for_unformatted);
}
/* preallocated blocks don't need to be run through journal_mark_freed */
static void reiserfs_free_prealloc_block(struct reiserfs_transaction_handle *th,
					 struct inode *inode, b_blocknr_t block)
{
	BUG_ON(!th->t_trans_id);
	RFALSE(!th->t_super,
	       "vs-4060: trying to free block on nonexistent device");
	if (!is_reusable(th->t_super, block, 1))
		return;
	_reiserfs_free_block(th, inode, block, 1);
}
static void __discard_prealloc(struct reiserfs_transaction_handle *th,
			       struct reiserfs_inode_info *ei)
{
	unsigned long save = ei->i_prealloc_block;
	int dirty = 0;
	struct inode *inode = &ei->vfs_inode;
	BUG_ON(!th->t_trans_id);
#ifdef CONFIG_REISERFS_CHECK
	if (ei->i_prealloc_count < 0)
		reiserfs_error(th->t_super, "zam-4001",
			       "inode has negative prealloc blocks count.");
#endif
	while (ei->i_prealloc_count > 0) {
		reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
		ei->i_prealloc_block++;
		ei->i_prealloc_count--;
		dirty = 1;
	}
	if (dirty)
		reiserfs_update_sd(th, inode);
	ei->i_prealloc_block = save;
	list_del_init(&(ei->i_prealloc_list));
}
/* FIXME: It should be inline function */
void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th,
			       struct inode *inode)
{
	struct reiserfs_inode_info *ei = REISERFS_I(inode);
	BUG_ON(!th->t_trans_id);
	if (ei->i_prealloc_count)
		__discard_prealloc(th, ei);
}
void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th)
{
	struct list_head *plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;

	BUG_ON(!th->t_trans_id);

	while (!list_empty(plist)) {
		struct reiserfs_inode_info *ei;
		ei = list_entry(plist->next, struct reiserfs_inode_info,
				i_prealloc_list);
#ifdef CONFIG_REISERFS_CHECK
		if (!ei->i_prealloc_count) {
			reiserfs_error(th->t_super, "zam-4001",
				       "inode is in prealloc list but has "
				       "no preallocated blocks.");
		}
#endif
		__discard_prealloc(th, ei);
	}
}
void reiserfs_init_alloc_options(struct super_block *s)
{
	set_bit(_ALLOC_skip_busy, &SB_ALLOC_OPTS(s));
	set_bit(_ALLOC_dirid_groups, &SB_ALLOC_OPTS(s));
	set_bit(_ALLOC_packing_groups, &SB_ALLOC_OPTS(s));
}
/* block allocator related options are parsed here */
int reiserfs_parse_alloc_options(struct super_block *s, char *options)
{
	char *this_char, *value;

	REISERFS_SB(s)->s_alloc_options.bits = 0;	/* clear default settings */

	while ((this_char = strsep(&options, ":")) != NULL) {
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = 0;

		if (!strcmp(this_char, "concentrating_formatted_nodes")) {
			int temp;
			SET_OPTION(concentrating_formatted_nodes);
			temp = (value
				&& *value) ? simple_strtoul(value, &value,
							    0) : 10;
			if (temp <= 0 || temp > 100) {
				REISERFS_SB(s)->s_alloc_options.border = 10;
			} else {
				REISERFS_SB(s)->s_alloc_options.border =
				    100 / temp;
			}
			continue;
		}
		if (!strcmp(this_char, "displacing_large_files")) {
			SET_OPTION(displacing_large_files);
			REISERFS_SB(s)->s_alloc_options.large_file_size =
			    (value
			     && *value) ? simple_strtoul(value, &value, 0) : 16;
			continue;
		}
		if (!strcmp(this_char, "displacing_new_packing_localities")) {
			SET_OPTION(displacing_new_packing_localities);
			continue;
		}

		if (!strcmp(this_char, "old_hashed_relocation")) {
			SET_OPTION(old_hashed_relocation);
			continue;
		}

		if (!strcmp(this_char, "new_hashed_relocation")) {
			SET_OPTION(new_hashed_relocation);
			continue;
		}

		if (!strcmp(this_char, "dirid_groups")) {
			SET_OPTION(dirid_groups);
			continue;
		}
		if (!strcmp(this_char, "oid_groups")) {
			SET_OPTION(oid_groups);
			continue;
		}
		if (!strcmp(this_char, "packing_groups")) {
			SET_OPTION(packing_groups);
			continue;
		}
		if (!strcmp(this_char, "hashed_formatted_nodes")) {
			SET_OPTION(hashed_formatted_nodes);
			continue;
		}

		if (!strcmp(this_char, "skip_busy")) {
			SET_OPTION(skip_busy);
			continue;
		}

		if (!strcmp(this_char, "hundredth_slices")) {
			SET_OPTION(hundredth_slices);
			continue;
		}

		if (!strcmp(this_char, "old_way")) {
			SET_OPTION(old_way);
			continue;
		}

		if (!strcmp(this_char, "displace_based_on_dirid")) {
			SET_OPTION(displace_based_on_dirid);
			continue;
		}

		if (!strcmp(this_char, "preallocmin")) {
			REISERFS_SB(s)->s_alloc_options.preallocmin =
			    (value
			     && *value) ? simple_strtoul(value, &value, 0) : 4;
			continue;
		}

		if (!strcmp(this_char, "preallocsize")) {
			REISERFS_SB(s)->s_alloc_options.preallocsize =
			    (value
			     && *value) ? simple_strtoul(value, &value,
							 0) :
			    PREALLOCATION_SIZE;
			continue;
		}

		reiserfs_warning(s, "zam-4001", "unknown option - %s",
				 this_char);
		return 1;
	}

	reiserfs_info(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
	return 0;
}
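
/*
 * Usage sketch (hypothetical mount line): the string parsed above is the
 * value of the "alloc" mount option, with sub-options separated by ':', e.g.
 *
 *	mount -o alloc=skip_busy:dirid_groups:preallocsize=17 /dev/sdb1 /mnt
 *
 * would set the skip_busy and dirid_groups bits and store preallocsize = 17.
 * show_alloc_options() below prints the options back in the same format.
 */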
static void print_sep(struct seq_file *seq, int *first)
{
	if (!*first)
		seq_puts(seq, ":");
	else
		*first = 0;
}
void show_alloc_options(struct seq_file *seq, struct super_block *s)
{
	int first = 1;

	if (SB_ALLOC_OPTS(s) == ((1 << _ALLOC_skip_busy) |
		(1 << _ALLOC_dirid_groups) | (1 << _ALLOC_packing_groups)))
		return;

	seq_puts(seq, ",alloc=");

	if (TEST_OPTION(concentrating_formatted_nodes, s)) {
		print_sep(seq, &first);
		if (REISERFS_SB(s)->s_alloc_options.border != 10) {
			seq_printf(seq, "concentrating_formatted_nodes=%d",
				100 / REISERFS_SB(s)->s_alloc_options.border);
		} else
			seq_puts(seq, "concentrating_formatted_nodes");
	}
	if (TEST_OPTION(displacing_large_files, s)) {
		print_sep(seq, &first);
		if (REISERFS_SB(s)->s_alloc_options.large_file_size != 16) {
			seq_printf(seq, "displacing_large_files=%lu",
			    REISERFS_SB(s)->s_alloc_options.large_file_size);
		} else
			seq_puts(seq, "displacing_large_files");
	}
	if (TEST_OPTION(displacing_new_packing_localities, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "displacing_new_packing_localities");
	}
	if (TEST_OPTION(old_hashed_relocation, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "old_hashed_relocation");
	}
	if (TEST_OPTION(new_hashed_relocation, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "new_hashed_relocation");
	}
	if (TEST_OPTION(dirid_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "dirid_groups");
	}
	if (TEST_OPTION(oid_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "oid_groups");
	}
	if (TEST_OPTION(packing_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "packing_groups");
	}
	if (TEST_OPTION(hashed_formatted_nodes, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "hashed_formatted_nodes");
	}
	if (TEST_OPTION(skip_busy, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "skip_busy");
	}
	if (TEST_OPTION(hundredth_slices, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "hundredth_slices");
	}
	if (TEST_OPTION(old_way, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "old_way");
	}
	if (TEST_OPTION(displace_based_on_dirid, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "displace_based_on_dirid");
	}
	if (REISERFS_SB(s)->s_alloc_options.preallocmin != 0) {
		print_sep(seq, &first);
		seq_printf(seq, "preallocmin=%d",
			   REISERFS_SB(s)->s_alloc_options.preallocmin);
	}
	if (REISERFS_SB(s)->s_alloc_options.preallocsize != 17) {
		print_sep(seq, &first);
		seq_printf(seq, "preallocsize=%d",
			   REISERFS_SB(s)->s_alloc_options.preallocsize);
	}
}
static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
{
	char *hash_in;
	if (hint->formatted_node) {
		hash_in = (char *)&hint->key.k_dir_id;
	} else {
		if (!hint->inode) {
			//hint->search_start = hint->beg;
			hash_in = (char *)&hint->key.k_dir_id;
		} else
		    if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
			hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
		else
			hash_in =
			    (char *)(&INODE_PKEY(hint->inode)->k_objectid);
	}

	hint->search_start =
	    hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
}
/*
 * Relocation based on dirid: files are hashed into a given bitmap block
 * group. Formatted nodes are unaffected; a separate policy covers them.
 */
static void dirid_groups(reiserfs_blocknr_hint_t * hint)
{
	unsigned long hash;
	__u32 dirid = 0;
	int bm = 0;
	struct super_block *sb = hint->th->t_super;
	if (hint->inode)
		dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
	else if (hint->formatted_node)
		dirid = hint->key.k_dir_id;

	if (dirid) {
		bm = bmap_hash_id(sb, dirid);
		hash = bm * (sb->s_blocksize << 3);
		/* give a portion of the block group to metadata */
		if (hint->inode)
			hash += sb->s_blocksize / 2;
		hint->search_start = hash;
	}
}
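
/*
 * Worked example (illustrative, 4 KiB blocks): if bmap_hash_id() maps the
 * dirid to block group 3, the hint becomes 3 * 32768 = 98304; for an inode
 * (data) allocation the extra sb->s_blocksize / 2 = 2048 pushes the hint to
 * 100352, leaving the first part of the group to metadata.
 */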
/*
 * Relocation based on oid: files are hashed into a given bitmap block
 * group. Formatted nodes are unaffected; a separate policy covers them.
 */
static void oid_groups(reiserfs_blocknr_hint_t * hint)
{
	if (hint->inode) {
		unsigned long hash;
		__u32 oid;
		__u32 dirid;
		int bm;

		dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);

		/* keep the root dir and its first set of subdirs close to
		 * the start of the disk
		 */
		if (dirid <= 2)
			hash = (hint->inode->i_sb->s_blocksize << 3);
		else {
			oid = le32_to_cpu(INODE_PKEY(hint->inode)->k_objectid);
			bm = bmap_hash_id(hint->inode->i_sb, oid);
			hash = bm * (hint->inode->i_sb->s_blocksize << 3);
		}
		hint->search_start = hash;
	}
}
/* returns 1 if it finds an indirect item and gets valid hint info
 * from it, otherwise 0
 */
static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
{
	struct treepath *path;
	struct buffer_head *bh;
	struct item_head *ih;
	int pos_in_item;
	__le32 *item;
	int ret = 0;

	if (!hint->path)	/* reiserfs code can call this function w/o pointer to path
				 * structure supplied; then we rely on supplied search_start */
		return 0;

	path = hint->path;
	bh = get_last_bh(path);
	RFALSE(!bh, "green-4002: Illegal path specified to get_left_neighbor");
	ih = get_ih(path);
	pos_in_item = path->pos_in_item;
	item = get_item(path);

	hint->search_start = bh->b_blocknr;

	if (!hint->formatted_node && is_indirect_le_ih(ih)) {
		/* for indirect item: go to left and look for the first non-hole entry
		   in the indirect item */
		if (pos_in_item == I_UNFM_NUM(ih))
			pos_in_item--;
		// pos_in_item = I_UNFM_NUM (ih) - 1;
		while (pos_in_item >= 0) {
			int t = get_block_num(item, pos_in_item);
			if (t) {
				hint->search_start = t;
				ret = 1;
				break;
			}
			pos_in_item--;
		}
	}

	/* does result value fit into specified region? */
	return ret;
}
/* If this is a formatted node, try to put it on the first part of the device,
   specified as a percentage with the mount option; otherwise put it on the
   last part of the device. This is not to say it is good code to do so,
   but the effect should be measured. */
static inline void set_border_in_hint(struct super_block *s,
				      reiserfs_blocknr_hint_t * hint)
{
	b_blocknr_t border =
	    SB_BLOCK_COUNT(s) / REISERFS_SB(s)->s_alloc_options.border;

	if (hint->formatted_node)
		hint->end = border - 1;
	else
		hint->beg = border;
}
static inline void displace_large_file(reiserfs_blocknr_hint_t * hint)
{
	if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
		hint->search_start =
		    hint->beg +
		    keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_dir_id),
			       4) % (hint->end - hint->beg);
	else
		hint->search_start =
		    hint->beg +
		    keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_objectid),
			       4) % (hint->end - hint->beg);
}
static inline void hash_formatted_node(reiserfs_blocknr_hint_t * hint)
{
	char *hash_in;

	if (!hint->inode)
		hash_in = (char *)&hint->key.k_dir_id;
	else if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
		hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
	else
		hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);

	hint->search_start =
	    hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
}
static inline int
this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t *
						   hint)
{
	return hint->block ==
	    REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size;
}
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
static inline void displace_new_packing_locality(reiserfs_blocknr_hint_t * hint)
{
	struct in_core_key *key = &hint->key;

	hint->th->displace_new_blocks = 0;
	hint->search_start =
	    hint->beg + keyed_hash((char *)(&key->k_objectid),
				   4) % (hint->end - hint->beg);
}
#endif
static inline int old_hashed_relocation(reiserfs_blocknr_hint_t * hint)
{
	b_blocknr_t border;
	u32 hash_in;

	if (hint->formatted_node || hint->inode == NULL) {
		return 0;
	}

	hash_in = le32_to_cpu((INODE_PKEY(hint->inode))->k_dir_id);
	border =
	    hint->beg + (u32) keyed_hash(((char *)(&hash_in)),
					 4) % (hint->end - hint->beg - 1);
	if (border > hint->search_start)
		hint->search_start = border;

	return 1;
}
static inline int old_way(reiserfs_blocknr_hint_t * hint)
{
	b_blocknr_t border;

	if (hint->formatted_node || hint->inode == NULL) {
		return 0;
	}

	border =
	    hint->beg +
	    le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id) % (hint->end -
							      hint->beg);
	if (border > hint->search_start)
		hint->search_start = border;

	return 1;
}
static inline void hundredth_slices(reiserfs_blocknr_hint_t * hint)
{
	struct in_core_key *key = &hint->key;
	b_blocknr_t slice_start;

	slice_start =
	    (keyed_hash((char *)(&key->k_dir_id), 4) % 100) * (hint->end / 100);
	if (slice_start > hint->search_start
	    || slice_start + (hint->end / 100) <= hint->search_start) {
		hint->search_start = slice_start;
	}
}
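
/*
 * Worked example (illustrative): if hint->end / 100 is 10000 blocks, a
 * directory id hashing to 37 gets slice_start = 370000, and the search start
 * is moved there only if it does not already lie within [370000, 380000).
 */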
static void determine_search_start(reiserfs_blocknr_hint_t * hint,
				   int amount_needed)
{
	struct super_block *s = hint->th->t_super;
	int unfm_hint;

	hint->beg = 0;
	hint->end = SB_BLOCK_COUNT(s) - 1;

	/* This is former border algorithm. Now with tunable border offset */
	if (concentrating_formatted_nodes(s))
		set_border_in_hint(s, hint);

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	/* whenever we create a new directory, we displace it.  At first we will
	   hash for location, later we might look for a moderately empty place for
	   it */
	if (displacing_new_packing_localities(s)
	    && hint->th->displace_new_blocks) {
		displace_new_packing_locality(hint);

		/* we do not continue determine_search_start,
		 * if new packing locality is being displaced */
		return;
	}
#endif

	/* all persons should feel encouraged to add more special cases here and
	 * test them */

	if (displacing_large_files(s) && !hint->formatted_node
	    && this_blocknr_allocation_would_make_it_a_large_file(hint)) {
		displace_large_file(hint);
		return;
	}

	/* if none of our special cases is relevant, use the left neighbor in the
	   tree order of the new node we are allocating for */
	if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
		hash_formatted_node(hint);
		return;
	}

	unfm_hint = get_left_neighbor(hint);

	/* Mimic old block allocator behaviour, that is if VFS allowed for preallocation,
	   new blocks are displaced based on directory ID. Also, if suggested search_start
	   is less than last preallocated block, we start searching from it, assuming that
	   HDD dataflow is faster in forward direction */
	if (TEST_OPTION(old_way, s)) {
		if (!hint->formatted_node) {
			if (!reiserfs_hashed_relocation(s))
				old_way(hint);
			else if (!reiserfs_no_unhashed_relocation(s))
				old_hashed_relocation(hint);
			if (hint->inode
			    && hint->search_start <
			    REISERFS_I(hint->inode)->i_prealloc_block)
				hint->search_start =
				    REISERFS_I(hint->inode)->i_prealloc_block;
		}
		return;
	}

	/* This is an approach proposed by Hans */
	if (TEST_OPTION(hundredth_slices, s)
	    && !(displacing_large_files(s) && !hint->formatted_node)) {
		hundredth_slices(hint);
		return;
	}

	/* old_hashed_relocation only works on unformatted */
	if (!unfm_hint && !hint->formatted_node &&
	    TEST_OPTION(old_hashed_relocation, s)) {
		old_hashed_relocation(hint);
	}
	/* new_hashed_relocation works with both formatted/unformatted nodes */
	if ((!unfm_hint || hint->formatted_node) &&
	    TEST_OPTION(new_hashed_relocation, s)) {
		new_hashed_relocation(hint);
	}
	/* dirid grouping works only on unformatted nodes */
	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
		dirid_groups(hint);
	}
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	if (hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
		dirid_groups(hint);
	}
#endif

	/* oid grouping works only on unformatted nodes */
	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups, s)) {
		oid_groups(hint);
	}
	return;
}
static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
{
	/* make minimum size a mount option and benchmark both ways */
	/* we preallocate blocks only for regular files, specific size */
	/* benchmark preallocating always and see what happens */

	hint->prealloc_size = 0;

	if (!hint->formatted_node && hint->preallocate) {
		if (S_ISREG(hint->inode->i_mode)
		    && hint->inode->i_size >=
		    REISERFS_SB(hint->th->t_super)->s_alloc_options.
		    preallocmin * hint->inode->i_sb->s_blocksize)
			hint->prealloc_size =
			    REISERFS_SB(hint->th->t_super)->s_alloc_options.
			    preallocsize - 1;
	}
	return CARRY_ON;
}
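
/*
 * Worked example (using the fallback values from reiserfs_parse_alloc_options(),
 * 4 KiB blocks): with preallocmin = 4 and preallocsize = PREALLOCATION_SIZE = 9,
 * a regular file that has reached 4 * 4096 = 16384 bytes gets
 * prealloc_size = 9 - 1 = 8 extra blocks requested on top of each allocation.
 */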
/* XXX I know it could be merged with the upper-level function,
   but maybe the resulting function would be too complex. */
static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
						 b_blocknr_t * new_blocknrs,
						 b_blocknr_t start,
						 b_blocknr_t finish, int min,
						 int amount_needed,
						 int prealloc_size)
{
	int rest = amount_needed;
	int nr_allocated;

	while (rest > 0 && start <= finish) {
		nr_allocated = scan_bitmap(hint->th, &start, finish, min,
					   rest + prealloc_size,
					   !hint->formatted_node, hint->block);

		if (nr_allocated == 0)	/* no new blocks allocated, return */
			break;

		/* fill free_blocknrs array first */
		while (rest > 0 && nr_allocated > 0) {
			*new_blocknrs++ = start++;
			rest--;
			nr_allocated--;
		}

		/* do we have something to fill prealloc. array also ? */
		if (nr_allocated > 0) {
			/* it means prealloc_size was greater than 0 and we do preallocation */
			list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
				 &SB_JOURNAL(hint->th->t_super)->
				 j_prealloc_list);
			REISERFS_I(hint->inode)->i_prealloc_block = start;
			REISERFS_I(hint->inode)->i_prealloc_count =
			    nr_allocated;
			break;
		}
	}

	return (amount_needed - rest);
}
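
/*
 * Example of how one scan_bitmap() result is split (illustrative): with
 * rest = 3 still needed and prealloc_size = 8, scan_bitmap() is asked for
 * between `min' and 3 + 8 = 11 blocks; if it returns 10 starting at `start',
 * the first 3 go into new_blocknrs[] and the remaining 7 become the inode's
 * preallocation window (i_prealloc_block = start + 3, i_prealloc_count = 7).
 */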
static inline int blocknrs_and_prealloc_arrays_from_search_start
    (reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs,
     int amount_needed) {
	struct super_block *s = hint->th->t_super;
	b_blocknr_t start = hint->search_start;
	b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
	int passno = 0;
	int nr_allocated = 0;

	determine_prealloc_size(hint);
	if (!hint->formatted_node) {
		int quota_ret;
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(s, REISERFS_DEBUG_CODE,
			       "reiserquota: allocating %d blocks id=%u",
			       amount_needed, hint->inode->i_uid);
#endif
		quota_ret =
		    dquot_alloc_block_nodirty(hint->inode, amount_needed);
		if (quota_ret)	/* Quota exceeded? */
			return QUOTA_EXCEEDED;
		if (hint->preallocate && hint->prealloc_size) {
#ifdef REISERQUOTA_DEBUG
			reiserfs_debug(s, REISERFS_DEBUG_CODE,
				       "reiserquota: allocating (prealloc) %d blocks id=%u",
				       hint->prealloc_size, hint->inode->i_uid);
#endif
			quota_ret = dquot_prealloc_block_nodirty(hint->inode,
							 hint->prealloc_size);
			if (quota_ret)
				hint->preallocate = hint->prealloc_size = 0;
		}
		/* for unformatted nodes, force large allocations */
	}

	do {
		switch (passno++) {
		case 0:	/* Search from hint->search_start to end of disk */
			start = hint->search_start;
			finish = SB_BLOCK_COUNT(s) - 1;
			break;
		case 1:	/* Search from hint->beg to hint->search_start */
			start = hint->beg;
			finish = hint->search_start;
			break;
		case 2:	/* Last chance: Search from 0 to hint->beg */
			start = 0;
			finish = hint->beg;
			break;
		default:	/* We've tried searching everywhere, not enough space */
			/* Free the blocks */
			if (!hint->formatted_node) {
#ifdef REISERQUOTA_DEBUG
				reiserfs_debug(s, REISERFS_DEBUG_CODE,
					       "reiserquota: freeing (nospace) %d blocks id=%u",
					       amount_needed +
					       hint->prealloc_size -
					       nr_allocated,
					       hint->inode->i_uid);
#endif
				/* Free not allocated blocks */
				dquot_free_block_nodirty(hint->inode,
					amount_needed + hint->prealloc_size -
					nr_allocated);
			}
			while (nr_allocated--)
				reiserfs_free_block(hint->th, hint->inode,
						    new_blocknrs[nr_allocated],
						    !hint->formatted_node);

			return NO_DISK_SPACE;
		}
	} while ((nr_allocated += allocate_without_wrapping_disk(hint,
								  new_blocknrs +
								  nr_allocated,
								  start, finish,
								  1,
								  amount_needed -
								  nr_allocated,
								  hint->
								  prealloc_size))
		 < amount_needed);

	if (!hint->formatted_node &&
	    amount_needed + hint->prealloc_size >
	    nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) {
		/* Some of preallocation blocks were not allocated */
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(s, REISERFS_DEBUG_CODE,
			       "reiserquota: freeing (failed prealloc) %d blocks id=%u",
			       amount_needed + hint->prealloc_size -
			       nr_allocated -
			       REISERFS_I(hint->inode)->i_prealloc_count,
			       hint->inode->i_uid);
#endif
		dquot_free_block_nodirty(hint->inode, amount_needed +
					 hint->prealloc_size - nr_allocated -
					 REISERFS_I(hint->inode)->
					 i_prealloc_count);
	}

	return CARRY_ON;
}
/* grab new blocknrs from preallocated list */
/* return amount still needed after using them */
static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint,
					      b_blocknr_t * new_blocknrs,
					      int amount_needed)
{
	struct inode *inode = hint->inode;

	if (REISERFS_I(inode)->i_prealloc_count > 0) {
		while (amount_needed) {

			*new_blocknrs++ = REISERFS_I(inode)->i_prealloc_block++;
			REISERFS_I(inode)->i_prealloc_count--;

			amount_needed--;

			if (REISERFS_I(inode)->i_prealloc_count <= 0) {
				list_del(&REISERFS_I(inode)->i_prealloc_list);
				break;
			}
		}
	}
	/* return amount still needed after using preallocated blocks */
	return amount_needed;
}
int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs, int amount_needed, int reserved_by_us	/* Amount of blocks we have
																	   already reserved */ )
{
	int initial_amount_needed = amount_needed;
	int ret;
	struct super_block *s = hint->th->t_super;

	/* Check if there is enough space, taking into account reserved space */
	if (SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
	    amount_needed - reserved_by_us)
		return NO_DISK_SPACE;
	/* should this be if !hint->inode &&  hint->preallocate? */
	/* do you mean hint->formatted_node can be removed ? - Zam */
	/* hint->formatted_node cannot be removed because we try to access
	   inode information here, and there is often no inode associated with
	   metadata allocations - green */

	if (!hint->formatted_node && hint->preallocate) {
		amount_needed = use_preallocated_list_if_available
		    (hint, new_blocknrs, amount_needed);
		if (amount_needed == 0)	/* all blocknrs we need we got from
					   prealloc. list */
			return CARRY_ON;
		new_blocknrs += (initial_amount_needed - amount_needed);
	}

	/* find search start and save it in hint structure */
	determine_search_start(hint, amount_needed);
	if (hint->search_start >= SB_BLOCK_COUNT(s))
		hint->search_start = SB_BLOCK_COUNT(s) - 1;

	/* allocation itself; fill new_blocknrs and preallocation arrays */
	ret = blocknrs_and_prealloc_arrays_from_search_start
	    (hint, new_blocknrs, amount_needed);

	/* we used prealloc. list to fill (partially) new_blocknrs array. If final allocation fails we
	 * need to return blocks back to prealloc. list or just free them. -- Zam (I chose the second
	 * variant) */

	if (ret != CARRY_ON) {
		while (amount_needed++ < initial_amount_needed) {
			reiserfs_free_block(hint->th, hint->inode,
					    *(--new_blocknrs), 1);
		}
	}
	return ret;
}
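
/*
 * Caller sketch (hypothetical, field names as used throughout this file):
 * a typical caller zeroes a reiserfs_blocknr_hint_t, fills hint.th, hint.inode
 * (or hint.key for metadata), hint.block, hint.formatted_node and
 * hint.preallocate, then calls
 *
 *	ret = reiserfs_allocate_blocknrs(&hint, blocknrs, needed, 0);
 *
 * and treats CARRY_ON as success and NO_DISK_SPACE / QUOTA_EXCEEDED as failure.
 */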
void reiserfs_cache_bitmap_metadata(struct super_block *sb,
				    struct buffer_head *bh,
				    struct reiserfs_bitmap_info *info)
{
	unsigned long *cur = (unsigned long *)(bh->b_data + bh->b_size);

	/* The first bit must ALWAYS be 1 */
	if (!reiserfs_test_le_bit(0, (unsigned long *)bh->b_data))
		reiserfs_error(sb, "reiserfs-2025", "bitmap block %lu is "
			       "corrupted: first bit must be 1", bh->b_blocknr);

	info->free_count = 0;

	while (--cur >= (unsigned long *)bh->b_data) {
		/* 0 and ~0 are special, we can optimize for them */
		if (*cur == 0)
			info->free_count += BITS_PER_LONG;
		else if (*cur != ~0L)	/* A mix, investigate */
			info->free_count += BITS_PER_LONG - hweight_long(*cur);
	}
}
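
/*
 * Worked example of the counting loop above (illustrative, 64-bit longs):
 * a word of 0x0000000000000000 adds 64 free blocks, 0xffffffffffffffff adds
 * none, and a mixed word such as 0x00000000ffffffff adds
 * 64 - hweight_long() = 32.
 */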
struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
					       unsigned int bitmap)
{
	b_blocknr_t block = (sb->s_blocksize << 3) * bitmap;
	struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
	struct buffer_head *bh;

	/* Way old format filesystems had the bitmaps packed up front.
	 * I doubt there are any of these left, but just in case... */
	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
			      &(REISERFS_SB(sb)->s_properties))))
		block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
	else if (bitmap == 0)
		block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;

	reiserfs_write_unlock(sb);
	bh = sb_bread(sb, block);
	reiserfs_write_lock(sb);
	if (bh == NULL)
		reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
				 "reading failed", __func__, block);
	else {
		if (buffer_locked(bh)) {
			PROC_INFO_INC(sb, scan_bitmap.wait);
			reiserfs_write_unlock(sb);
			__wait_on_buffer(bh);
			reiserfs_write_lock(sb);
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(atomic_read(&bh->b_count) == 0);

		if (info->free_count == UINT_MAX)
			reiserfs_cache_bitmap_metadata(sb, bh, info);
	}

	return bh;
}
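
/*
 * Block placement example (illustrative, 4 KiB blocks, new format, assuming
 * the usual 64 KiB superblock offset): bitmap N normally sits at block
 * N * 32768, except bitmap 0, which lives right after the superblock at
 * (REISERFS_DISK_OFFSET_IN_BYTES >> 12) + 1 = 16 + 1 = block 17.
 */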
int reiserfs_init_bitmap_cache(struct super_block *sb)
{
	struct reiserfs_bitmap_info *bitmap;
	unsigned int bmap_nr = reiserfs_bmap_count(sb);

	bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
	if (bitmap == NULL)
		return -ENOMEM;

	memset(bitmap, 0xff, sizeof(*bitmap) * bmap_nr);

	SB_AP_BITMAP(sb) = bitmap;

	return 0;
}
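
/*
 * Note on the 0xff fill above: it sets every info->free_count to an all-ones
 * value that reads back as UINT_MAX, which the rest of this file
 * (block_group_used(), reiserfs_read_bitmap_block()) treats as "not cached
 * yet", so the real count is computed lazily on first read by
 * reiserfs_cache_bitmap_metadata().
 */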
void reiserfs_free_bitmap_cache(struct super_block *sb)
{
	if (SB_AP_BITMAP(sb)) {
		vfree(SB_AP_BITMAP(sb));
		SB_AP_BITMAP(sb) = NULL;
	}
}