/* fs/nilfs2/alloc.c — NILFS dat/inode allocator */
1 /*
2 * alloc.c - NILFS dat/inode allocator
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Original code was written by Koji Sato <koji@osrg.net>.
21 * Two allocators were unified by Ryusuke Konishi <ryusuke@osrg.net>,
22 * Amagai Yoshiji <amagai@osrg.net>.
25 #include <linux/types.h>
26 #include <linux/buffer_head.h>
27 #include <linux/fs.h>
28 #include <linux/bitops.h>
29 #include "mdt.h"
30 #include "alloc.h"
33 static inline unsigned long
34 nilfs_palloc_groups_per_desc_block(const struct inode *inode)
36 return (1UL << inode->i_blkbits) /
37 sizeof(struct nilfs_palloc_group_desc);
40 static inline unsigned long
41 nilfs_palloc_groups_count(const struct inode *inode)
43 return 1UL << (BITS_PER_LONG - (inode->i_blkbits + 3 /* log2(8) */));
46 int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
48 struct nilfs_mdt_info *mi = NILFS_MDT(inode);
50 mi->mi_bgl = kmalloc(sizeof(*mi->mi_bgl), GFP_NOFS);
51 if (!mi->mi_bgl)
52 return -ENOMEM;
54 bgl_lock_init(mi->mi_bgl);
56 nilfs_mdt_set_entry_size(inode, entry_size, 0);
58 mi->mi_blocks_per_group =
59 DIV_ROUND_UP(nilfs_palloc_entries_per_group(inode),
60 mi->mi_entries_per_block) + 1;
61 /* Number of blocks in a group including entry blocks and
62 a bitmap block */
63 mi->mi_blocks_per_desc_block =
64 nilfs_palloc_groups_per_desc_block(inode) *
65 mi->mi_blocks_per_group + 1;
66 /* Number of blocks per descriptor including the
67 descriptor block */
68 return 0;
/**
 * nilfs_palloc_group - get the group number that an entry belongs to
 * @inode: inode of the metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @offset: place to store the entry's offset within its group
 *
 * Return: group number.  do_div() divides @group in place and returns
 * the remainder, which becomes the in-group offset.
 */
static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
					unsigned long *offset)
{
	__u64 group = nr;

	*offset = do_div(group, nilfs_palloc_entries_per_group(inode));
	return group;
}
80 static unsigned long
81 nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
83 unsigned long desc_block =
84 group / nilfs_palloc_groups_per_desc_block(inode);
85 return desc_block * NILFS_MDT(inode)->mi_blocks_per_desc_block;
88 static unsigned long
89 nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
91 unsigned long desc_offset =
92 group % nilfs_palloc_groups_per_desc_block(inode);
93 return nilfs_palloc_desc_blkoff(inode, group) + 1 +
94 desc_offset * NILFS_MDT(inode)->mi_blocks_per_group;
97 static unsigned long
98 nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group,
99 const struct nilfs_palloc_group_desc *desc)
101 unsigned long nfree;
103 spin_lock(nilfs_mdt_bgl_lock(inode, group));
104 nfree = le32_to_cpu(desc->pg_nfrees);
105 spin_unlock(nilfs_mdt_bgl_lock(inode, group));
106 return nfree;
109 static void
110 nilfs_palloc_group_desc_add_entries(struct inode *inode,
111 unsigned long group,
112 struct nilfs_palloc_group_desc *desc,
113 u32 n)
115 spin_lock(nilfs_mdt_bgl_lock(inode, group));
116 le32_add_cpu(&desc->pg_nfrees, n);
117 spin_unlock(nilfs_mdt_bgl_lock(inode, group));
120 static unsigned long
121 nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
123 unsigned long group, group_offset;
125 group = nilfs_palloc_group(inode, nr, &group_offset);
127 return nilfs_palloc_bitmap_blkoff(inode, group) + 1 +
128 group_offset / NILFS_MDT(inode)->mi_entries_per_block;
131 static void nilfs_palloc_desc_block_init(struct inode *inode,
132 struct buffer_head *bh, void *kaddr)
134 struct nilfs_palloc_group_desc *desc = kaddr + bh_offset(bh);
135 unsigned long n = nilfs_palloc_groups_per_desc_block(inode);
136 __le32 nfrees;
138 nfrees = cpu_to_le32(nilfs_palloc_entries_per_group(inode));
139 while (n-- > 0) {
140 desc->pg_nfrees = nfrees;
141 desc++;
/**
 * nilfs_palloc_get_block - read or create a block of the metadata file,
 *	through a single-entry cache of the last buffer returned
 * @inode: inode of the metadata file
 * @blkoff: block offset within the metadata file
 * @create: create flag, passed through to nilfs_mdt_get_block()
 * @init_block: initializer for a newly created block (may be NULL)
 * @bhp: place to store the resulting buffer head
 * @prev: single-entry cache holding the most recently returned buffer
 * @lock: spinlock protecting @prev
 *
 * Return: 0 on success, or the error from nilfs_mdt_get_block().
 * On success *@bhp holds a reference the caller must brelse().
 */
static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
				  int create,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *),
				  struct buffer_head **bhp,
				  struct nilfs_bh_assoc *prev,
				  spinlock_t *lock)
{
	int ret;

	spin_lock(lock);
	if (prev->bh && blkoff == prev->blkoff) {
		/* cache hit: hand out an extra reference to the cached bh */
		get_bh(prev->bh);
		*bhp = prev->bh;
		spin_unlock(lock);
		return 0;
	}
	spin_unlock(lock);

	ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
	if (!ret) {
		spin_lock(lock);
		/*
		 * The following code must be safe for change of the
		 * cache contents during the get block call.
		 */
		brelse(prev->bh);
		get_bh(*bhp);
		prev->bh = *bhp;
		prev->blkoff = blkoff;
		spin_unlock(lock);
	}
	return ret;
}
181 static int nilfs_palloc_get_desc_block(struct inode *inode,
182 unsigned long group,
183 int create, struct buffer_head **bhp)
185 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
187 return nilfs_palloc_get_block(inode,
188 nilfs_palloc_desc_blkoff(inode, group),
189 create, nilfs_palloc_desc_block_init,
190 bhp, &cache->prev_desc, &cache->lock);
193 static int nilfs_palloc_get_bitmap_block(struct inode *inode,
194 unsigned long group,
195 int create, struct buffer_head **bhp)
197 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
199 return nilfs_palloc_get_block(inode,
200 nilfs_palloc_bitmap_blkoff(inode, group),
201 create, NULL, bhp,
202 &cache->prev_bitmap, &cache->lock);
205 int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
206 int create, struct buffer_head **bhp)
208 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
210 return nilfs_palloc_get_block(inode,
211 nilfs_palloc_entry_blkoff(inode, nr),
212 create, NULL, bhp,
213 &cache->prev_entry, &cache->lock);
216 static struct nilfs_palloc_group_desc *
217 nilfs_palloc_block_get_group_desc(const struct inode *inode,
218 unsigned long group,
219 const struct buffer_head *bh, void *kaddr)
221 return (struct nilfs_palloc_group_desc *)(kaddr + bh_offset(bh)) +
222 group % nilfs_palloc_groups_per_desc_block(inode);
225 void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
226 const struct buffer_head *bh, void *kaddr)
228 unsigned long entry_offset, group_offset;
230 nilfs_palloc_group(inode, nr, &group_offset);
231 entry_offset = group_offset % NILFS_MDT(inode)->mi_entries_per_block;
233 return kaddr + bh_offset(bh) +
234 entry_offset * NILFS_MDT(inode)->mi_entry_size;
/**
 * nilfs_palloc_find_available_slot - find and claim a free slot in a
 *	group's bitmap
 * @inode: inode of the metadata file using this allocator
 * @group: group number (used to select the per-group lock)
 * @target: preferred bit position to start searching from
 * @bitmap: bitmap of the group
 * @bsize: size of the bitmap in bits
 *
 * Searches from @target to the end of its word first, then scans whole
 * words circularly (wrapping at @bsize).  A found zero bit is claimed
 * with an atomic test-and-set; if another CPU wins the race the search
 * continues.
 *
 * Return: bit position of the claimed slot, or -ENOSPC if none is free.
 */
static int nilfs_palloc_find_available_slot(struct inode *inode,
					    unsigned long group,
					    unsigned long target,
					    unsigned char *bitmap,
					    int bsize) /* size in bits */
{
	int curr, pos, end, i;

	if (target > 0) {
		/* first try within the word that contains @target */
		end = (target + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
		if (end > bsize)
			end = bsize;
		pos = nilfs_find_next_zero_bit(bitmap, end, target);
		if (pos < end &&
		    !nilfs_set_bit_atomic(
			    nilfs_mdt_bgl_lock(inode, group), pos, bitmap))
			return pos;
	} else
		end = 0;

	/* scan word by word, wrapping around to the start of the bitmap */
	for (i = 0, curr = end;
	     i < bsize;
	     i += BITS_PER_LONG, curr += BITS_PER_LONG) {
		/* wrap around */
		if (curr >= bsize)
			curr = 0;
		/* retry this word while it still has a zero bit */
		while (*((unsigned long *)bitmap + curr / BITS_PER_LONG)
		       != ~0UL) {
			end = curr + BITS_PER_LONG;
			if (end > bsize)
				end = bsize;
			pos = nilfs_find_next_zero_bit(bitmap, end, curr);
			if ((pos < end) &&
			    !nilfs_set_bit_atomic(
				    nilfs_mdt_bgl_lock(inode, group), pos,
				    bitmap))
				return pos;
		}
	}
	return -ENOSPC;
}
279 static unsigned long
280 nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
281 unsigned long curr, unsigned long max)
283 return min_t(unsigned long,
284 nilfs_palloc_groups_per_desc_block(inode) -
285 curr % nilfs_palloc_groups_per_desc_block(inode),
286 max - curr + 1);
/**
 * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent
 *	object
 * @inode: inode of the metadata file using this allocator
 * @req: allocation request; req->pr_entry_nr gives the preferred entry
 *	number on entry and receives the allocated number on success
 *
 * Scans groups circularly starting from the group of the requested
 * entry, looking for one with a nonzero free count, then claims a slot
 * in its bitmap.  On success the descriptor and bitmap buffers are
 * stored in @req (references held) for the later commit/abort call.
 *
 * Return: 0 on success, -ENOSPC if no entry is left, or another
 * negative error code from block lookup.
 */
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
				     struct nilfs_palloc_req *req)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	struct nilfs_palloc_group_desc *desc;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned long group, maxgroup, ngroups;
	unsigned long group_offset, maxgroup_offset;
	unsigned long n, entries_per_group, groups_per_desc_block;
	unsigned long i, j;
	int pos, ret;

	ngroups = nilfs_palloc_groups_count(inode);
	maxgroup = ngroups - 1;
	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	entries_per_group = nilfs_palloc_entries_per_group(inode);
	groups_per_desc_block = nilfs_palloc_groups_per_desc_block(inode);

	for (i = 0; i < ngroups; i += n) {
		if (group >= ngroups) {
			/* wrap around */
			group = 0;
			/* stop just before the group we started from */
			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
						      &maxgroup_offset) - 1;
		}
		ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
		if (ret < 0)
			return ret;
		desc_kaddr = kmap(desc_bh->b_page);
		desc = nilfs_palloc_block_get_group_desc(
			inode, group, desc_bh, desc_kaddr);
		/* n = groups we can examine within this descriptor block */
		n = nilfs_palloc_rest_groups_in_desc_block(inode, group,
							   maxgroup);
		for (j = 0; j < n; j++, desc++, group++) {
			if (nilfs_palloc_group_desc_nfrees(inode, group, desc)
			    > 0) {
				ret = nilfs_palloc_get_bitmap_block(
					inode, group, 1, &bitmap_bh);
				if (ret < 0)
					goto out_desc;
				bitmap_kaddr = kmap(bitmap_bh->b_page);
				bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
				pos = nilfs_palloc_find_available_slot(
					inode, group, group_offset, bitmap,
					entries_per_group);
				if (pos >= 0) {
					/* found a free entry */
					nilfs_palloc_group_desc_add_entries(
						inode, group, desc, -1);
					req->pr_entry_nr =
						entries_per_group * group + pos;
					kunmap(desc_bh->b_page);
					kunmap(bitmap_bh->b_page);

					/* hand the buffers to commit/abort */
					req->pr_desc_bh = desc_bh;
					req->pr_bitmap_bh = bitmap_bh;
					return 0;
				}
				kunmap(bitmap_bh->b_page);
				brelse(bitmap_bh);
			}

			/* only honor the preferred offset in the first group */
			group_offset = 0;
		}

		kunmap(desc_bh->b_page);
		brelse(desc_bh);
	}

	/* no entries left */
	return -ENOSPC;

 out_desc:
	kunmap(desc_bh->b_page);
	brelse(desc_bh);
	return ret;
}
368 void nilfs_palloc_commit_alloc_entry(struct inode *inode,
369 struct nilfs_palloc_req *req)
371 nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh);
372 nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh);
373 nilfs_mdt_mark_dirty(inode);
375 brelse(req->pr_bitmap_bh);
376 brelse(req->pr_desc_bh);
379 void nilfs_palloc_commit_free_entry(struct inode *inode,
380 struct nilfs_palloc_req *req)
382 struct nilfs_palloc_group_desc *desc;
383 unsigned long group, group_offset;
384 unsigned char *bitmap;
385 void *desc_kaddr, *bitmap_kaddr;
387 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
388 desc_kaddr = kmap(req->pr_desc_bh->b_page);
389 desc = nilfs_palloc_block_get_group_desc(inode, group,
390 req->pr_desc_bh, desc_kaddr);
391 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
392 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
394 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
395 group_offset, bitmap))
396 printk(KERN_WARNING "%s: entry number %llu already freed\n",
397 __func__, (unsigned long long)req->pr_entry_nr);
399 nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
401 kunmap(req->pr_bitmap_bh->b_page);
402 kunmap(req->pr_desc_bh->b_page);
404 nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh);
405 nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh);
406 nilfs_mdt_mark_dirty(inode);
408 brelse(req->pr_bitmap_bh);
409 brelse(req->pr_desc_bh);
412 void nilfs_palloc_abort_alloc_entry(struct inode *inode,
413 struct nilfs_palloc_req *req)
415 struct nilfs_palloc_group_desc *desc;
416 void *desc_kaddr, *bitmap_kaddr;
417 unsigned char *bitmap;
418 unsigned long group, group_offset;
420 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
421 desc_kaddr = kmap(req->pr_desc_bh->b_page);
422 desc = nilfs_palloc_block_get_group_desc(inode, group,
423 req->pr_desc_bh, desc_kaddr);
424 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
425 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
426 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
427 group_offset, bitmap))
428 printk(KERN_WARNING "%s: entry numer %llu already freed\n",
429 __func__, (unsigned long long)req->pr_entry_nr);
431 nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
433 kunmap(req->pr_bitmap_bh->b_page);
434 kunmap(req->pr_desc_bh->b_page);
436 brelse(req->pr_bitmap_bh);
437 brelse(req->pr_desc_bh);
439 req->pr_entry_nr = 0;
440 req->pr_bitmap_bh = NULL;
441 req->pr_desc_bh = NULL;
444 int nilfs_palloc_prepare_free_entry(struct inode *inode,
445 struct nilfs_palloc_req *req)
447 struct buffer_head *desc_bh, *bitmap_bh;
448 unsigned long group, group_offset;
449 int ret;
451 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
452 ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
453 if (ret < 0)
454 return ret;
455 ret = nilfs_palloc_get_bitmap_block(inode, group, 1, &bitmap_bh);
456 if (ret < 0) {
457 brelse(desc_bh);
458 return ret;
461 req->pr_desc_bh = desc_bh;
462 req->pr_bitmap_bh = bitmap_bh;
463 return 0;
466 void nilfs_palloc_abort_free_entry(struct inode *inode,
467 struct nilfs_palloc_req *req)
469 brelse(req->pr_bitmap_bh);
470 brelse(req->pr_desc_bh);
472 req->pr_entry_nr = 0;
473 req->pr_bitmap_bh = NULL;
474 req->pr_desc_bh = NULL;
477 static int
478 nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr)
480 __u64 first, last;
482 first = group * nilfs_palloc_entries_per_group(inode);
483 last = first + nilfs_palloc_entries_per_group(inode) - 1;
484 return (nr >= first) && (nr <= last);
/**
 * nilfs_palloc_freev - deallocate a set of persistent objects
 * @inode: inode of the metadata file using this allocator
 * @entry_nrs: array of entry numbers to free; entries belonging to the
 *	same group must be contiguous in the array for batching to work
 * @nitems: number of entries in @entry_nrs
 *
 * Processes the array in runs of entries that share a group: clears
 * each entry's bitmap bit, then returns the whole run to the group's
 * free count in one update and marks the buffers dirty.
 *
 * Return: 0 on success or a negative error code from block lookup.
 */
int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	struct nilfs_palloc_group_desc *desc;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned long group, group_offset;
	int i, j, n, ret;

	for (i = 0; i < nitems; i += n) {
		group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset);
		ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh);
		if (ret < 0)
			return ret;
		ret = nilfs_palloc_get_bitmap_block(inode, group, 0,
						    &bitmap_bh);
		if (ret < 0) {
			brelse(desc_bh);
			return ret;
		}
		desc_kaddr = kmap(desc_bh->b_page);
		desc = nilfs_palloc_block_get_group_desc(
			inode, group, desc_bh, desc_kaddr);
		bitmap_kaddr = kmap(bitmap_bh->b_page);
		bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
		/* clear bits for the run of entries in this group; n counts them */
		for (j = i, n = 0;
		     (j < nitems) && nilfs_palloc_group_is_in(inode, group,
							      entry_nrs[j]);
		     j++, n++) {
			nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
			if (!nilfs_clear_bit_atomic(
				    nilfs_mdt_bgl_lock(inode, group),
				    group_offset, bitmap)) {
				printk(KERN_WARNING
				       "%s: entry number %llu already freed\n",
				       __func__,
				       (unsigned long long)entry_nrs[j]);
			}
		}
		/* give the whole run back to the group in one update */
		nilfs_palloc_group_desc_add_entries(inode, group, desc, n);

		kunmap(bitmap_bh->b_page);
		kunmap(desc_bh->b_page);

		nilfs_mdt_mark_buffer_dirty(desc_bh);
		nilfs_mdt_mark_buffer_dirty(bitmap_bh);
		nilfs_mdt_mark_dirty(inode);

		brelse(bitmap_bh);
		brelse(desc_bh);
	}
	return 0;
}
541 void nilfs_palloc_setup_cache(struct inode *inode,
542 struct nilfs_palloc_cache *cache)
544 NILFS_MDT(inode)->mi_palloc_cache = cache;
545 spin_lock_init(&cache->lock);
548 void nilfs_palloc_clear_cache(struct inode *inode)
550 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
552 spin_lock(&cache->lock);
553 brelse(cache->prev_desc.bh);
554 brelse(cache->prev_bitmap.bh);
555 brelse(cache->prev_entry.bh);
556 cache->prev_desc.bh = NULL;
557 cache->prev_bitmap.bh = NULL;
558 cache->prev_entry.bh = NULL;
559 spin_unlock(&cache->lock);
/**
 * nilfs_palloc_destroy_cache - detach the allocator cache from @inode
 * @inode: inode of the metadata file using this allocator
 *
 * Drops the cached buffer references and clears the back pointer so
 * the cache structure can be freed by its owner.
 */
void nilfs_palloc_destroy_cache(struct inode *inode)
{
	nilfs_palloc_clear_cache(inode);
	NILFS_MDT(inode)->mi_palloc_cache = NULL;
}