arch/powerpc/lib/rheap.c
/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to.  Normal heap implementations use the memory they manage
 * to place their list.  We cannot do that because the memory we manage may
 * have special properties, for example it is uncacheable or of different
 * endianness.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/rheap.h>
/*
 * Fixup a list_head, needed when copying lists.  If the pointers fall
 * between s and e, apply the delta.  This assumes that
 * sizeof(struct list_head *) == sizeof(unsigned long *).
 */
static inline void fixup(unsigned long s, unsigned long e, int d,
			 struct list_head *l)
{
	unsigned long *pp;

	pp = (unsigned long *)&l->next;
	if (*pp >= s && *pp < e)
		*pp += d;

	pp = (unsigned long *)&l->prev;
	if (*pp >= s && *pp < e)
		*pp += d;
}
/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new blocks to the empty-slot list */
	blk = block + info->max_blocks - new_blocks;
	for (i = 0; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}
/*
 * Assure at least the required amount of empty slots.  If this function
 * causes a grow in the block area then all pointers kept to the block
 * area are invalid!
 */
static int assure_empty(rh_info_t * info, int slots)
{
	int max_blocks;

	/* This function is not meant to be used to grow uncontrollably */
	if (slots >= 4)
		return -EINVAL;

	/* Enough space */
	if (info->empty_slots >= slots)
		return 0;

	/* Round up to the next multiple of 16 blocks */
	max_blocks = ((info->max_blocks + slots) + 15) & ~15;

	return grow(info, max_blocks);
}
static rh_block_t *get_slot(rh_info_t * info)
{
	rh_block_t *blk;

	/* No empty slots left and no way to extend from here;
	 * assure_empty() should have been called beforehand. */
	if (info->empty_slots == 0) {
		printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
		return NULL;
	}

	/* Get empty slot to use */
	blk = list_entry(info->empty_list.next, rh_block_t, list);
	list_del_init(&blk->list);
	info->empty_slots--;

	/* Initialize */
	blk->start = 0;
	blk->size = 0;
	blk->owner = NULL;

	return blk;
}
static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
	list_add(&blk->list, &info->empty_list);
	info->empty_slots++;
}
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;
	rh_block_t *after;
	rh_block_t *next;
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before && s != (before->start + before->size))
		before = NULL;

	if (after && e != after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start -= size;
		after->size += size;
		return;
	}

	/* Grow the before block, and release the after block */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}
static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	struct list_head *l;

	/* Find the block immediately before the given one (if any) */
	list_for_each(l, &info->taken_list) {
		blk = list_entry(l, rh_block_t, list);
		if (blk->start > blkn->start) {
			list_add_tail(&blkn->list, &blk->list);
			return;
		}
	}

	list_add_tail(&blkn->list, &info->taken_list);
}
/*
 * Create a remote heap dynamically.  Note that no memory for the blocks
 * is allocated yet; the block array is allocated upon the first attach or
 * allocation.
 */
rh_info_t *rh_create(unsigned int alignment)
{
	rh_info_t *info;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return ERR_PTR(-EINVAL);

	info = kmalloc(sizeof(*info), GFP_ATOMIC);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = NULL;
	info->max_blocks = 0;
	info->empty_slots = 0;
	info->flags = 0;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	return info;
}
EXPORT_SYMBOL_GPL(rh_create);
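
/*
 * Usage sketch (not part of this file; all names below are hypothetical):
 * create a heap managing some device-local memory at 8-byte granularity.
 *
 *	rh_info_t *heap = rh_create(8);
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 */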
/*
 * Destroy a dynamically created remote heap.  Deallocate only if the areas
 * are not static.
 */
void rh_destroy(rh_info_t * info)
{
	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
		kfree(info->block);

	if ((info->flags & RHIF_STATIC_INFO) == 0)
		kfree(info);
}
EXPORT_SYMBOL_GPL(rh_destroy);
/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything is empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all blocks to the empty-slot list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}
EXPORT_SYMBOL_GPL(rh_init);
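
/*
 * Usage sketch (hypothetical names): early-boot setup with static storage,
 * before kmalloc is available.  The rh_block_t array provides the
 * bookkeeping slots; the managed memory itself is attached separately.
 *
 *	static rh_info_t early_heap;
 *	static rh_block_t early_blocks[16];
 *
 *	rh_init(&early_heap, 4, ARRAY_SIZE(early_blocks), early_blocks);
 */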
/* Attach a free memory region; coalesces with adjacent free regions. */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (IS_ERR_VALUE(e) || (e < s))
		return -ERANGE;

	/* Take final values */
	start = s;
	size = e - s;

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}
EXPORT_SYMBOL_GPL(rh_attach_region);
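
/*
 * Usage sketch (hypothetical DPRAM_BASE/DPRAM_SIZE): hand a dual-port RAM
 * window to the heap.  Since the heap never dereferences what it manages,
 * the "addresses" may be bus addresses or plain offsets.
 *
 *	rh_attach_region(heap, DPRAM_BASE, DPRAM_SIZE);
 */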
/* Detach the given address range; splits a free block if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);
/* Allocate a block of memory at the specified alignment.  The value returned
 * is an offset into the buffer initialized by rh_init(), or a negative number
 * if there is an error.
 */
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	unsigned long start, sp_size;

	/* Validate size, and alignment must be power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
		return (unsigned long) -EINVAL;

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size) {
			start = (blk->start + alignment - 1) & ~(alignment - 1);
			if (start + size <= blk->start + blk->size)
				break;
		}
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		newblk = blk;
	} else {
		/* Fragment caused, split if needed */
		/* Create block for fragment in the beginning */
		sp_size = start - blk->start;
		if (sp_size) {
			rh_block_t *spblk;

			spblk = get_slot(info);
			spblk->start = blk->start;
			spblk->size = sp_size;
			/* add before the blk */
			list_add(&spblk->list, blk->list.prev);
		}
		newblk = get_slot(info);
		newblk->start = start;
		newblk->size = size;

		/* blk still in free list, with updated start and size
		 * for fragment in the end */
		blk->start = start + size;
		blk->size -= sp_size + size;
		/* No fragment in the end, remove blk */
		if (blk->size == 0) {
			list_del(&blk->list);
			release_slot(info, blk);
		}
	}

	newblk->owner = owner;
	attach_taken_block(info, newblk);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);
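
/*
 * Usage sketch: a 64-byte allocation on a 32-byte boundary from the heap
 * above.  Errors come back as small negative values cast to unsigned long,
 * so IS_ERR_VALUE() applies.
 *
 *	unsigned long off = rh_alloc_align(heap, 64, 32, "my-driver");
 *	if (IS_ERR_VALUE(off))
 *		return (int)off;
 */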
/* Allocate a block of memory at the default alignment.  The value returned is
 * an offset into the buffer initialized by rh_init(), or a negative number if
 * there is an error.
 */
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
	return rh_alloc_align(info, size, info->alignment, owner);
}
EXPORT_SYMBOL_GPL(rh_alloc);
/* Allocate a block of memory at the given offset, rounded up to the default
 * alignment.  The value returned is an offset into the buffer initialized by
 * rh_init(), or a negative number if there is an error.
 */
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
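
/*
 * Usage sketch (hypothetical BD_OFFSET/BD_SIZE): claim a region the
 * hardware expects at a fixed offset, e.g. a buffer descriptor ring.
 *
 *	unsigned long bd = rh_alloc_fixed(heap, BD_OFFSET, BD_SIZE, "bd-ring");
 */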
/* Deallocate the memory previously allocated by one of the rh_alloc functions.
 * The return value is the size of the deallocated block, or a negative number
 * if there is an error.
 */
int rh_free(rh_info_t * info, unsigned long start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}
EXPORT_SYMBOL_GPL(rh_free);
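
/*
 * Usage sketch: release the allocation made earlier; the block is merged
 * back into any adjacent free blocks.
 *
 *	rh_free(heap, off);
 */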
/*
 * Fill stats with up to max_stats entries describing the free or taken
 * blocks, depending on what.  Returns the total number of blocks in the
 * selected list, which may exceed max_stats.
 */
int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
	rh_block_t *blk;
	struct list_head *l;
	struct list_head *h;
	int nr;

	switch (what) {

	case RHGS_FREE:
		h = &info->free_list;
		break;

	case RHGS_TAKEN:
		h = &info->taken_list;
		break;

	default:
		return -EINVAL;
	}

	/* Walk the list, copying out as many entries as fit */
	nr = 0;
	list_for_each(l, h) {
		blk = list_entry(l, rh_block_t, list);
		if (stats != NULL && nr < max_stats) {
			stats->start = blk->start;
			stats->size = blk->size;
			stats->owner = blk->owner;
			stats++;
		}
		nr++;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(rh_get_stats);
/*
 * Set the owner tag of a taken block.  Returns the size of the block, or
 * -EINVAL if no taken block contains start.
 */
int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}
EXPORT_SYMBOL_GPL(rh_set_owner);
void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = ARRAY_SIZE(st);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO "  Free:\n");
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u)\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO "  Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%lx-0x%lx (%u) %s\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}
EXPORT_SYMBOL_GPL(rh_dump);
void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
	       blk, blk->start, blk->start + blk->size, blk->size);
}
EXPORT_SYMBOL_GPL(rh_dump_blk);