/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory, etc.
 *
 * This code is based on the buddy allocator found in the sym53c8xx_2
 * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
 * and adapted for general purpose use.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
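
/*
 * For reference only: roughly the shape of the structures this file
 * operates on, as declared in <linux/genalloc.h>.  Only the fields
 * actually used below are shown; the real header may carry more, and
 * it also defines ALLOC_MIN_SHIFT (the smallest supported block is
 * 1 << ALLOC_MIN_SHIFT bytes).
 */
#if 0
struct gen_pool_link {
	struct gen_pool_link *next;	/* singly linked free list per size */
};

struct gen_pool {
	spinlock_t lock;
	unsigned long (*get_new_chunk)(struct gen_pool *);
	struct gen_pool_link *h;	/* one free list head per power of two */
	unsigned long private;		/* opaque cookie for the chunk callback */
	int max_chunk_shift;		/* log2 of the chunk size */
};
#endif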
struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
				 unsigned long (*fp)(struct gen_pool *),
				 unsigned long data)
{
	struct gen_pool *poolp;
	unsigned long tmp;
	int i;

	/*
	 * This is really an arbitrary limit, +10 is enough for
	 * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a larger limit
	 * this can be increased without problems.
	 */
	if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
	    ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
		return NULL;

	if (!max_chunk_shift)
		max_chunk_shift = PAGE_SHIFT;

	poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
	if (!poolp)
		return NULL;
	memset(poolp, 0, sizeof(struct gen_pool));
	poolp->h = kmalloc(sizeof(struct gen_pool_link) *
			   (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
			   GFP_KERNEL);
	if (!poolp->h) {
		printk(KERN_WARNING "gen_pool_create() failed to allocate\n");
		kfree(poolp);
		return NULL;
	}
	memset(poolp->h, 0, sizeof(struct gen_pool_link) *
	       (max_chunk_shift - ALLOC_MIN_SHIFT + 1));

	spin_lock_init(&poolp->lock);
	poolp->get_new_chunk = fp;
	poolp->max_chunk_shift = max_chunk_shift;
	poolp->private = data;

	/* Seed the pool with the requested number of full-size chunks. */
	for (i = 0; i < nr_chunks; i++) {
		tmp = poolp->get_new_chunk(poolp);
		printk(KERN_INFO "allocated %lx\n", tmp);
		if (!tmp)
			break;
		gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
	}

	return poolp;
}
EXPORT_SYMBOL(gen_pool_create);
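
/*
 * Illustrative sketch only, not part of this file: one way a caller could
 * wire up gen_pool_create().  The callback below simply grabs normal pages
 * with __get_free_pages(); a real user of this allocator would return the
 * base of whatever special memory (on-device, uncached, ...) it manages,
 * or 0 on failure.  Assumes max_chunk_shift >= PAGE_SHIFT.
 */
#if 0
static unsigned long example_get_new_chunk(struct gen_pool *poolp)
{
	/* Hand back one naturally aligned chunk of 1 << max_chunk_shift bytes. */
	return __get_free_pages(GFP_KERNEL,
				poolp->max_chunk_shift - PAGE_SHIFT);
}

static struct gen_pool *example_pool_setup(void)
{
	/* Two initial chunks; max_chunk_shift of 0 falls back to PAGE_SHIFT. */
	return gen_pool_create(2, 0, example_get_new_chunk, 0);
}
#endif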
/*
 *  Simple power of two buddy-like generic allocator.
 *  Provides naturally aligned memory chunks.
 *  (An illustrative usage sketch follows the function.)
 */
unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
{
	int j, i, s, max_chunk_size;
	unsigned long a, flags;
	struct gen_pool_link *h = poolp->h;

	max_chunk_size = 1 << poolp->max_chunk_shift;

	if (size > max_chunk_size)
		return 0;

	/* Round the request up to a power of two, no smaller than the
	 * minimum block size, and find the matching free list index. */
	size = max(size, 1 << ALLOC_MIN_SHIFT);
	s = roundup_pow_of_two(size);
	j = i = fls(s) - 1 - ALLOC_MIN_SHIFT;

	spin_lock_irqsave(&poolp->lock, flags);
	/* Walk up the free lists until one has a block to hand out. */
	while (!h[j].next) {
		if (s == max_chunk_size) {
			/* Nothing free at any size: ask for a fresh chunk. */
			struct gen_pool_link *ptr;
			spin_unlock_irqrestore(&poolp->lock, flags);
			ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
			spin_lock_irqsave(&poolp->lock, flags);
			h[j].next = ptr;
			if (h[j].next)
				h[j].next->next = NULL;
			break;
		}
		j++;
		s <<= 1;
	}
	a = (unsigned long) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		/*
		 * This should be split into a separate function doing
		 * the chunk split in order to support custom handling
		 * of memory not physically accessible by the host.
		 */
		while (j > i) {
			/* Split the block in half, keeping the lower half
			 * and putting the upper half on the smaller list. */
			j -= 1;
			s >>= 1;
			h[j].next = (struct gen_pool_link *) (a + s);
			h[j].next->next = NULL;
		}
	}
	spin_unlock_irqrestore(&poolp->lock, flags);
	return a;
}
EXPORT_SYMBOL(gen_pool_alloc);
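
/*
 * Illustrative sketch only, not part of this file: requests are rounded up
 * to a power of two and never below 1 << ALLOC_MIN_SHIFT, so the returned
 * address is naturally aligned to the rounded size.  The pool pointer and
 * request size here are placeholders.
 */
#if 0
static void example_alloc(struct gen_pool *poolp)
{
	unsigned long addr;

	/* A 100-byte request is served from the 128-byte free list
	 * (assuming 1 << ALLOC_MIN_SHIFT <= 128), so addr is 128-byte
	 * aligned. */
	addr = gen_pool_alloc(poolp, 100);
	if (!addr)
		return;		/* pool exhausted and no new chunk available */

	/* ... use the memory ... */

	/* Free with the same size that was requested. */
	gen_pool_free(poolp, addr, 100);
}
#endif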
/*
 *  Counter-part of the generic allocator.
 *  (A sketch of the buddy arithmetic follows the function.)
 */
void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
{
	struct gen_pool_link *q;
	struct gen_pool_link *h = poolp->h;
	unsigned long a, b, flags;
	int i, s, max_chunk_size;

	max_chunk_size = 1 << poolp->max_chunk_shift;

	if (size > max_chunk_size)
		return;

	/* Round to the same power-of-two size the allocation used. */
	size = max(size, 1 << ALLOC_MIN_SHIFT);
	s = roundup_pow_of_two(size);
	i = fls(s) - 1 - ALLOC_MIN_SHIFT;

	a = ptr;

	spin_lock_irqsave(&poolp->lock, flags);
	while (1) {
		if (s == max_chunk_size) {
			/* Full chunk: just put it back on the largest list. */
			((struct gen_pool_link *)a)->next = h[i].next;
			h[i].next = (struct gen_pool_link *)a;
			break;
		}
		/* Look for the buddy of this block on its free list. */
		b = a ^ s;
		q = &h[i];

		while (q->next && q->next != (struct gen_pool_link *)b)
			q = q->next;

		if (!q->next) {
			/* Buddy still in use: free the block as-is. */
			((struct gen_pool_link *)a)->next = h[i].next;
			h[i].next = (struct gen_pool_link *)a;
			break;
		}
		/* Buddy is free: unlink it and merge into a larger block. */
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		i++;
	}
	spin_unlock_irqrestore(&poolp->lock, flags);
}
EXPORT_SYMBOL(gen_pool_free);
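
/*
 * Illustrative sketch only, not part of this file: the buddy arithmetic
 * used while coalescing in gen_pool_free().  Because every block is
 * naturally aligned, the buddy of a block at address a with size s is
 * a ^ s, and the merged double-size block starts at the lower of the two
 * addresses.  The addresses below are made up.
 */
#if 0
static void example_buddy_math(void)
{
	unsigned long a = 0x1080;	/* hypothetical 128-byte block */
	unsigned long s = 0x80;
	unsigned long b = a ^ s;	/* its buddy: 0x1000 */
	unsigned long merged = a & b;	/* merged 256-byte block: 0x1000 */

	printk(KERN_INFO "block %lx, buddy %lx, merged %lx\n", a, b, merged);
}
#endif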