/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
/* Push a pre-allocated element onto the pool's reserve stack. */
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

/* Pop the most recently added element off the reserve stack. */
static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

/* Free every reserved element, then the element array and the pool itself. */
static void free_pool(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both alloc_fn() and free_fn()
 * might sleep - as long as mempool_alloc() is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, -1);
}
EXPORT_SYMBOL(mempool_create);
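
/*
 * Illustrative usage sketch (not part of the original file): creating a
 * pool of sixteen 256-byte buffers with the mempool_kmalloc()/mempool_kfree()
 * helpers defined at the end of this file. For those helpers the element
 * size travels through @pool_data as an integer cast to a pointer:
 *
 *	mempool_t *buf_pool = mempool_create(16, mempool_kmalloc,
 *					     mempool_kfree,
 *					     (void *)(unsigned long)256);
 *	if (!buf_pool)
 *		return -ENOMEM;
 *
 * "buf_pool" is a hypothetical name reused by the sketches below.
 */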
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data, int node_id)
{
	mempool_t *pool;

	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
	if (!pool)
		return NULL;
	memset(pool, 0, sizeof(*pool));
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
					GFP_KERNEL, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (unlikely(!element)) {
			free_pool(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
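
/*
 * NUMA sketch: mempool_create_node() places the pool bookkeeping on a
 * specific node, while mempool_create() above simply passes -1 ("any
 * node"). Assuming a driver that knows its device's home node in a
 * hypothetical variable "nid", using the slab helpers from the end of
 * this file:
 *
 *	pool = mempool_create_node(16, mempool_alloc_slab,
 *				   mempool_free_slab, my_cache, nid);
 */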
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
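
/*
 * Resize sketch for the hypothetical "buf_pool" above. Shrinking releases
 * surplus elements immediately; growing may finish lazily through later
 * mempool_free() calls, as the comment above notes:
 *
 *	if (mempool_resize(buf_pool, 32, GFP_KERNEL))
 *		printk(KERN_WARNING "buf_pool: could not grow reserve\n");
 */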
/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps. The caller
 * has to guarantee that all elements have been returned to the pool (ie:
 * freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
	/* Check for outstanding elements */
	BUG_ON(pool->curr_nr != pool->min_nr);
	free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);
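
/*
 * Teardown sketch: every element obtained with mempool_alloc() must be
 * back in the pool before mempool_destroy() runs, or the BUG_ON() above
 * fires:
 *
 *	mempool_free(buf, buf_pool);
 *	mempool_destroy(buf_pool);
 */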
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
	if (!pool->curr_nr) {
		/*
		 * FIXME: this should be io_schedule(). The timeout is there
		 * as a workaround for some DM problems in 2.6.18.
		 */
		io_schedule_timeout(5*HZ);
	}
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
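
/*
 * Allocation sketch: with __GFP_WAIT set in @gfp_mask (as in GFP_NOIO
 * below) the call cannot fail; it retries and, if need be, sleeps until
 * another context returns an element. The matching mempool_free() is what
 * eventually wakes such waiters:
 *
 *	void *buf = mempool_alloc(buf_pool, GFP_NOIO);
 *	use_buffer(buf);	-- "use_buffer" is a hypothetical consumer
 *	mempool_free(buf, buf_pool);
 */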
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	smp_mb();
	if (pool->curr_nr < pool->min_nr) {
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
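
/*
 * Slab-backed pool sketch, assuming a kmem_cache set up during module
 * init ("my_objs" and struct my_obj are hypothetical):
 *
 *	struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_objs", sizeof(struct my_obj),
 *				     0, 0, NULL);
 *	pool = mempool_create(8, mempool_alloc_slab, mempool_free_slab,
 *			      my_cache);
 */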
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)(long)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)(long)pool_data;
	return kzalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kzalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
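
/*
 * Sketch: these helpers expect the element size, not a pointer, in
 * @pool_data:
 *
 *	pool = mempool_create(4, mempool_kmalloc, mempool_kfree,
 *			      (void *)(unsigned long)PAGE_SIZE);
 */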
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
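
/*
 * Sketch: here @pool_data carries the page order, so 0 below reserves
 * single pages; the pool's elements are struct page pointers:
 *
 *	pool = mempool_create(32, mempool_alloc_pages, mempool_free_pages,
 *			      (void *)0);
 */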