/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
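
/*
 * Editorial usage sketch (not part of the original file): map_vm_area()
 * advances the caller's page-array cursor as it installs ptes, which is
 * why callers pass a struct page *** rather than the array itself. The
 * function and variable names below are hypothetical.
 */
static void *example_map_one_page(struct page *page)
{
	struct vm_struct *area;
	struct page *pages[1] = { page };
	struct page **cursor = pages;

	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area)
		return NULL;
	if (map_vm_area(area, PAGE_KERNEL, &cursor)) {
		/* remove_vm_area() also unmaps any ptes already installed */
		kfree(remove_vm_area(area->addr));
		return NULL;
	}
	/* cursor now points one past the last page consumed */
	return area->addr;
}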
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}
/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
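
/*
 * Editorial usage sketch (not part of the original file): reserving a
 * chunk of kernel virtual address space the way an ioremap()
 * implementation would, then releasing it. The function name and the
 * 4-page size are hypothetical.
 */
static int example_reserve_range(void)
{
	struct vm_struct *area;

	area = get_vm_area(4 * PAGE_SIZE, VM_IOREMAP);	/* guard page added internally */
	if (!area)
		return -ENOMEM;

	/* ... an ioremap() implementation would install ptes over area->addr here ... */

	/* remove_vm_area() unlinks and unmaps; the descriptor must be freed. */
	kfree(remove_vm_area(area->addr));
	return 0;
}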
/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}
/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;

	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}
void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}
/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *	May not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	May not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
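
/*
 * Editorial usage sketch (not part of the original file): making two
 * independently allocated pages appear virtually contiguous with vmap(),
 * then unmapping with vunmap(). VM_ALLOC is used as the flags value since
 * it is a flag this file itself passes and @flags simply lands in
 * vm_struct->flags; the function name is hypothetical.
 */
static int example_vmap_two_pages(void)
{
	struct page *pages[2];
	void *virt;
	int i;

	for (i = 0; i < 2; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	virt = vmap(pages, 2, VM_ALLOC, PAGE_KERNEL);
	if (!virt)
		goto out_free;

	memset(virt, 0, 2 * PAGE_SIZE);	/* both pages visible through one mapping */
	vunmap(virt);			/* tears down the mapping, not the pages */

out_free:
	while (--i >= 0)
		__free_page(pages[i]);
	return 0;
}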
void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
	else
		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}
/**
 *	__vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	return __vmalloc_area(area, gfp_mask, prot);
}
EXPORT_SYMBOL(__vmalloc);
/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
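
/*
 * Editorial usage sketch (not part of the original file): the common
 * vmalloc()/vfree() pairing for a large, virtually (but not physically)
 * contiguous buffer. The names and the 1MB size are hypothetical.
 */
static void *example_table;

static int example_table_init(void)
{
	example_table = vmalloc(1024 * 1024);	/* rounded up to whole pages */
	if (!example_table)
		return -ENOMEM;
	memset(example_table, 0, 1024 * 1024);
	return 0;
}

static void example_table_exit(void)
{
	vfree(example_table);	/* must not be called from interrupt context */
	example_table = NULL;
}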
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
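
/*
 * Editorial usage sketch (not part of the original file): vread() is the
 * sort of backend a /dev/kmem-style reader would use to copy safely out of
 * vmalloc space; gaps between areas (including guard pages) read back as
 * zero bytes rather than faulting. The wrapper name is hypothetical.
 */
static long example_peek_vmalloc(char *kbuf, char *vmalloc_addr, unsigned long len)
{
	/* returns how many bytes were placed in kbuf */
	return vread(kbuf, vmalloc_addr, len);
}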