/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
				    unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
				    unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			  unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
				 unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
				 unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
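
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * the get_vm_area() + map_vm_area() pair is the low-level path that
 * vmap() below wraps.  A caller holding an array of pages could map
 * them by hand like this; note that map_vm_area() advances the @pages
 * cursor, so a copy of the array pointer is passed in.
 */
static void *example_map_pages(struct page **pages, unsigned int count)
{
	struct vm_struct *area;
	struct page **cursor = pages;

	area = get_vm_area(count << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;
	if (map_vm_area(area, PAGE_KERNEL, &cursor)) {
		/* tears down the partial mapping and the area itself */
		vunmap(area->addr);
		return NULL;
	}
	return area->addr;
}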

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
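
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * walking a vmalloc'ed buffer page by page with vmalloc_to_pfn().
 * The backing pages are not physically contiguous, which is why code
 * that needs struct page or pfn values (e.g. to build a scatterlist
 * over the buffer) must translate each page individually.
 */
static void example_show_backing(void *buf, unsigned long size)
{
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE)
		printk(KERN_DEBUG "%p -> pfn %lu\n", buf + off,
		       vmalloc_to_pfn(buf + off));
}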

static struct vm_struct *
__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
		   unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}
	if ((size + addr) < addr)
		goto out;
	if (addr > end - size)
		goto out;

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
				  __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				  -1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				     void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				  -1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but on SMP machines it is
 * only safe to examine the area's size and flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
		       addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
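
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * the usual vmalloc()/vfree() pairing.  The buffer is virtually
 * contiguous but built from order-0 pages, so it suits allocations
 * too large for kmalloc()'s physical-contiguity requirement.
 */
static int example_use_vmalloc(void)
{
	char *buf = vmalloc(128 * 1024);

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, 128 * 1024);
	/* ... use buf ... */
	vfree(buf);
	return 0;
}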

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
				  __builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
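
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * vmap() stitches individually allocated pages into one contiguous
 * kernel virtual range; vunmap() drops only the mapping, so the
 * caller still owns the pages and must free them itself.
 */
static void example_use_vmap(void)
{
	struct page *pages[2];
	void *addr;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto out;

	addr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (addr) {
		/* the two pages are now virtually back to back */
		vunmap(addr);
	}
out:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
}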

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				       PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				     (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				     node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
				   __builtin_return_address(0));
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
				  node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
			      __builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
			      node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
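
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * vmalloc_node() expresses a NUMA placement preference; the page
 * allocator may still fall back to other nodes under pressure.  A
 * caller with no strict requirement could simply retry without the
 * preference.
 */
static void *example_alloc_near(unsigned long size, int nid)
{
	void *p = vmalloc_node(size, nid);

	return p ? p : vmalloc(size);
}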

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
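
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * vread() copies out of whatever vmalloc areas overlap
 * [addr, addr + count), substituting zero bytes for the gaps between
 * areas; /proc/kcore relies on this to present the vmalloc region as
 * one flat range.
 */
static long example_peek_vmalloc(char *kbuf, void *vaddr, unsigned long len)
{
	return vread(kbuf, (char *)vaddr, len);
}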

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
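
/*
 * Illustrative sketch (hypothetical driver code, assumes <linux/fs.h>):
 * the intended pairing of vmalloc_user() and remap_vmalloc_range() in
 * an mmap() handler.  example_buf stands for a buffer obtained earlier
 * from vmalloc_user(), which both zeroes it and sets VM_USERMAP, the
 * flag remap_vmalloc_range() insists on above.
 */
static void *example_buf;	/* = vmalloc_user(...) at init time */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}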

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
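
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * alloc_vm_area() returns a range whose page tables exist but map
 * nothing, for callers such as the Xen grant-table code that install
 * the ptes themselves before releasing the range with free_vm_area().
 */
static void example_reserve_range(void)
{
	struct vm_struct *area = alloc_vm_area(2 * PAGE_SIZE);

	if (!area)
		return;
	/* ... populate the ptes covering area->addr by other means ... */
	free_vm_area(area);
}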

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);


#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		   v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[2 * KSYM_NAME_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
#endif
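
#ifdef CONFIG_PROC_FS
/*
 * Illustrative sketch (hypothetical, assumes <linux/fs.h> and
 * <linux/proc_fs.h>): wiring vmalloc_op to a proc file with the
 * seq_file helpers, the way fs/proc exposes /proc/vmallocinfo.
 */
static int example_vmalloc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmalloc_op);
}

static const struct file_operations example_vmalloc_fops = {
	.open		= example_vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif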